author    Zhiqiang Tu <ztu@codeaurora.org>    2017-06-12 15:39:30 +0800
committer Zhiqiang Tu <ztu@codeaurora.org>    2017-06-12 15:40:13 +0800
commit    d992f38d6dad08d7566d252b9bc8577c67331f44 (patch)
tree      a76ca1e7ad903b1442cca0a2170bfc891a2984bb /drivers/gpu
parent    5beccaf9302888780a40e529c8c835a0b3eacaef (diff)
parent    c1a2472056c800ff46e0ac21a4b67c179a570ad0 (diff)
Merge remote-tracking branch 'remotes/quic/msm-4.4' into dev/msm-4.4-8996au
Conflicts:
	arch/arm64/configs/msm-auto-perf_defconfig

Change-Id: Ibc59804762c3e14031c22b03a52d49ff2acc36d4
Signed-off-by: Zhiqiang Tu <ztu@codeaurora.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 817
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 248
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 73
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 12
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 113
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_snapshot.c | 39
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 70
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dba_bridge.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_defs.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_drm.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_panel.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 5
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 1006
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 175
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 317
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c | 227
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 32
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 25
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 22
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 98
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 33
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 710
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 52
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 112
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 242
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 92
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 271
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 45
-rw-r--r--  drivers/gpu/drm/msm/msm_prop.c | 67
-rw-r--r--  drivers/gpu/drm/msm/msm_prop.h | 41
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_smmu.c | 151
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot_api.h | 13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 63
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 6
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 123
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 32
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_intf.c | 9
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_top.c | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 83
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c | 1269
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.h | 6
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.c | 68
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.h | 14
-rw-r--r--  drivers/gpu/drm/msm/sde_edid_parser.c | 512
-rw-r--r--  drivers/gpu/drm/msm/sde_edid_parser.h | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 12
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 79
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 2
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 14
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c | 1
-rw-r--r--  drivers/gpu/msm/kgsl.c | 66
-rw-r--r--  drivers/gpu/msm/kgsl_device.h | 1
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 2
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c | 132
91 files changed, 6038 insertions(+), 2034 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7c42ff670080..a0924330d125 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5b261adb4b69..3a25da4a6e60 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1126,23 +1126,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1250,14 +1237,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1272,7 +1259,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1311,7 +1298,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 267749a94c5a..d6d3cda77762 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1114,23 +1114,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1238,14 +1225,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1260,7 +1247,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1299,7 +1286,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 9b4dcf76ce6c..d6e51d4b04f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1096,23 +1096,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1220,14 +1207,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1242,7 +1229,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1281,7 +1268,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c7f1db..d1c9525d81eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2258,7 +2258,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index cc1e16fd7e76..39b8e171cad5 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -75,6 +75,8 @@
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
+/* Force 10bpc */
+#define EDID_QUIRK_FORCE_10BPC (1 << 11)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -89,6 +91,14 @@ struct detailed_mode_closure {
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
+/*Enum storing luminance types for HDR blocks in EDID*/
+enum luminance_value {
+ NO_LUMINANCE_DATA = 3,
+ MAXIMUM_LUMINANCE = 4,
+ FRAME_AVERAGE_LUMINANCE = 5,
+ MINIMUM_LUMINANCE = 6
+};
+
static struct edid_quirk {
char vendor[4];
int product_id;
@@ -117,6 +127,9 @@ static struct edid_quirk {
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
+ /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+ { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@@ -992,6 +1005,221 @@ static const struct drm_display_mode edid_cea_modes[] = {
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
.vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ /* 65 - 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 66 - 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 67 - 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 68 - 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 69 - 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 70 - 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 71 - 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 72 - 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 73 - 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 74 - 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 75 - 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 76 - 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 77 - 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 78 - 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 79 - 1680x720@24Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 80 - 1680x720@25Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
+ 2948, 3168, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 81 - 1680x720@30Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
+ 2420, 2640, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 82 - 1680x720@50Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
+ 1980, 2200, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 83 - 1680x720@60Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
+ 1980, 2200, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 84 - 1680x720@100Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
+ 1780, 2000, 0, 720, 725, 730, 825, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 85 - 1680x720@120Hz */
+ { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
+ 1780, 2000, 0, 720, 725, 730, 825, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 86 - 2560x1080@24Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
+ 3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 87 - 2560x1080@25Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
+ 3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 88 - 2560x1080@30Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
+ 3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 89 - 2560x1080@50Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
+ 3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 90 - 2560x1080@60Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
+ 2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 91 - 2560x1080@100Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
+ 2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 92 - 2560x1080@120Hz */
+ { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
+ 3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ /* 93 - 3840x2160p@24Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,},
+ /* 94 - 3840x2160p@25Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+ /* 95 - 3840x2160p@30Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+ /* 96 - 3840x2160p@50Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+ /* 97 - 3840x2160p@60Hz 16:9 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9},
+ /* 98 - 4096x2160p@24Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+ /* 99 - 4096x2160p@25Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+ /* 100 - 4096x2160p@30Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+ /* 101 - 4096x2160p@50Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
+ 5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+ /* 102 - 4096x2160p@60Hz 256:135 */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
+ 4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135},
+ /* 103 - 3840x2160p@24Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
+ 5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+ /* 104 - 3840x2160p@25Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+ /* 105 - 3840x2160p@30Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+ /* 106 - 3840x2160p@50Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
+ 4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
+ /* 107 - 3840x2160p@60Hz 64:27 */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
+ 4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27},
};
/*
@@ -2482,12 +2710,15 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
return closure.modes;
}
-
+#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
+#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
+#define EXTENDED_TAG 0x07
#define VIDEO_CAPABILITY_BLOCK 0x07
+#define Y420_VIDEO_DATA_BLOCK 0x0E
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
@@ -3076,6 +3307,21 @@ static bool cea_db_is_hdmi_vsdb(const u8 *db)
return hdmi_id == HDMI_IEEE_OUI;
}
+static bool cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 7)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
@@ -3199,6 +3445,258 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
}
static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+ u8 len = cea_db_payload_len(db);
+
+ if (len < 7)
+ return;
+
+ if (db[4] != 1)
+ return; /* invalid version */
+
+ connector->max_tmds_char = db[5] * 5;
+ connector->scdc_present = db[6] & (1 << 7);
+ connector->rr_capable = db[6] & (1 << 6);
+ connector->flags_3d = db[6] & 0x7;
+ connector->supports_scramble = connector->scdc_present &&
+ (db[6] & (1 << 3));
+
+ DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, "
+ "scdc %s, "
+ "rr %s, "
+ "3D flags 0x%x, "
+ "scramble %s\n",
+ connector->max_tmds_char,
+ connector->scdc_present ? "available" : "not available",
+ connector->rr_capable ? "capable" : "not capable",
+ connector->flags_3d,
+ connector->supports_scramble ?
+ "supported" : "not supported");
+}
+
+static void
+drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid)
+{
+ const u8 *cea = drm_find_cea_extension(edid);
+ const u8 *db = NULL;
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+
+ if (cea_db_tag(db) == VENDOR_BLOCK) {
+ /* HDMI Vendor-Specific Data Block */
+ if (cea_db_is_hdmi_vsdb(db))
+ parse_hdmi_vsdb(connector, db);
+ /* HDMI Forum Vendor-Specific Data Block */
+ else if (cea_db_is_hdmi_hf_vsdb(db))
+ parse_hdmi_hf_vsdb(connector, db);
+ }
+ }
+ }
+}
+
+/*
+ * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ *
+ * Parses the HDMI VCDB to extract sink info for @connector.
+ */
+static void
+drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
+{
+ /*
+ * Check if the sink specifies underscan
+ * support for:
+ * BIT 5: preferred video format
+ * BIT 3: IT video format
+ * BIT 1: CE video format
+ */
+
+ connector->pt_scan_info =
+ (db[2] & (BIT(4) | BIT(5))) >> 4;
+ connector->it_scan_info =
+ (db[2] & (BIT(3) | BIT(2))) >> 2;
+ connector->ce_scan_info =
+ db[2] & (BIT(1) | BIT(0));
+
+ DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
+ (int) connector->pt_scan_info,
+ (int) connector->it_scan_info,
+ (int) connector->ce_scan_info);
+}
+
+static bool drm_edid_is_luminance_value_present(
+u32 block_length, enum luminance_value value)
+{
+ return block_length > NO_LUMINANCE_DATA && value <= block_length;
+}
+
+/*
+ * drm_extract_hdr_db - Parse the HDMI HDR extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI HDR extended block
+ *
+ * Parses the HDMI HDR extended block to extract sink info for @connector.
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+
+ u8 len = 0;
+
+ if (!db) {
+ DRM_ERROR("invalid db\n");
+ return;
+ }
+
+ len = db[0] & 0x1f;
+ /* Byte 3: Electro-Optical Transfer Functions */
+ connector->hdr_eotf = db[2] & 0x3F;
+
+ /* Byte 4: Static Metadata Descriptor Type 1 */
+ connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+ /* Byte 5: Desired Content Maximum Luminance */
+ if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+ connector->hdr_max_luminance =
+ db[MAXIMUM_LUMINANCE];
+
+ /* Byte 6: Desired Content Max Frame-average Luminance */
+ if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+ connector->hdr_avg_luminance =
+ db[FRAME_AVERAGE_LUMINANCE];
+
+ /* Byte 7: Desired Content Min Luminance */
+ if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+ connector->hdr_min_luminance =
+ db[MINIMUM_LUMINANCE];
+
+ connector->hdr_supported = true;
+
+ DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+ DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+ DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+ DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+ DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
+
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses the all extended tag blocks extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+struct edid *edid)
+{
+ const u8 *cea = drm_find_cea_extension(edid);
+ const u8 *db = NULL;
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+
+ if (cea_db_tag(db) == EXTENDED_TAG) {
+ DRM_DEBUG_KMS("found extended tag block = %d\n",
+ db[1]);
+ switch (db[1]) {
+ case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+ drm_extract_vcdb_info(connector, db);
+ break;
+ case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+ drm_extract_hdr_db(connector, db);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+static u8 *
+drm_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+ u8 *db = NULL;
+ u8 *cea = NULL;
+
+ if (!edid) {
+ pr_err("%s: invalid input\n", __func__);
+ return NULL;
+ }
+
+ cea = drm_find_cea_extension(edid);
+
+ if (cea && cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (cea_db_offsets(cea, &start, &end))
+ return NULL;
+
+ for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ if ((cea_db_tag(db) == EXTENDED_TAG) &&
+ (db[1] == blk_id))
+ return db;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * add_YCbCr420VDB_modes - add the modes found in Ycbcr420 VDB block
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ * Parses the YCbCr420 VDB block and adds the modes to @connector.
+ */
+static int
+add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid)
+{
+
+ const u8 *db = NULL;
+ u32 i = 0;
+ u32 modes = 0;
+ u32 video_format = 0;
+ u8 len = 0;
+
+ /*Find the YCbCr420 VDB*/
+ db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK);
+ /* Offset to byte 3 */
+ if (db) {
+ len = db[0] & 0x1F;
+ db += 2;
+ for (i = 0; i < len - 1; i++) {
+ struct drm_display_mode *mode;
+
+ video_format = *(db + i) & 0x7F;
+ mode = drm_display_mode_from_vic_index(connector,
+ db, len-1, i);
+ if (mode) {
+ DRM_DEBUG_KMS("Adding mode for vic = %d\n",
+ video_format);
+ drm_mode_probed_add(connector, mode);
+ modes++;
+ }
+ }
+ }
+ return modes;
+}
+
+static void
monitor_name(struct detailed_timing *t, void *data)
{
if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
@@ -3277,6 +3775,9 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
/* HDMI Vendor-Specific Data Block */
if (cea_db_is_hdmi_vsdb(db))
parse_hdmi_vsdb(connector, db);
+ /* HDMI Forum Vendor-Specific Data Block */
+ else if (cea_db_is_hdmi_hf_vsdb(db))
+ parse_hdmi_hf_vsdb(connector, db);
break;
default:
break;
@@ -3733,6 +4234,10 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
+ /* Extract audio and video latency fields for the sink */
+ drm_hdmi_extract_vsdbs_info(connector, edid);
+ /* Extract info from extended tag blocks */
+ drm_hdmi_extract_extended_blk_info(connector, edid);
/* HDMI deep color modes supported? Assign to info, if so */
drm_assign_hdmi_deep_color_info(edid, info, connector);
@@ -3775,6 +4280,148 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
+static int validate_displayid(u8 *displayid, int length, int idx)
+{
+ int i;
+ u8 csum = 0;
+ struct displayid_hdr *base;
+
+ base = (struct displayid_hdr *)&displayid[idx];
+
+ DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+ base->rev, base->bytes, base->prod_id, base->ext_count);
+
+ if (base->bytes + 5 > length - idx)
+ return -EINVAL;
+ for (i = idx; i <= base->bytes + 5; i++)
+ csum += displayid[i];
+
+ if (csum) {
+ DRM_ERROR("DisplayID checksum invalid, remainder is %d\n",
+ csum);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct drm_display_mode *
+drm_mode_displayid_detailed(struct drm_device *dev,
+struct displayid_detailed_timings_1 *timings)
+{
+ struct drm_display_mode *mode;
+ unsigned pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16));
+ unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
+ unsigned hblank =
+ (timings->hblank[0] |
+ timings->hblank[1] << 8) + 1;
+ unsigned hsync = (timings->hsync[0] |
+ (timings->hsync[1] & 0x7f) << 8) + 1;
+ unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
+ unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
+ unsigned vblank =
+ (timings->vblank[0] |
+ timings->vblank[1] << 8) + 1;
+ unsigned vsync =
+ (timings->vsync[0] |
+ (timings->vsync[1] & 0x7f) << 8) + 1;
+ unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
+ bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
+ bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ mode->clock = pixel_clock * 10;
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hsync;
+ mode->hsync_end = mode->hsync_start + hsync_width;
+ mode->htotal = mode->hdisplay + hblank;
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vsync;
+ mode->vsync_end = mode->vsync_start + vsync_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+ mode->flags = 0;
+ mode->flags |= hsync_positive ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= vsync_positive ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (timings->flags & 0x80)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static int add_displayid_detailed_1_modes(struct drm_connector *connector,
+ struct displayid_block *block)
+{
+ struct displayid_detailed_timing_block *det =
+ (struct displayid_detailed_timing_block *)block;
+ int i;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+ /* blocks must be multiple of 20 bytes length */
+ if (block->num_bytes % 20)
+ return 0;
+
+ num_timings = block->num_bytes / 20;
+ for (i = 0; i < num_timings; i++) {
+ struct displayid_detailed_timings_1 *timings = &det->timings[i];
+
+ newmode = drm_mode_displayid_detailed(connector->dev, timings);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
+
+static int add_displayid_detailed_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ u8 *displayid;
+ int ret;
+ int idx = 1;
+ int length = EDID_LENGTH;
+ struct displayid_block *block;
+ int num_modes = 0;
+
+ displayid = drm_find_displayid_extension(edid);
+ if (!displayid)
+ return 0;
+
+ ret = validate_displayid(displayid, length, idx);
+ if (ret)
+ return 0;
+
+ idx += sizeof(struct displayid_hdr);
+ while (block = (struct displayid_block *)&displayid[idx],
+ idx + sizeof(struct displayid_block) <= length &&
+ idx + sizeof(struct displayid_block) +
+ block->num_bytes <= length &&
+ block->num_bytes > 0) {
+ idx += block->num_bytes + sizeof(struct displayid_block);
+ switch (block->tag) {
+ case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+ num_modes += add_displayid_detailed_1_modes(connector,
+ block);
+ break;
+ }
+ }
+ return num_modes;
+}
+
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
@@ -3820,6 +4467,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_established_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
num_modes += add_alternate_cea_modes(connector, edid);
+ num_modes += add_displayid_detailed_modes(connector, edid);
+ num_modes += add_YCbCr420VDB_modes(connector, edid);
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
num_modes += add_inferred_modes(connector, edid);
@@ -3834,6 +4483,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
+ if (quirks & EDID_QUIRK_FORCE_10BPC)
+ connector->display_info.bpc = 10;
+
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
@@ -4029,96 +4681,105 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+static int drm_parse_tiled_block(struct drm_connector *connector,
+ struct displayid_block *block)
+{
+ struct displayid_tiled_block *tile =
+ (struct displayid_tiled_block *)block;
+ u16 w, h;
+ u8 tile_v_loc, tile_h_loc;
+ u8 num_v_tile, num_h_tile;
+ struct drm_tile_group *tg;
+
+ w = tile->tile_size[0] | tile->tile_size[1] << 8;
+ h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+ num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+ num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+ tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+ tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+ connector->has_tile = true;
+ if (tile->tile_cap & 0x80)
+ connector->tile_is_single_monitor = true;
+
+ connector->num_h_tile = num_h_tile + 1;
+ connector->num_v_tile = num_v_tile + 1;
+ connector->tile_h_loc = tile_h_loc;
+ connector->tile_v_loc = tile_v_loc;
+ connector->tile_h_size = w + 1;
+ connector->tile_v_size = h + 1;
+
+ DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+ DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+ DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+ num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+ DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0],
+ tile->topology_id[1], tile->topology_id[2]);
+
+ tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+ if (!tg)
+ tg = drm_mode_create_tile_group(connector->dev,
+ tile->topology_id);
+
+ if (!tg)
+ return -ENOMEM;
+
+ if (connector->tile_group != tg) {
+ /* if we haven't got a pointer,
+ * take the reference, drop ref to old tile group
+ */
+ if (connector->tile_group)
+ drm_mode_put_tile_group(connector->dev,
+ connector->tile_group);
+
+ connector->tile_group = tg;
+ } else
+ /* if same tile group, then release the ref we just took. */
+ drm_mode_put_tile_group(connector->dev, tg);
+ return 0;
+}
+
static int drm_parse_display_id(struct drm_connector *connector,
u8 *displayid, int length,
bool is_edid_extension)
{
/* if this is an EDID extension the first byte will be 0x70 */
int idx = 0;
- struct displayid_hdr *base;
struct displayid_block *block;
- u8 csum = 0;
- int i;
+ int ret;
if (is_edid_extension)
idx = 1;
- base = (struct displayid_hdr *)&displayid[idx];
-
- DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
- base->rev, base->bytes, base->prod_id, base->ext_count);
-
- if (base->bytes + 5 > length - idx)
- return -EINVAL;
-
- for (i = idx; i <= base->bytes + 5; i++) {
- csum += displayid[i];
- }
- if (csum) {
- DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
- return -EINVAL;
- }
+ ret = validate_displayid(displayid, length, idx);
+ if (ret)
+ return ret;
- block = (struct displayid_block *)&displayid[idx + 4];
- DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
- block->tag, block->rev, block->num_bytes);
-
- switch (block->tag) {
- case DATA_BLOCK_TILED_DISPLAY: {
- struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
-
- u16 w, h;
- u8 tile_v_loc, tile_h_loc;
- u8 num_v_tile, num_h_tile;
- struct drm_tile_group *tg;
-
- w = tile->tile_size[0] | tile->tile_size[1] << 8;
- h = tile->tile_size[2] | tile->tile_size[3] << 8;
-
- num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
- num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
- tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
- tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
-
- connector->has_tile = true;
- if (tile->tile_cap & 0x80)
- connector->tile_is_single_monitor = true;
-
- connector->num_h_tile = num_h_tile + 1;
- connector->num_v_tile = num_v_tile + 1;
- connector->tile_h_loc = tile_h_loc;
- connector->tile_v_loc = tile_v_loc;
- connector->tile_h_size = w + 1;
- connector->tile_v_size = h + 1;
-
- DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
- DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
- DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
- num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
- DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
-
- tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
- if (!tg) {
- tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+ idx += sizeof(struct displayid_hdr);
+ while (block = (struct displayid_block *)&displayid[idx],
+ idx + sizeof(struct displayid_block) <= length &&
+ idx + sizeof(struct displayid_block) +
+ block->num_bytes <= length &&
+ block->num_bytes > 0) {
+ idx += block->num_bytes + sizeof(struct displayid_block);
+ DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
+ block->tag, block->rev, block->num_bytes);
+
+ switch (block->tag) {
+ case DATA_BLOCK_TILED_DISPLAY:
+ ret = drm_parse_tiled_block(connector, block);
+ if (ret)
+ return ret;
+ break;
+ case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+ /* handled in mode gathering code. */
+ break;
+ default:
+ DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n",
+ block->tag);
+ break;
}
- if (!tg)
- return -ENOMEM;
-
- if (connector->tile_group != tg) {
- /* if we haven't got a pointer,
- take the reference, drop ref to old tile group */
- if (connector->tile_group) {
- drm_mode_put_tile_group(connector->dev, connector->tile_group);
- }
- connector->tile_group = tg;
- } else
- /* if same tile group, then release the ref we just took. */
- drm_mode_put_tile_group(connector->dev, tg);
- }
- break;
- default:
- printk("unknown displayid tag %d\n", block->tag);
- break;
}
return 0;
}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..6e4dd62d4ed9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -46,6 +46,8 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
+#include <linux/interval_tree_generic.h>
+#include <linux/rbtree.h>
/**
* DOC: Overview
@@ -73,7 +75,8 @@
* allocations and avoiding too much fragmentation. This means free space
* searches are O(num_holes). Given that all the fancy features drm_mm supports
* something better would be fairly complex and since gfx thrashing is a fairly
- * steep cliff not a real concern. Removing a node again is O(1).
+ * steep cliff not a real concern. Removing a node again is O(1). With the
+ * rbtree to track free holes, free hole search becomes O(log(num_holes)).
*
* drm_mm supports a few features: Alignment and range restrictions can be
* supplied. Further more every &drm_mm_node has a color value (which is just an
@@ -103,6 +106,98 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
u64 end,
enum drm_mm_search_flags flags);
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->start + (node)->size - 1)
+
+INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
+ u64, __subtree_last,
+ START, LAST, static inline, drm_mm_interval_tree)
+
+struct drm_mm_node *
+drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last)
+{
+ return drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_first);
+
+struct drm_mm_node *
+drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last)
+{
+ return drm_mm_interval_tree_iter_next(node, start, last);
+}
+EXPORT_SYMBOL(drm_mm_interval_next);
+
+static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node)
+{
+ struct drm_mm *mm = hole_node->mm;
+ struct rb_node **link, *rb;
+ struct drm_mm_node *parent;
+
+ node->__subtree_last = LAST(node);
+
+ if (hole_node->allocated) {
+ rb = &hole_node->rb;
+ while (rb) {
+ parent = rb_entry(rb, struct drm_mm_node, rb);
+ if (parent->__subtree_last >= node->__subtree_last)
+ break;
+
+ parent->__subtree_last = node->__subtree_last;
+ rb = rb_parent(rb);
+ }
+
+ rb = &hole_node->rb;
+ link = &hole_node->rb.rb_right;
+ } else {
+ rb = NULL;
+ link = &mm->interval_tree.rb_node;
+ }
+
+ while (*link) {
+ rb = *link;
+ parent = rb_entry(rb, struct drm_mm_node, rb);
+ if (parent->__subtree_last < node->__subtree_last)
+ parent->__subtree_last = node->__subtree_last;
+ if (node->start < parent->start)
+ link = &parent->rb.rb_left;
+ else
+ link = &parent->rb.rb_right;
+ }
+
+ rb_link_node(&node->rb, rb, link);
+ rb_insert_augmented(&node->rb,
+ &mm->interval_tree,
+ &drm_mm_interval_tree_augment);
+}
+
+static void
+rb_insert_hole_node(struct drm_mm_node *hole_node, struct drm_mm *mm)
+{
+ struct rb_node **new = &(mm->holes_tree.rb_node);
+ struct rb_node *parent = NULL;
+ struct drm_mm_node *cur;
+
+ while (*new) {
+ parent = *new;
+ cur = rb_entry(parent, struct drm_mm_node, hole_node);
+
+ if (__drm_mm_hole_node_start(hole_node)
+ < __drm_mm_hole_node_start(cur))
+ new = &parent->rb_left;
+ else
+ new = &parent->rb_right;
+ }
+ rb_link_node(&hole_node->hole_node, parent, new);
+ rb_insert_color(&hole_node->hole_node, &mm->holes_tree);
+}
+
+static void rb_erase_hole_node(struct drm_mm_node *hole_node, struct drm_mm *mm)
+{
+ rb_erase(&hole_node->hole_node, &mm->holes_tree);
+}
+
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
u64 size, unsigned alignment,
@@ -142,6 +237,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
+ rb_erase_hole_node(hole_node, mm);
}
node->start = adj_start;
@@ -150,14 +246,16 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
node->color = color;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ drm_mm_interval_tree_add_node(hole_node, node);
+
BUG_ON(node->start + node->size > adj_end);
node->hole_follows = 0;
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
+ rb_insert_hole_node(node, mm);
node->hole_follows = 1;
}
}
@@ -178,39 +276,54 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- struct drm_mm_node *hole;
u64 end = node->start + node->size;
- u64 hole_start;
- u64 hole_end;
+ struct drm_mm_node *hole;
+ u64 hole_start, hole_end;
- BUG_ON(node == NULL);
+ if (WARN_ON(node->size == 0))
+ return -EINVAL;
/* Find the relevant hole to add our node to */
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- if (hole_start > node->start || hole_end < end)
- continue;
+ hole = drm_mm_interval_tree_iter_first(&mm->interval_tree,
+ node->start, ~(u64)0);
+ if (hole) {
+ if (hole->start < end)
+ return -ENOSPC;
+ } else {
+ hole = list_entry(&mm->head_node.node_list,
+ typeof(*hole), node_list);
+ }
- node->mm = mm;
- node->allocated = 1;
+ hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
+ if (!hole->hole_follows)
+ return -ENOSPC;
- INIT_LIST_HEAD(&node->hole_stack);
- list_add(&node->node_list, &hole->node_list);
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = __drm_mm_hole_node_end(hole);
+ if (hole_start > node->start || hole_end < end)
+ return -ENOSPC;
- if (node->start == hole_start) {
- hole->hole_follows = 0;
- list_del_init(&hole->hole_stack);
- }
+ node->mm = mm;
+ node->allocated = 1;
- node->hole_follows = 0;
- if (end != hole_end) {
- list_add(&node->hole_stack, &mm->hole_stack);
- node->hole_follows = 1;
- }
+ list_add(&node->node_list, &hole->node_list);
- return 0;
+ drm_mm_interval_tree_add_node(hole, node);
+
+ if (node->start == hole_start) {
+ hole->hole_follows = 0;
+ list_del(&hole->hole_stack);
+ rb_erase_hole_node(hole, mm);
}
- return -ENOSPC;
+ node->hole_follows = 0;
+ if (end != hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ rb_insert_hole_node(node, mm);
+ node->hole_follows = 1;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
@@ -237,6 +350,9 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
{
struct drm_mm_node *hole_node;
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
hole_node = drm_mm_search_free_generic(mm, size, alignment,
color, sflags);
if (!hole_node)
@@ -289,6 +405,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
+ rb_erase_hole_node(hole_node, mm);
}
node->start = adj_start;
@@ -297,9 +414,10 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
node->color = color;
node->allocated = 1;
- INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ drm_mm_interval_tree_add_node(hole_node, node);
+
BUG_ON(node->start < start);
BUG_ON(node->start < adj_start);
BUG_ON(node->start + node->size > adj_end);
@@ -308,6 +426,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
node->hole_follows = 0;
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
+ rb_insert_hole_node(node, mm);
node->hole_follows = 1;
}
}
@@ -338,6 +457,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
{
struct drm_mm_node *hole_node;
+ if (WARN_ON(size == 0))
+ return -EINVAL;
+
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
start, end, sflags);
@@ -377,6 +499,7 @@ void drm_mm_remove_node(struct drm_mm_node *node)
BUG_ON(__drm_mm_hole_node_start(node) ==
__drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
+ rb_erase_hole_node(node, mm);
} else
BUG_ON(__drm_mm_hole_node_start(node) !=
__drm_mm_hole_node_end(node));
@@ -385,9 +508,11 @@ void drm_mm_remove_node(struct drm_mm_node *node)
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
list_add(&prev_node->hole_stack, &mm->hole_stack);
+ rb_insert_hole_node(prev_node, mm);
} else
list_move(&prev_node->hole_stack, &mm->hole_stack);
+ drm_mm_interval_tree_remove(node, &mm->interval_tree);
list_del(&node->node_list);
node->allocated = 0;
}
@@ -410,6 +535,46 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
return end >= start + size;
}
+static struct drm_mm_node *get_first_hole(const struct drm_mm *mm,
+ enum drm_mm_search_flags flags)
+{
+ if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
+ struct rb_node *node = rb_first(&mm->holes_tree);
+
+ return rb_entry(node, struct drm_mm_node, hole_node);
+ } else if (flags & DRM_MM_SEARCH_BELOW) {
+ return list_entry((mm)->hole_stack.prev,
+ struct drm_mm_node, hole_stack);
+ } else {
+ return list_entry((mm)->hole_stack.next,
+ struct drm_mm_node, hole_stack);
+ }
+}
+
+static struct drm_mm_node *get_next_hole(struct drm_mm_node *entry,
+ enum drm_mm_search_flags flags)
+{
+ if (flags & DRM_MM_SEARCH_BOTTOM_UP) {
+ return rb_entry(rb_next(&entry->hole_node),
+ struct drm_mm_node, hole_node);
+ } else if (flags & DRM_MM_SEARCH_BELOW) {
+ return list_entry(entry->hole_stack.prev,
+ struct drm_mm_node, hole_stack);
+ } else {
+ return list_entry(entry->hole_stack.next,
+ struct drm_mm_node, hole_stack);
+ }
+}
+
+static bool drm_mm_hole_traversal_condition(const struct drm_mm *mm,
+ struct drm_mm_node *entry, enum drm_mm_search_flags flags)
+{
+ if (flags & DRM_MM_SEARCH_BOTTOM_UP)
+ return entry ? 1 : 0;
+ else
+ return (&entry->hole_stack != &(mm)->hole_stack) ? 1 : 0;
+}
+
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
u64 size,
unsigned alignment,
@@ -427,9 +592,14 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
- flags & DRM_MM_SEARCH_BELOW) {
- u64 hole_size = adj_end - adj_start;
+ for (entry = get_first_hole(mm, flags);
+ drm_mm_hole_traversal_condition(mm, entry, flags);
+ entry = get_next_hole(entry, flags)) {
+ u64 hole_size;
+
+ adj_start = drm_mm_hole_node_start(entry);
+ adj_end = drm_mm_hole_node_end(entry);
+ hole_size = adj_end - adj_start;
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
@@ -471,9 +641,14 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
best = NULL;
best_size = ~0UL;
- __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
- flags & DRM_MM_SEARCH_BELOW) {
- u64 hole_size = adj_end - adj_start;
+ for (entry = get_first_hole(mm, flags);
+ drm_mm_hole_traversal_condition(mm, entry, flags);
+ entry = get_next_hole(entry, flags)) {
+ u64 hole_size;
+
+ adj_start = drm_mm_hole_node_start(entry);
+ adj_end = drm_mm_hole_node_end(entry);
+ hole_size = adj_end - adj_start;
if (adj_start < start)
adj_start = start;
@@ -514,14 +689,21 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
list_replace(&old->hole_stack, &new->hole_stack);
+ rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree);
new->hole_follows = old->hole_follows;
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
new->color = old->color;
+ new->__subtree_last = old->__subtree_last;
old->allocated = 0;
new->allocated = 1;
+
+ if (old->hole_follows)
+ rb_replace_node(&old->hole_node, &new->hole_node,
+ &old->mm->holes_tree);
+
}
EXPORT_SYMBOL(drm_mm_replace_node);
@@ -746,7 +928,6 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
- INIT_LIST_HEAD(&mm->head_node.hole_stack);
mm->head_node.hole_follows = 1;
mm->head_node.scanned_block = 0;
mm->head_node.scanned_prev_free = 0;
@@ -756,7 +937,10 @@ void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+ mm->interval_tree = RB_ROOT;
mm->color_adjust = NULL;
+ mm->holes_tree = RB_ROOT;
+ rb_insert_hole_node(&mm->head_node, mm);
}
EXPORT_SYMBOL(drm_mm_init);
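For reference, a minimal sketch of the keyed insertion that rb_insert_hole_node() performs (its real definition lands earlier in this patch; this illustrative version assumes mm->holes_tree orders free holes by the start address of the hole that follows each node):

static void rb_insert_hole_node_sketch(struct drm_mm_node *node,
                                       struct drm_mm *mm)
{
        struct rb_node **link = &mm->holes_tree.rb_node;
        struct rb_node *parent = NULL;
        u64 start = __drm_mm_hole_node_start(node);

        while (*link) {
                struct drm_mm_node *cur;

                parent = *link;
                cur = rb_entry(parent, struct drm_mm_node, hole_node);
                if (start < __drm_mm_hole_node_start(cur))
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }

        rb_link_node(&node->hole_node, parent, link);
        rb_insert_color(&node->hole_node, &mm->holes_tree);
}

With this ordering, rb_first() in get_first_hole() yields the lowest-addressed hole, which is what gives DRM_MM_SEARCH_BOTTOM_UP its ascending-address walk.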
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fb9f647bb5cd..5044f2257e89 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1159,7 +1159,7 @@ struct intel_gen6_power_mgmt {
struct intel_rps_client semaphores, mmioflips;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0f42a2782afc..b7b0a38acd67 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,68 +994,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
- }
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;
+
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
+ dev_priv->rps.ei = now;
return events;
}
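The rewritten handler therefore reduces to one busyness comparison per EI window; a hedged sketch of the equivalent up-check (helper name hypothetical, with the percent scaling and CZ clock ratio already folded into c0 by the mul factor above):

static inline bool rps_busier_than(u64 c0, u64 elapsed, unsigned int threshold)
{
        /* c0 is pre-scaled by 100 (percent) and the counter clock ratio */
        return c0 > elapsed * threshold;
}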
@@ -4390,7 +4373,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e7c18519274a..fd4690ed93c0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4376,6 +4376,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
+ /* When byt can survive dynamic sw freq adjustments without
+ * system hangs, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4394,6 +4400,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4404,8 +4411,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
u32 mask = 0;
+ /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
@@ -4509,7 +4517,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 5838545468f8..dbc198b00792 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,8 @@ config DRM_MSM
select QCOM_SCM
select BACKLIGHT_CLASS_DEVICE
select MSM_EXT_DISPLAY
+ select MMU_NOTIFIER
+ select INTERVAL_TREE
default y
help
DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index d7e56f57c78e..f3a8a8416c7a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -50,6 +50,7 @@ msm_drm-y := \
sde_dbg_evtlog.o \
sde_io_util.o \
dba_bridge.o \
+ sde_edid_parser.o
# use drm gpu driver only if qcom_kgsl driver not available
ifneq ($(CONFIG_QCOM_KGSL),y)
@@ -103,7 +104,6 @@ msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
hdmi-staging/sde_hdmi.o \
hdmi-staging/sde_hdmi_bridge.o \
hdmi-staging/sde_hdmi_audio.o \
- hdmi-staging/sde_hdmi_edid.o
msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
dsi/pll/dsi_pll_28nm.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index c4f886fd6037..a417e42944fc 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -466,6 +466,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu_config a3xx_config = { 0 };
int ret;
if (!pdev) {
@@ -491,7 +492,13 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ a3xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a3xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+ a3xx_config.nr_rings = 1;
+ a3xx_config.va_start = 0x300000;
+ a3xx_config.va_end = 0xffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a3xx_config);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 534a7c3fbdca..069823f054f7 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -543,6 +543,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu_config a4xx_config = { 0 };
int ret;
if (!pdev) {
@@ -568,7 +569,13 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ a4xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a4xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+ a4xx_config.nr_rings = 1;
+ a4xx_config.va_start = 0x300000;
+ a4xx_config.va_end = 0xffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a4xx_config);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 9dc413ea949e..8a136fef86f1 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,6 +15,9 @@
#include "msm_iommu.h"
#include "a5xx_gpu.h"
+#define SECURE_VA_START 0xc0000000
+#define SECURE_VA_SIZE SZ_256M
+
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -100,7 +103,7 @@ static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
OUT_RING(ring, 1);
}
-static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -133,10 +136,36 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x02);
+ /* Turn on secure mode if the submission is secure */
+ if (submit->secure) {
+ OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+ OUT_RING(ring, 1);
+ }
+
+ /* Record the always on counter before command execution */
+ if (submit->profile_buf_iova) {
+ uint64_t gpuaddr = submit->profile_buf_iova +
+ offsetof(struct drm_msm_gem_submit_profile_buffer,
+ ticks_submitted);
+
+ /*
+ * Set bit[30] to make this command a 64-bit write operation.
+ * Bits [18-29] specify the number of consecutive registers to
+ * copy, so set this field to 2, since we want to copy data from
+ * REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and _HI.
+ */
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO |
+ (1 << 30) | (2 << 18));
+ OUT_RING(ring, lower_32_bits(gpuaddr));
+ OUT_RING(ring, upper_32_bits(gpuaddr));
+ }
+
/* Submit the commands */
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ case MSM_SUBMIT_CMD_PROFILE_BUF:
break;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
@@ -164,6 +193,19 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
OUT_RING(ring, 0x01);
+ /* Record the always on counter after command execution */
+ if (submit->profile_buf_iova) {
+ uint64_t gpuaddr = submit->profile_buf_iova +
+ offsetof(struct drm_msm_gem_submit_profile_buffer,
+ ticks_retired);
+
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO |
+ (1 << 30) | (2 << 18));
+ OUT_RING(ring, lower_32_bits(gpuaddr));
+ OUT_RING(ring, upper_32_bits(gpuaddr));
+ }
+
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->fence);
@@ -179,6 +221,11 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
OUT_RING(ring, submit->fence);
+ if (submit->secure) {
+ OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+ OUT_RING(ring, 0);
+ }
+
/* Yield the floor on command completion */
OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
/*
@@ -193,12 +240,37 @@ static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Set bit 0 to trigger an interrupt on preempt complete */
OUT_RING(ring, 0x01);
+ if (submit->profile_buf_iova) {
+ unsigned long flags;
+ uint64_t ktime;
+ struct drm_msm_gem_submit_profile_buffer *profile_buf =
+ submit->profile_buf_vaddr;
+
+ /*
+ * With this profiling, we are trying to create the closest
+ * possible mapping between the CPU time domain (monotonic clock)
+ * and the GPU time domain (ticks). To make this happen, we
+ * briefly turn off interrupts so that no interrupt runs between
+ * collecting these two samples.
+ */
+ local_irq_save(flags);
+
+ profile_buf->ticks_queued = gpu_read64(gpu,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+
+ ktime = ktime_get_raw_ns();
+
+ local_irq_restore(flags);
+
+ profile_buf->queue_time = ktime;
+ profile_buf->submit_time = ktime;
+ }
+
a5xx_flush(gpu, ring);
/* Check to see if we need to start preemption */
a5xx_preempt_trigger(gpu);
-
- return 0;
}
static const struct {
@@ -409,10 +481,8 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
struct drm_gem_object *bo;
void *ptr;
- mutex_lock(&drm->struct_mutex);
bo = msm_gem_new(drm, fw->size - 4,
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
- mutex_unlock(&drm->struct_mutex);
if (IS_ERR(bo))
return bo;
@@ -700,14 +770,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
ADRENO_PROTECT_RW(0x10000, 0x8000));
gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
- /*
- * Disable the trusted memory range - we don't actually supported secure
- * memory rendering at this point in time and we don't want to block off
- * part of the virtual memory space.
- */
+
gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
- REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
- gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, SECURE_VA_START);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, SECURE_VA_SIZE);
/* Put the GPU into 64 bit by default */
gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
@@ -1039,8 +1105,10 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
a5xx_gpmu_err_irq(gpu);
- if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ a5xx_preempt_trigger(gpu);
msm_gpu_retire(gpu);
+ }
if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
a5xx_preempt_irq(gpu);
@@ -1304,6 +1372,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
struct a5xx_gpu *a5xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
+ struct msm_gpu_config a5xx_config = { 0 };
int ret;
if (!pdev) {
@@ -1327,7 +1396,23 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
/* Check the efuses for some configuration */
a5xx_efuses_read(pdev, adreno_gpu);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+
+ /* Set the number of rings to 4 - yay preemption */
+ a5xx_config.nr_rings = 4;
+
+ /*
+ * Set the user domain range to fall into the TTBR1 region for global
+ * objects
+ */
+ a5xx_config.va_start = 0xfffffff000000000ULL;
+ a5xx_config.va_end = 0xffffffffffffffffULL;
+
+ a5xx_config.secure_va_start = SECURE_VA_START;
+ a5xx_config.secure_va_end = SECURE_VA_START + SECURE_VA_SIZE - 1;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e04feaadefb9..0025922540df 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -458,10 +458,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
*/
bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- mutex_lock(&drm->struct_mutex);
a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
- mutex_unlock(&drm->struct_mutex);
if (IS_ERR(a5xx_gpu->gpmu_bo))
goto err;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 648494c75abc..57046089434c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -24,9 +24,7 @@ static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
void *ptr;
int ret;
- mutex_lock(&drm->struct_mutex);
_bo = msm_gem_new(drm, size, flags);
- mutex_unlock(&drm->struct_mutex);
if (IS_ERR(_bo))
return _bo;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
index 5a2edb0ea518..c2773cb325d5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -217,18 +217,19 @@ static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
struct drm_device *drm = gpu->dev;
int ret = -ENOMEM;
- crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
+ crashdump->bo = msm_gem_new_locked(drm, CRASHDUMP_BO_SIZE,
+ MSM_BO_UNCACHED);
if (IS_ERR(crashdump->bo)) {
ret = PTR_ERR(crashdump->bo);
crashdump->bo = NULL;
return ret;
}
- crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
+ crashdump->ptr = msm_gem_vaddr(crashdump->bo);
if (!crashdump->ptr)
goto out;
- ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
+ ret = msm_gem_get_iova(crashdump->bo, gpu->aspace,
&crashdump->iova);
out:
@@ -733,6 +734,35 @@ static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
}
}
+static void a5xx_snapshot_preemption(struct msm_gpu *gpu, struct msm_snapshot
+ *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_snapshot_gpu_object header = {
+ .type = SNAPSHOT_GPU_OBJECT_GLOBAL,
+ .size = A5XX_PREEMPT_RECORD_SIZE >> 2,
+ .pt_base = 0,
+ };
+ int index;
+
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (index = 0; index < gpu->nr_rings; index++) {
+ header.gpuaddr = a5xx_gpu->preempt_iova[index];
+
+ if (!SNAPSHOT_HEADER(snapshot, header,
+ SNAPSHOT_SECTION_GPU_OBJECT_V2,
+ A5XX_PREEMPT_RECORD_SIZE >> 2))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, a5xx_gpu->preempt[index],
+ A5XX_PREEMPT_RECORD_SIZE);
+ }
+}
+
int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
{
struct crashdump crashdump = { 0 };
@@ -787,6 +817,9 @@ int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
/* CP MERCIU */
a5xx_snapshot_cp_merciu(gpu, snapshot);
+ /* Preemption records */
+ a5xx_snapshot_preemption(gpu, snapshot);
+
crashdump_destroy(gpu, &crashdump);
snapshot->priv = NULL;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 969ed810ce9d..81fa37ee9671 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -172,17 +172,18 @@ void adreno_recover(struct msm_gpu *gpu)
enable_irq(gpu->irq);
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct msm_ringbuffer *ring = gpu->rb[submit->ring];
- unsigned i, ibs = 0;
+ unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
+ case MSM_SUBMIT_CMD_PROFILE_BUF:
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
case MSM_SUBMIT_CMD_BUF:
@@ -190,18 +191,11 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
- ibs++;
+ OUT_PKT2(ring);
break;
}
}
- /* on a320, at least, we seem to need to pad things out to an
- * even number of qwords to avoid issue w/ CP hanging on wrap-
- * around:
- */
- if (ibs % 2)
- OUT_PKT2(ring);
-
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence);
@@ -247,8 +241,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
#endif
gpu->funcs->flush(gpu, ring);
-
- return 0;
}
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
@@ -404,10 +396,6 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
ring->gpu->name, ring->id);
}
-static const char *iommu_ports[] = {
- "gfx3d_user",
-};
-
/* Read the set of powerlevels */
static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
{
@@ -523,10 +511,10 @@ static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
- const struct adreno_gpu_funcs *funcs, int nr_rings)
+ const struct adreno_gpu_funcs *funcs,
+ struct msm_gpu_config *gpu_config)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
- struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;
@@ -540,26 +528,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Get the rest of the target configuration from the device tree */
adreno_of_parse(pdev, gpu);
- adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.irqname = "kgsl_3d0_irq";
- adreno_gpu_config.nr_rings = nr_rings;
-
- adreno_gpu_config.va_start = SZ_16M;
- adreno_gpu_config.va_end = 0xffffffff;
-
- if (adreno_gpu->revn >= 500) {
- /* 5XX targets use a 64 bit region */
- adreno_gpu_config.va_start = 0x800000000;
- adreno_gpu_config.va_end = 0x8ffffffff;
- } else {
- adreno_gpu_config.va_start = 0x300000;
- adreno_gpu_config.va_end = 0xffffffff;
- }
-
- adreno_gpu_config.nr_rings = nr_rings;
-
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, &adreno_gpu_config);
+ adreno_gpu->info->name, gpu_config);
if (ret)
return ret;
@@ -579,16 +549,22 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
mmu = gpu->aspace->mmu;
if (mmu) {
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = mmu->funcs->attach(mmu, NULL, 0);
if (ret)
return ret;
}
- mutex_lock(&drm->struct_mutex);
+ if (gpu->secure_aspace) {
+ mmu = gpu->secure_aspace->mmu;
+ if (mmu) {
+ ret = mmu->funcs->attach(mmu, NULL, 0);
+ if (ret)
+ return ret;
+ }
+ }
+
adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
MSM_BO_UNCACHED);
- mutex_unlock(&drm->struct_mutex);
if (IS_ERR(adreno_gpu->memptrs_bo)) {
ret = PTR_ERR(adreno_gpu->memptrs_bo);
adreno_gpu->memptrs_bo = NULL;
@@ -630,6 +606,12 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
+
+ if (gpu->base.secure_aspace) {
+ aspace = gpu->base.secure_aspace;
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
}
static void adreno_snapshot_os(struct msm_gpu *gpu,
@@ -721,7 +703,7 @@ static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
return ERR_PTR(-ENODEV);
if (groupid >= adreno_gpu->nr_counter_groups)
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENODEV);
return (struct adreno_counter_group *)
adreno_gpu->counter_groups[groupid];
@@ -744,7 +726,7 @@ u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
struct adreno_counter_group *group =
get_counter_group(gpu, groupid);
- if (!IS_ERR(group) && group->funcs.read)
+ if (!IS_ERR_OR_NULL(group) && group->funcs.read)
return group->funcs.read(gpu, group, counterid);
return 0;
@@ -755,6 +737,6 @@ void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
struct adreno_counter_group *group =
get_counter_group(gpu, groupid);
- if (!IS_ERR(group) && group->funcs.put)
+ if (!IS_ERR_OR_NULL(group) && group->funcs.put)
group->funcs.put(gpu, group, counterid);
}
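The IS_ERR() -> IS_ERR_OR_NULL() changes above matter because get_counter_group() can presumably also hand back a NULL entry from counter_groups[], and IS_ERR() alone lets NULL through; a minimal demo (function name hypothetical):

#include <linux/err.h>
#include <linux/bug.h>

static void demo_is_err_vs_or_null(void)
{
        void *p = NULL;

        WARN_ON(IS_ERR(p));          /* never fires: NULL is not an ERR_PTR() errno */
        WARN_ON(!IS_ERR_OR_NULL(p)); /* never fires: NULL is rejected too */
}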
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8e8f3e5182d6..9e622fa06ce4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -244,7 +244,7 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
struct msm_ringbuffer *ring);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
@@ -257,7 +257,7 @@ struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
- int nr_rings);
+ struct msm_gpu_config *config);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c
index 14547837782b..f933a7f3dcfb 100644
--- a/drivers/gpu/drm/msm/dba_bridge.c
+++ b/drivers/gpu/drm/msm/dba_bridge.c
@@ -275,7 +275,7 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev,
struct msm_drm_private *priv = NULL;
if (!dev || !encoder || !data) {
- SDE_ERROR("dev=%p or encoder=%p or data=%p is NULL\n",
+ SDE_ERROR("dev=%pK or encoder=%pK or data=%pK is NULL\n",
dev, encoder, data);
rc = -EINVAL;
goto error;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
index d9fcec60693d..91501a2efd20 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -198,12 +198,12 @@ enum dsi_video_traffic_mode {
* @h_sync_width: HSYNC width in pixels.
* @h_front_porch: Horizontal front porch in pixels.
* @h_skew:
- * @h_sync_polarity: Polarity of HSYNC (false is active low).
+ * @h_sync_polarity: Polarity of HSYNC (false is active high).
* @v_active: Active height of one frame in lines.
* @v_back_porch: Vertical back porch in lines.
* @v_sync_width: VSYNC width in lines.
* @v_front_porch: Vertical front porch in lines.
- * @v_sync_polarity: Polarity of VSYNC (false is active low).
+ * @v_sync_polarity: Polarity of VSYNC (false is active high).
* @refresh_rate: Refresh rate in Hz.
*/
struct dsi_mode_info {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 995cda97a2f0..c34713a13332 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -56,6 +56,10 @@ static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
dsi_mode->flags |= DSI_MODE_FLAG_DFPS;
if (msm_needs_vblank_pre_modeset(drm_mode))
dsi_mode->flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+ dsi_mode->timing.h_sync_polarity =
+ (drm_mode->flags & DRM_MODE_FLAG_PHSYNC) ? false : true;
+ dsi_mode->timing.v_sync_polarity =
+ (drm_mode->flags & DRM_MODE_FLAG_PVSYNC) ? false : true;
}
static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
@@ -87,6 +91,10 @@ static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+ drm_mode->flags |= (dsi_mode->timing.h_sync_polarity) ?
+ DRM_MODE_FLAG_NHSYNC : DRM_MODE_FLAG_PHSYNC;
+ drm_mode->flags |= (dsi_mode->timing.v_sync_polarity) ?
+ DRM_MODE_FLAG_NVSYNC : DRM_MODE_FLAG_PVSYNC;
drm_mode_set_name(drm_mode);
}
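For clarity, the sync-polarity convention implied by the two conversions above (with false meaning active high, per the dsi_defs.h comment fix earlier in this diff):

/*
 * DRM flag                    dsi timing polarity
 * DRM_MODE_FLAG_PHSYNC  <->   h_sync_polarity = false (active high)
 * DRM_MODE_FLAG_NHSYNC  <->   h_sync_polarity = true  (active low)
 * (likewise PVSYNC/NVSYNC for v_sync_polarity)
 */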
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 5556e851c0b4..b1319a68429f 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -1522,7 +1522,7 @@ static int dsi_panel_parse_dba_config(struct dsi_panel *panel,
"qcom,bridge-name", &len);
if (!panel->dba_config.bridge_name || len <= 0) {
SDE_ERROR(
- "%s:%d Unable to read bridge_name, data=%p,len=%d\n",
+ "%s:%d Unable to read bridge_name, data=%pK,len=%d\n",
__func__, __LINE__, panel->dba_config.bridge_name, len);
rc = -EINVAL;
goto error;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4580a6e3c877..e2b8deda46c2 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -838,22 +838,19 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
int ret;
u64 iova;
- mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
if (IS_ERR(msm_host->tx_gem_obj)) {
ret = PTR_ERR(msm_host->tx_gem_obj);
pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
msm_host->tx_gem_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj, NULL, &iova);
if (ret) {
pr_err("%s: failed to get iova, %d\n", __func__, ret);
return ret;
}
- mutex_unlock(&dev->struct_mutex);
if (iova & 0x07) {
pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 347b78886b24..437f88f29a69 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -23,6 +23,7 @@
#include <linux/gpio.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
#include "sde_kms.h"
#include "sde_connector.h"
@@ -32,6 +33,25 @@
static DEFINE_MUTEX(sde_hdmi_list_lock);
static LIST_HEAD(sde_hdmi_list);
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
+
+#define HDMI_DISPLAY_MAX_WIDTH 4096
+#define HDMI_DISPLAY_MAX_HEIGHT 2160
+
static const struct of_device_id sde_hdmi_dt_match[] = {
{.compatible = "qcom,hdmi-display"},
{}
@@ -69,16 +89,427 @@ static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
return len;
}
+static ssize_t _sde_hdmi_debugfs_edid_modes_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0, buf_size;
+ struct drm_connector *connector;
+ u32 mode_count = 0;
+ struct drm_display_mode *mode;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ mode_count++;
+ }
+
+ /* Adding one more to store title */
+ mode_count++;
+
+ buf_size = mode_count * sizeof(*mode);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, buf_size - len,
+ "name refresh (Hz) hdisp hss hse htot vdisp");
+
+ len += snprintf(buf + len, buf_size - len,
+ " vss vse vtot flags\n");
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ len += snprintf(buf + len, buf_size - len,
+ "%s %d %d %d %d %d %d %d %d %d 0x%x\n",
+ mode->name, mode->vrefresh, mode->hdisplay,
+ mode->hsync_start, mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->flags);
+ }
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_vsdb_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[200];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "max_tmds_clock = %d\n",
+ connector->max_tmds_clock);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "latency_present %d %d\n",
+ connector->latency_present[0],
+ connector->latency_present[1]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "video_latency %d %d\n",
+ connector->video_latency[0],
+ connector->video_latency[1]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "audio_latency %d %d\n",
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "dvi_dual %d\n",
+ (int)connector->dvi_dual);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_hdr_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[200];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf), "hdr_eotf = %d\n"
+ "hdr_metadata_type_one %d\n"
+ "hdr_max_luminance %d\n"
+ "hdr_avg_luminance %d\n"
+ "hdr_min_luminance %d\n"
+ "hdr_supported %d\n",
+ connector->hdr_eotf,
+ connector->hdr_metadata_type_one,
+ connector->hdr_max_luminance,
+ connector->hdr_avg_luminance,
+ connector->hdr_min_luminance,
+ (int)connector->hdr_supported);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_hfvsdb_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[200];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf) - len, "max_tmds_char = %d\n"
+ "scdc_present %d\n"
+ "rr_capable %d\n"
+ "supports_scramble %d\n"
+ "flags_3d %d\n",
+ connector->max_tmds_char,
+ (int)connector->scdc_present,
+ (int)connector->rr_capable,
+ (int)connector->supports_scramble,
+ connector->flags_3d);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
+static ssize_t _sde_hdmi_debugfs_edid_vcdb_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[100];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf) - len, "pt_scan_info = %d\n"
+ "it_scan_info = %d\n"
+ "ce_scan_info = %d\n",
+ (int)connector->pt_scan_info,
+ (int)connector->it_scan_info,
+ (int)connector->ce_scan_info);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+static ssize_t _sde_hdmi_edid_vendor_name_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[100];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf) - len, "Vendor ID is %s\n",
+ display->edid_ctrl->vendor_id);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
static const struct file_operations dump_info_fops = {
.open = simple_open,
.read = _sde_hdmi_debugfs_dump_info_read,
};
+static const struct file_operations edid_modes_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_modes_read,
+};
+
+static const struct file_operations edid_vsdb_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_vsdb_info_read,
+};
+
+static const struct file_operations edid_hdr_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_hdr_info_read,
+};
+
+static const struct file_operations edid_hfvsdb_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_hfvsdb_info_read,
+};
+
+static const struct file_operations edid_vcdb_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_vcdb_info_read,
+};
+
+static const struct file_operations edid_vendor_name_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_edid_vendor_name_read,
+};
+
+static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in)
+{
+ u32 pclk_delta, pclk;
+ u64 pclk_clip = pclk_in;
+
+ /* the HDMI standard allows up to 0.5% pixel clock deviation */
+ pclk = mode->clock * HDMI_KHZ_TO_HZ;
+ pclk_delta = pclk * 5 / 1000;
+
+ if (pclk_in < (pclk - pclk_delta))
+ pclk_clip = pclk - pclk_delta;
+ else if (pclk_in > (pclk + pclk_delta))
+ pclk_clip = pclk + pclk_delta;
+
+ if (pclk_in != pclk_clip)
+ pr_warn("clip pclk from %lld to %lld\n", pclk_in, pclk_clip);
+
+ return pclk_clip;
+}
+
+/**
+ * _sde_hdmi_update_pll_delta() - Update the HDMI pixel clock as per input ppm
+ *
+ * @display: Pointer to the sde_hdmi display.
+ * @ppm: parts per million, scaled by 1000 (i.e. parts per billion).
+ *
+ * The resulting pixel clock is clipped if it deviates by more than 0.5% from
+ * the TMDS clock rate defined by the HDMI spec.
+ *
+ * Return: 0 on success, non-zero in case of failure.
+ */
+static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
+{
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ struct drm_display_mode *current_mode = &display->mode;
+ u64 cur_pclk, dst_pclk;
+ u64 clip_pclk;
+ int rc = 0;
+
+ if (!hdmi->power_on || !display->connected) {
+ SDE_ERROR("HDMI display is not ready\n");
+ return -EINVAL;
+ }
+
+ /* get current pclk */
+ cur_pclk = hdmi->pixclock;
+ /* get desired pclk */
+ dst_pclk = cur_pclk * (1000000000 + ppm);
+ do_div(dst_pclk, 1000000000);
+
+ clip_pclk = _sde_hdmi_clip_valid_pclk(current_mode, dst_pclk);
+
+ /* update pclk */
+ if (clip_pclk != cur_pclk) {
+ SDE_DEBUG("PCLK changes from %llu to %llu when delta is %d\n",
+ cur_pclk, clip_pclk, ppm);
+
+ rc = clk_set_rate(hdmi->pwr_clks[0], clip_pclk);
+ if (rc < 0) {
+ SDE_ERROR("PLL update failed, reset clock rate\n");
+ return rc;
+ }
+
+ hdmi->pixclock = clip_pclk;
+ }
+
+ return rc;
+}
+
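A worked example of the scaling above, with hypothetical numbers: since @ppm is scaled by 1000 (i.e. ppb), a 148.5 MHz pixel clock nudged by ppm = 1000 (+1 ppm) becomes:

static u64 demo_ppm_scale(void)
{
        u64 cur_pclk = 148500000ULL;     /* 1080p60 pixel clock, Hz */
        s32 ppm = 1000;                  /* +1 ppm, expressed in ppb */
        u64 dst_pclk = cur_pclk * (1000000000 + ppm);

        do_div(dst_pclk, 1000000000);    /* -> 148500148 Hz */
        return dst_pclk;
}

That lands well inside the 0.5% window enforced by _sde_hdmi_clip_valid_pclk() (about 742 kHz here), so no clipping occurs; the pll_delta debugfs node below exercises exactly this path.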
+static ssize_t _sde_hdmi_debugfs_pll_delta_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[10];
+ int ppm = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &ppm))
+ return -EFAULT;
+
+ if (ppm)
+ _sde_hdmi_update_pll_delta(display, ppm);
+
+ return count;
+}
+
+static const struct file_operations pll_delta_fops = {
+ .open = simple_open,
+ .write = _sde_hdmi_debugfs_pll_delta_write,
+};
+
static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
{
int rc = 0;
- struct dentry *dir, *dump_file;
+ struct dentry *dir, *dump_file, *edid_modes;
+ struct dentry *edid_vsdb_info, *edid_hdr_info, *edid_hfvsdb_info;
+ struct dentry *edid_vcdb_info, *edid_vendor_name, *pll_file;
dir = debugfs_create_dir(display->name, NULL);
if (!dir) {
@@ -95,6 +526,95 @@ static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
&dump_info_fops);
if (IS_ERR_OR_NULL(dump_file)) {
rc = PTR_ERR(dump_file);
+ SDE_ERROR("[%s]debugfs create dump_info file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ pll_file = debugfs_create_file("pll_delta",
+ 0644,
+ dir,
+ display,
+ &pll_delta_fops);
+ if (IS_ERR_OR_NULL(pll_file)) {
+ rc = PTR_ERR(pll_file);
+ SDE_ERROR("[%s]debugfs create pll_delta file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ edid_modes = debugfs_create_file("edid_modes",
+ 0444,
+ dir,
+ display,
+ &edid_modes_fops);
+
+ if (IS_ERR_OR_NULL(edid_modes)) {
+ rc = PTR_ERR(edid_modes);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ edid_vsdb_info = debugfs_create_file("edid_vsdb_info",
+ 0444,
+ dir,
+ display,
+ &edid_vsdb_info_fops);
+
+ if (IS_ERR_OR_NULL(edid_vsdb_info)) {
+ rc = PTR_ERR(edid_vsdb_info);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ edid_hdr_info = debugfs_create_file("edid_hdr_info",
+ 0444,
+ dir,
+ display,
+ &edid_hdr_info_fops);
+ if (IS_ERR_OR_NULL(edid_hdr_info)) {
+ rc = PTR_ERR(edid_hdr_info);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ edid_hfvsdb_info = debugfs_create_file("edid_hfvsdb_info",
+ 0444,
+ dir,
+ display,
+ &edid_hfvsdb_info_fops);
+
+ if (IS_ERR_OR_NULL(edid_hfvsdb_info)) {
+ rc = PTR_ERR(edid_hfvsdb_info);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ edid_vcdb_info = debugfs_create_file("edid_vcdb_info",
+ 0444,
+ dir,
+ display,
+ &edid_vcdb_info_fops);
+
+ if (IS_ERR_OR_NULL(edid_vcdb_info)) {
+ rc = PTR_ERR(edid_vcdb_info);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ edid_vendor_name = debugfs_create_file("edid_vendor_name",
+ 0444,
+ dir,
+ display,
+ &edid_vendor_name_fops);
+
+ if (IS_ERR_OR_NULL(edid_vendor_name)) {
+ rc = PTR_ERR(edid_vendor_name);
SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
display->name, rc);
goto error_remove_dir;
@@ -390,28 +910,47 @@ static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
}
}
+static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display)
+{
+ struct edid *edid = display->edid_ctrl->edid;
+
+ if (edid)
+ cec_notifier_set_phys_addr_from_edid(display->notifier, edid);
+ else
+ cec_notifier_set_phys_addr(display->notifier,
+ CEC_PHYS_ADDR_INVALID);
+}
+
static void _sde_hdmi_hotplug_work(struct work_struct *work)
{
struct sde_hdmi *sde_hdmi =
container_of(work, struct sde_hdmi, hpd_work);
struct drm_connector *connector;
+ struct hdmi *hdmi = NULL;
+ u32 hdmi_ctrl;
if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
- !sde_hdmi->ctrl.ctrl->connector) {
+ !sde_hdmi->ctrl.ctrl->connector ||
+ !sde_hdmi->edid_ctrl) {
SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
sde_hdmi);
return;
}
-
+ hdmi = sde_hdmi->ctrl.ctrl;
connector = sde_hdmi->ctrl.ctrl->connector;
- if (sde_hdmi->connected)
- sde_hdmi_get_edid(connector, sde_hdmi);
- else
- sde_hdmi_free_edid(sde_hdmi);
+ if (sde_hdmi->connected) {
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+ sde_get_edid(connector, hdmi->i2c,
+ (void **)&sde_hdmi->edid_ctrl);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+ hdmi->hdmi_mode = sde_detect_hdmi_monitor(sde_hdmi->edid_ctrl);
+ } else {
+ sde_free_edid((void **)&sde_hdmi->edid_ctrl);
+ }
- sde_hdmi_notify_clients(connector, sde_hdmi->connected);
drm_helper_hpd_irq_event(connector->dev);
+ _sde_hdmi_cec_update_phys_addr(sde_hdmi);
}
static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
@@ -431,7 +970,7 @@ static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
HDMI_HPD_INT_CTRL_INT_ACK);
- DRM_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+ SDE_HDMI_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
hpd_int_ctrl);
/* detect disconnect if we are connected or vice versa: */
@@ -440,11 +979,22 @@ static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
- if (!sde_hdmi->non_pluggable)
- queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+ queue_work(hdmi->workq, &sde_hdmi->hpd_work);
}
}
+static void _sde_hdmi_cec_irq(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ u32 cec_intr = hdmi_read(hdmi, REG_HDMI_CEC_INT);
+
+ /* Routing interrupt to external CEC drivers */
+ if (cec_intr)
+ generic_handle_irq(irq_find_mapping(
+ sde_hdmi->irq_domain, 1));
+}
+
static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
{
struct sde_hdmi *sde_hdmi = dev_id;
@@ -465,7 +1015,8 @@ static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
- /* TODO audio.. */
+ /* Process CEC: */
+ _sde_hdmi_cec_irq(sde_hdmi);
return IRQ_HANDLED;
}
@@ -504,11 +1055,11 @@ static int _sde_hdmi_get_audio_edid_blk(struct platform_device *pdev,
return -ENODEV;
}
- blk->audio_data_blk = display->edid.audio_data_block;
- blk->audio_data_blk_size = display->edid.adb_size;
+ blk->audio_data_blk = display->edid_ctrl->audio_data_block;
+ blk->audio_data_blk_size = display->edid_ctrl->adb_size;
- blk->spk_alloc_data_blk = display->edid.spkr_alloc_data_block;
- blk->spk_alloc_data_blk_size = display->edid.sadb_size;
+ blk->spk_alloc_data_blk = display->edid_ctrl->spkr_alloc_data_block;
+ blk->spk_alloc_data_blk_size = display->edid_ctrl->sadb_size;
return 0;
}
@@ -530,6 +1081,25 @@ static int _sde_hdmi_get_cable_status(struct platform_device *pdev, u32 vote)
return hdmi->power_on && display->connected;
}
+static void _sde_hdmi_audio_codec_ready(struct platform_device *pdev)
+{
+ struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+ if (!display) {
+ SDE_ERROR("invalid param(s), display %pK\n", display);
+ return;
+ }
+
+ mutex_lock(&display->display_lock);
+ if (!display->codec_ready) {
+ display->codec_ready = true;
+
+ if (display->client_notify_pending)
+ sde_hdmi_notify_clients(display, display->connected);
+ }
+ mutex_unlock(&display->display_lock);
+}
+
static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
{
int rc = 0;
@@ -549,6 +1119,8 @@ static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
_sde_hdmi_get_audio_edid_blk;
display->ext_audio_data.codec_ops.cable_status =
_sde_hdmi_get_cable_status;
+ display->ext_audio_data.codec_ops.codec_ready =
+ _sde_hdmi_audio_codec_ready;
if (!display->pdev->dev.of_node) {
SDE_ERROR("[%s]cannot find sde_hdmi of_node\n", display->name);
@@ -577,17 +1149,14 @@ static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
return rc;
}
-void sde_hdmi_notify_clients(struct drm_connector *connector,
- bool connected)
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected)
{
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
int state = connected ?
EXT_DISPLAY_CABLE_CONNECT : EXT_DISPLAY_CABLE_DISCONNECT;
if (display && display->ext_audio_data.intf_ops.hpd) {
struct hdmi *hdmi = display->ctrl.ctrl;
- u32 flags = MSM_EXT_DISP_HPD_VIDEO;
+ u32 flags = MSM_EXT_DISP_HPD_ASYNC_VIDEO;
if (hdmi->hdmi_mode)
flags |= MSM_EXT_DISP_HPD_AUDIO;
@@ -597,21 +1166,6 @@ void sde_hdmi_notify_clients(struct drm_connector *connector,
}
}
-void sde_hdmi_ack_state(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
-
- if (display) {
- struct hdmi *hdmi = display->ctrl.ctrl;
-
- if (hdmi->hdmi_mode && display->ext_audio_data.intf_ops.notify)
- display->ext_audio_data.intf_ops.notify(
- display->ext_pdev, status);
- }
-}
-
void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
{
uint32_t ctrl = 0;
@@ -634,10 +1188,249 @@ void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
spin_unlock_irqrestore(&hdmi->reg_lock, flags);
- DRM_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+ SDE_HDMI_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
power_on ? "Enable" : "Disable", ctrl);
}
+int sde_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 5;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = addr >> 1,
+ .flags = I2C_M_RD,
+ .len = data_len,
+ .buf = data,
+ }
+ };
+
+ SDE_HDMI_DEBUG("Start DDC read");
+ retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 2);
+
+ retry--;
+ if (rc == 2)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ SDE_HDMI_DEBUG("End DDC read %d", rc);
+
+ return rc;
+}
+
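Note on the addr >> 1 above: the DDC/SCDC addresses used in this file are 8-bit bus values (e.g. 0xA8), while struct i2c_msg expects 7-bit addresses, hence the shift; a hypothetical helper makes the conversion explicit:

static inline u8 sde_hdmi_7bit_addr(u16 ddc_addr)
{
        return ddc_addr >> 1;   /* e.g. 8-bit 0xA8 -> 7-bit 0x54 */
}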
+#define DDC_WRITE_MAX_BYTE_NUM 32
+
+int sde_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len)
+{
+ int rc;
+ int retry = 10;
+ u8 buf[DDC_WRITE_MAX_BYTE_NUM];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ }
+ };
+
+ SDE_HDMI_DEBUG("Start DDC write");
+ if (data_len > (DDC_WRITE_MAX_BYTE_NUM - 1)) {
+ SDE_ERROR("%s: write size too big\n", __func__);
+ return -ERANGE;
+ }
+
+ buf[0] = offset;
+ memcpy(&buf[1], data, data_len);
+ msgs[0].buf = buf;
+ msgs[0].len = data_len + 1;
+ retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 1);
+
+ retry--;
+ if (rc == 1)
+ rc = 0;
+ else if (retry > 0)
+ goto retry;
+ else
+ rc = -EIO;
+
+ SDE_HDMI_DEBUG("End DDC write %d", rc);
+
+ return rc;
+}
+
+int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val)
+{
+ int rc = 0;
+ u8 data_buf[2] = {0};
+ u16 dev_addr, data_len;
+ u8 offset;
+
+ if (!hdmi || !hdmi->i2c || !val) {
+ SDE_ERROR("Bad Parameters\n");
+ return -EINVAL;
+ }
+
+ if (data_type >= HDMI_TX_SCDC_MAX) {
+ SDE_ERROR("Unsupported data type\n");
+ return -EINVAL;
+ }
+
+ dev_addr = 0xA8;
+
+ switch (data_type) {
+ case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+ data_len = 1;
+ offset = HDMI_SCDC_SCRAMBLER_STATUS;
+ break;
+ case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+ case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+ data_len = 1;
+ offset = HDMI_SCDC_TMDS_CONFIG;
+ break;
+ case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+ case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+ case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+ case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+ data_len = 1;
+ offset = HDMI_SCDC_STATUS_FLAGS_0;
+ break;
+ case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+ data_len = 2;
+ offset = HDMI_SCDC_ERR_DET_0_L;
+ break;
+ case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+ data_len = 2;
+ offset = HDMI_SCDC_ERR_DET_1_L;
+ break;
+ case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+ data_len = 2;
+ offset = HDMI_SCDC_ERR_DET_2_L;
+ break;
+ case HDMI_TX_SCDC_READ_ENABLE:
+ data_len = 1;
+ offset = HDMI_SCDC_CONFIG_0;
+ break;
+ default:
+ break;
+ }
+
+ rc = sde_hdmi_ddc_read(hdmi, dev_addr, offset, data_buf, data_len);
+ if (rc) {
+ SDE_ERROR("DDC Read failed for %d\n", data_type);
+ return rc;
+ }
+
+ switch (data_type) {
+ case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+ *val = (data_buf[0] & BIT(0)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+ *val = (data_buf[0] & BIT(0)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+ *val = (data_buf[0] & BIT(1)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+ *val = (data_buf[0] & BIT(0)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+ *val = (data_buf[0] & BIT(1)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+ *val = (data_buf[0] & BIT(2)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+ *val = (data_buf[0] & BIT(3)) ? 1 : 0;
+ break;
+ case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+ case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+ case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+ if (data_buf[1] & BIT(7))
+ *val = (data_buf[0] | ((data_buf[1] & 0x7F) << 8));
+ else
+ *val = 0;
+ break;
+ case HDMI_TX_SCDC_READ_ENABLE:
+ *val = (data_buf[0] & BIT(0)) ? 1 : 0;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val)
+{
+ int rc = 0;
+ u8 data_buf[2] = {0};
+ u8 read_val = 0;
+ u16 dev_addr, data_len;
+ u8 offset;
+
+ if (!hdmi || !hdmi->i2c) {
+ SDE_ERROR("Bad Parameters\n");
+ return -EINVAL;
+ }
+
+ if (data_type >= HDMI_TX_SCDC_MAX) {
+ SDE_ERROR("Unsupported data type\n");
+ return -EINVAL;
+ }
+
+ dev_addr = 0xA8;
+
+ switch (data_type) {
+ case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+ case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+ dev_addr = 0xA8;
+ data_len = 1;
+ offset = HDMI_SCDC_TMDS_CONFIG;
+ rc = sde_hdmi_ddc_read(hdmi, dev_addr, offset, &read_val,
+ data_len);
+ if (rc) {
+ SDE_ERROR("scdc read failed\n");
+ return rc;
+ }
+ if (data_type == HDMI_TX_SCDC_SCRAMBLING_ENABLE) {
+ data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(0))) |
+ ((u8)(val & BIT(0))));
+ } else {
+ data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(1))) |
+ (((u8)(val & BIT(0))) << 1));
+ }
+ break;
+ case HDMI_TX_SCDC_READ_ENABLE:
+ data_len = 1;
+ offset = HDMI_SCDC_CONFIG_0;
+ data_buf[0] = (u8)(val & 0x1);
+ break;
+ default:
+ SDE_ERROR("Cannot write to read only reg (%d)\n",
+ data_type);
+ return -EINVAL;
+ }
+
+ rc = sde_hdmi_ddc_write(hdmi, dev_addr, offset, data_buf, data_len);
+ if (rc) {
+ SDE_ERROR("DDC Read failed for %d\n", data_type);
+ return rc;
+ }
+ return 0;
+}
+
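A worked example of the error-counter decode in sde_hdmi_scdc_read() above, with hypothetical register values (per the code, bit 7 of the high byte is the valid flag and the remaining 15 bits form the count):

#include <linux/bitops.h>

static u32 demo_scdc_err_count(void)
{
        u8 data_buf[2] = { 0x34, 0x92 }; /* ERR_DET low, high (hypothetical) */
        u32 val = 0;

        if (data_buf[1] & BIT(7))        /* valid flag set */
                val = data_buf[0] | ((data_buf[1] & 0x7F) << 8);

        return val;                      /* 0x1234 */
}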
int sde_hdmi_get_info(struct msm_display_info *info,
void *display)
{
@@ -664,14 +1457,36 @@ int sde_hdmi_get_info(struct msm_display_info *info,
MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
}
info->is_connected = hdmi_display->connected;
- info->max_width = 1920;
- info->max_height = 1080;
+ info->max_width = HDMI_DISPLAY_MAX_WIDTH;
+ info->max_height = HDMI_DISPLAY_MAX_HEIGHT;
info->compression = MSM_DISPLAY_COMPRESS_NONE;
mutex_unlock(&hdmi_display->display_lock);
return rc;
}
+int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display)
+{
+ int rc = 0;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+ connector, display);
+ return 0;
+ }
+
+ SDE_DEBUG("\n");
+
+ if (property_index == CONNECTOR_PROP_PLL_DELTA)
+ rc = _sde_hdmi_update_pll_delta(display, value);
+
+ return rc;
+}
+
u32 sde_hdmi_get_num_of_displays(void)
{
u32 count = 0;
@@ -746,7 +1561,7 @@ int sde_hdmi_connector_post_init(struct drm_connector *connector,
if (info)
sde_kms_info_add_keystr(info,
- "DISPLAY_TYPE",
+ "display type",
sde_hdmi->display_type);
hdmi->connector = connector;
@@ -775,8 +1590,6 @@ sde_hdmi_connector_detect(struct drm_connector *connector,
return status;
}
- SDE_DEBUG("\n");
-
/* get display dsi_info */
memset(&info, 0x0, sizeof(info));
rc = sde_hdmi_get_info(&info, display);
@@ -797,25 +1610,6 @@ sde_hdmi_connector_detect(struct drm_connector *connector,
return status;
}
-int _sde_hdmi_update_modes(struct drm_connector *connector,
- struct sde_hdmi *display)
-{
- int rc = 0;
- struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
-
- if (edid_ctrl->edid) {
- drm_mode_connector_update_edid_property(connector,
- edid_ctrl->edid);
-
- rc = drm_add_edid_modes(connector, edid_ctrl->edid);
- return rc;
- }
-
- drm_mode_connector_update_edid_property(connector, NULL);
-
- return rc;
-}
-
int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
{
struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
@@ -828,8 +1622,6 @@ int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
return 0;
}
- SDE_DEBUG("\n");
-
if (hdmi_display->non_pluggable) {
list_for_each_entry(mode, &hdmi_display->mode_list, head) {
m = drm_mode_duplicate(connector->dev, mode);
@@ -842,7 +1634,9 @@ int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
}
ret = hdmi_display->num_of_modes;
} else {
- ret = _sde_hdmi_update_modes(connector, display);
+ /* pluggable case assumes EDID was read at HPD time */
+ ret = _sde_edid_update_modes(connector,
+ hdmi_display->edid_ctrl);
}
return ret;
@@ -864,8 +1658,6 @@ enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
return 0;
}
- SDE_DEBUG("\n");
-
hdmi = hdmi_display->ctrl.ctrl;
priv = connector->dev->dev_private;
kms = priv->kms;
@@ -873,7 +1665,7 @@ enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
actual = kms->funcs->round_pixclk(kms,
requested, hdmi->encoder);
- SDE_DEBUG("requested=%ld, actual=%ld", requested, actual);
+ SDE_HDMI_DEBUG("requested=%ld, actual=%ld", requested, actual);
if (actual != requested)
return MODE_CLOCK_RANGE;
@@ -896,10 +1688,28 @@ int sde_hdmi_dev_deinit(struct sde_hdmi *display)
SDE_ERROR("Invalid params\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int _sde_hdmi_cec_init(struct sde_hdmi *display)
+{
+ struct platform_device *pdev = display->pdev;
+
+ display->notifier = cec_notifier_get(&pdev->dev);
+ if (!display->notifier) {
+ SDE_ERROR("CEC notifier get failed\n");
+ return -ENOMEM;
+ }
return 0;
}
+static void _sde_hdmi_cec_deinit(struct sde_hdmi *display)
+{
+ cec_notifier_set_phys_addr(display->notifier, CEC_PHYS_ADDR_INVALID);
+ cec_notifier_put(display->notifier);
+}
+
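
(The deinit above clears the physical address before dropping the notifier; the natural counterpart on hotplug is publishing a fresh address from the sink's EDID. A plausible sketch — the helper name and the EDID pointer are assumptions, while cec_notifier_set_phys_addr_from_edid() is the standard CEC notifier API:

	/* Hypothetical hotplug path: hand the physical address parsed
	 * from the sink's EDID to any registered CEC adapter.
	 */
	static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display,
			struct edid *edid)
	{
		cec_notifier_set_phys_addr_from_edid(display->notifier, edid);
	}

)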
static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
{
int rc = 0;
@@ -909,7 +1719,7 @@ static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
struct msm_drm_private *priv = NULL;
struct platform_device *pdev = to_platform_device(dev);
- SDE_ERROR("E\n");
+ SDE_HDMI_DEBUG(" %s +\n", __func__);
if (!dev || !pdev || !master) {
pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
dev, pdev, master);
@@ -938,22 +1748,34 @@ static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
if (rc) {
SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
display->name, rc);
- goto error;
+ goto ext_error;
}
- rc = sde_hdmi_edid_init(display);
+ rc = _sde_hdmi_cec_init(display);
if (rc) {
- SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
+ SDE_ERROR("[%s]CEC init failed, rc=%d\n",
display->name, rc);
- goto error;
+ goto ext_error;
+ }
+
+ display->edid_ctrl = sde_edid_init();
+ if (!display->edid_ctrl) {
+ SDE_ERROR("[%s]sde edid init failed\n",
+ display->name);
+ rc = -ENOMEM;
+ goto cec_error;
}
display_ctrl = &display->ctrl;
display_ctrl->ctrl = priv->hdmi;
- SDE_ERROR("display_ctrl->ctrl=%p\n", display_ctrl->ctrl);
display->drm_dev = drm;
-error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+
+cec_error:
+ (void)_sde_hdmi_cec_deinit(display);
+ext_error:
(void)_sde_hdmi_debugfs_deinit(display);
debug_error:
mutex_unlock(&display->display_lock);
@@ -978,7 +1800,8 @@ static void sde_hdmi_unbind(struct device *dev, struct device *master,
}
mutex_lock(&display->display_lock);
(void)_sde_hdmi_debugfs_deinit(display);
- (void)sde_hdmi_edid_deinit(display);
+ (void)sde_edid_deinit((void **)&display->edid_ctrl);
+ (void)_sde_hdmi_cec_deinit(display);
display->drm_dev = NULL;
mutex_unlock(&display->display_lock);
}
@@ -1252,6 +2075,29 @@ static struct platform_driver sde_hdmi_driver = {
},
};
+static int sde_hdmi_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct sde_hdmi *display;
+ int rc;
+
+ if (!domain || !domain->host_data) {
+ pr_err("invalid parameters domain\n");
+ return -EINVAL;
+ }
+ display = domain->host_data;
+
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+ rc = irq_set_chip_data(irq, display);
+
+ return rc;
+}
+
+static const struct irq_domain_ops sde_hdmi_irqdomain_ops = {
+ .map = sde_hdmi_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
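
(Nothing in this hunk consumes the domain yet; a hedged sketch of how a client block would obtain a Linux IRQ from it — the hwirq number and hdcp_irq_thread handler are placeholders, and irq_create_mapping() is what ends up invoking the .map callback above:

	/* Hypothetical consumer: map hwirq 0 of the 8-entry domain and
	 * request it; .map installs dummy_irq_chip + handle_level_irq.
	 */
	unsigned int virq = irq_create_mapping(display->irq_domain, 0);

	if (virq)
		rc = request_threaded_irq(virq, NULL, hdcp_irq_thread,
				IRQF_ONESHOT, "sde_hdmi_hw", display);

)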
int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
{
int rc = 0;
@@ -1306,6 +2152,13 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
goto error;
}
+ display->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 8,
+ &sde_hdmi_irqdomain_ops, display);
+ if (!display->irq_domain) {
+ SDE_ERROR("failed to create IRQ domain\n");
+ goto error;
+ }
+
enc->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -1331,6 +2184,9 @@ int sde_hdmi_drm_deinit(struct sde_hdmi *display)
return -EINVAL;
}
+ if (display->irq_domain)
+ irq_domain_remove(display->irq_domain);
+
return rc;
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 869d1bebf9db..dff245dec764 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -23,11 +23,16 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <media/cec-notifier.h>
#include "hdmi.h"
-#define MAX_NUMBER_ADB 5
-#define MAX_AUDIO_DATA_BLOCK_SIZE 30
-#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+#include "sde_edid_parser.h"
+
+#ifdef HDMI_DEBUG_ENABLE
+#define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
+#else
+#define SDE_HDMI_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args)
+#endif
/**
* struct sde_hdmi_info - defines hdmi display properties
@@ -64,14 +69,6 @@ struct sde_hdmi_ctrl {
u32 hdmi_ctrl_idx;
};
-struct hdmi_edid_ctrl {
- struct edid *edid;
- u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
- int adb_size;
- u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
- int sadb_size;
-};
-
/**
* struct sde_hdmi - hdmi display information
* @pdev: Pointer to platform device.
@@ -84,9 +81,14 @@ struct hdmi_edid_ctrl {
* @non_pluggable: If HDMI display is non pluggable
* @num_of_modes: Number of modes supported by display if non pluggable.
* @mode_list: Mode list if non pluggable.
+ * @mode: Current display mode.
* @connected: If HDMI display is connected.
* @is_tpg_enabled: TPG state.
* @hpd_work: HPD work structure.
+ * @codec_ready: If audio codec is ready.
+ * @client_notify_pending: If a client notification is pending.
+ * @irq_domain: IRQ domain structure.
+ * @notifier: CEC notifier to convey physical address information.
* @root: Debug fs root entry.
*/
struct sde_hdmi {
@@ -102,20 +104,58 @@ struct sde_hdmi {
struct platform_device *ext_pdev;
struct msm_ext_disp_init_data ext_audio_data;
- struct hdmi_edid_ctrl edid;
+ struct sde_edid_ctrl *edid_ctrl;
bool non_pluggable;
u32 num_of_modes;
struct list_head mode_list;
+ struct drm_display_mode mode;
bool connected;
bool is_tpg_enabled;
struct work_struct hpd_work;
+ bool codec_ready;
+ bool client_notify_pending;
+
+ struct irq_domain *irq_domain;
+ struct cec_notifier *notifier;
/* DEBUG FS */
struct dentry *root;
};
+/**
+ * enum hdmi_tx_scdc_access_type - HDMI 2.0 SCDC register access types.
+ */
+enum hdmi_tx_scdc_access_type {
+ HDMI_TX_SCDC_SCRAMBLING_STATUS,
+ HDMI_TX_SCDC_SCRAMBLING_ENABLE,
+ HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+ HDMI_TX_SCDC_CLOCK_DET_STATUS,
+ HDMI_TX_SCDC_CH0_LOCK_STATUS,
+ HDMI_TX_SCDC_CH1_LOCK_STATUS,
+ HDMI_TX_SCDC_CH2_LOCK_STATUS,
+ HDMI_TX_SCDC_CH0_ERROR_COUNT,
+ HDMI_TX_SCDC_CH1_ERROR_COUNT,
+ HDMI_TX_SCDC_CH2_ERROR_COUNT,
+ HDMI_TX_SCDC_READ_ENABLE,
+ HDMI_TX_SCDC_MAX,
+};
+
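
(A short usage sketch for the read-only entries, assuming a powered link; sde_hdmi_scdc_read() is declared later in this header:

	/* Hypothetical poll: check whether the sink reports scrambling */
	u32 status = 0;

	if (!sde_hdmi_scdc_read(hdmi, HDMI_TX_SCDC_SCRAMBLING_STATUS, &status))
		SDE_DEBUG("sink scrambling %s\n", status ? "on" : "off");

)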
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+/**
+ * enum hdmi_tx_ddc_timer_type - HDMI DDC timer types.
+ */
+enum hdmi_tx_ddc_timer_type {
+ HDMI_TX_DDC_TIMER_HDCP2P2_RD_MSG,
+ HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS,
+ HDMI_TX_DDC_TIMER_UPDATE_FLAGS,
+ HDMI_TX_DDC_TIMER_STATUS_FLAGS,
+ HDMI_TX_DDC_TIMER_CED,
+ HDMI_TX_DDC_TIMER_MAX,
+};
+
#ifdef CONFIG_DRM_SDE_HDMI
/**
* sde_hdmi_get_num_of_displays() - returns number of display devices
@@ -242,6 +282,22 @@ int sde_hdmi_get_info(struct msm_display_info *info,
void *display);
/**
+ * sde_hdmi_set_property() - set the connector properties
+ * @connector: Handle to the connector.
+ * @state: Handle to the connector state.
+ * @property_index: property index.
+ * @value: property value.
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display);
+
+/**
* sde_hdmi_bridge_init() - init sde hdmi bridge
* @hdmi: Handle to the hdmi.
*
@@ -259,6 +315,52 @@ struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi);
void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
/**
+ * sde_hdmi_ddc_read() - common hdmi ddc read API.
+ * @hdmi: Handle to the hdmi.
+ * @addr: Command address.
+ * @offset: Command offset.
+ * @data: Data buffer for read back.
+ * @data_len: Data buffer length.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len);
+
+/**
+ * sde_hdmi_ddc_write() - common hdmi ddc write API.
+ * @hdmi: Handle to the hdmi.
+ * @addr: Command address.
+ * @offset: Command offset.
+ * @data: Data buffer for write.
+ * @data_len: Data buffer length.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len);
+
+/**
+ * sde_hdmi_scdc_read() - hdmi 2.0 ddc read API.
+ * @hdmi: Handle to the hdmi.
+ * @data_type: DDC data type, refer to enum hdmi_tx_scdc_access_type.
+ * @val: Read back value.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val);
+
+/**
+ * sde_hdmi_scdc_write() - hdmi 2.0 ddc write API.
+ * @hdmi: Handle to the hdmi.
+ * @data_type: DDC data type, refer to enum hdmi_tx_scdc_access_type.
+ * @val: Value to write through DDC.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val);
+
+/**
* sde_hdmi_audio_on() - enable hdmi audio.
* @hdmi: Handle to the hdmi.
* @params: audio setup parameters from codec.
@@ -287,13 +389,12 @@ int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
/**
* sde_hdmi_notify_clients() - notify hdmi clients of the connection status.
- * @connector: Handle to the drm_connector.
+ * @display: Handle to sde_hdmi.
* @connected: connection status.
*
* Return: void.
*/
-void sde_hdmi_notify_clients(struct drm_connector *connector,
- bool connected);
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected);
/**
* sde_hdmi_ack_state() - acknowledge the connection status.
@@ -305,40 +406,6 @@ void sde_hdmi_notify_clients(struct drm_connector *connector,
void sde_hdmi_ack_state(struct drm_connector *connector,
enum drm_connector_status status);
-/**
- * sde_hdmi_edid_init() - init edid structure.
- * @display: Handle to the sde_hdmi.
- *
- * Return: error code.
- */
-int sde_hdmi_edid_init(struct sde_hdmi *display);
-
-/**
- * sde_hdmi_edid_deinit() - deinit edid structure.
- * @display: Handle to the sde_hdmi.
- *
- * Return: error code.
- */
-int sde_hdmi_edid_deinit(struct sde_hdmi *display);
-
-/**
- * sde_hdmi_get_edid() - get edid info.
- * @connector: Handle to the drm_connector.
- * @display: Handle to the sde_hdmi.
- *
- * Return: void.
- */
-void sde_hdmi_get_edid(struct drm_connector *connector,
- struct sde_hdmi *display);
-
-/**
- * sde_hdmi_free_edid() - free edid structure.
- * @display: Handle to the sde_hdmi.
- *
- * Return: error code.
- */
-int sde_hdmi_free_edid(struct sde_hdmi *display);
-
#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
static inline u32 sde_hdmi_get_num_of_displays(void)
@@ -413,5 +480,15 @@ static inline int sde_hdmi_get_info(struct msm_display_info *info,
{
return 0;
}
+
+static inline int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display)
+{
+ return 0;
+}
+
#endif /*#else of CONFIG_DRM_SDE_HDMI*/
#endif /* _SDE_HDMI_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
index 13ea49cfa42d..48a3a9316a41 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
@@ -30,8 +30,6 @@
#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
-#define HDMI_KHZ_TO_HZ 1000
-#define HDMI_MHZ_TO_HZ 1000000
#define HDMI_ACR_N_MULTIPLIER 128
#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 681dca501f9b..26a0638f7792 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -28,6 +28,13 @@ struct sde_hdmi_bridge {
};
#define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base)
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+/* default number of hsyncs in 200ms for 4k@60 */
+#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571
+
/* for AVI program */
#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
(HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
@@ -101,11 +108,270 @@ static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
}
}
+static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi,
+ char *what)
+{
+ u32 ddc_int_ctrl, ddc_status, in_use, timeout;
+ u32 sw_done_mask = BIT(2);
+ u32 sw_done_ack = BIT(1);
+ u32 in_use_by_sw = BIT(0);
+ u32 in_use_by_hw = BIT(1);
+
+ /* clear and enable interrupts */
+ ddc_int_ctrl = sw_done_mask | sw_done_ack;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+ /* wait until DDC HW is free */
+ timeout = 100;
+ do {
+ ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+ in_use = ddc_status & (in_use_by_sw | in_use_by_hw);
+ if (in_use) {
+ SDE_DEBUG("ddc is in use by %s, timeout(%d)\n",
+ ddc_status & in_use_by_sw ? "sw" : "hw",
+ timeout);
+ udelay(100);
+ }
+ } while (in_use && --timeout);
+
+ if (!timeout) {
+ SDE_ERROR("%s: timedout\n", what);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int _sde_hdmi_bridge_scrambler_ddc_check_status(struct hdmi *hdmi)
+{
+ int rc = 0;
+ u32 reg_val;
+
+ /* check for errors and clear status */
+ reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS);
+ if (reg_val & BIT(4)) {
+ SDE_ERROR("ddc aborted\n");
+ reg_val |= BIT(5);
+ rc = -ECONNABORTED;
+ }
+
+ if (reg_val & BIT(8)) {
+ SDE_ERROR("timed out\n");
+ reg_val |= BIT(9);
+ rc = -ETIMEDOUT;
+ }
+
+ if (reg_val & BIT(12)) {
+ SDE_ERROR("NACK0\n");
+ reg_val |= BIT(13);
+ rc = -EIO;
+ }
+ if (reg_val & BIT(14)) {
+ SDE_ERROR("NACK1\n");
+ reg_val |= BIT(15);
+ rc = -EIO;
+ }
+ hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS, reg_val);
+
+ return rc;
+}
+
+static void _sde_hdmi_bridge_scrambler_ddc_reset(struct hdmi *hdmi)
+{
+ u32 reg_val;
+
+ /* clear ack and disable interrupts */
+ reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1);
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val);
+
+ /* Reset DDC timers */
+ reg_val = BIT(0) | hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+ hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+ reg_val &= ~BIT(0);
+ hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+}
+
+static void _sde_hdmi_bridge_scrambler_ddc_disable(struct hdmi *hdmi)
+{
+ u32 reg_val;
+
+ _sde_hdmi_bridge_scrambler_ddc_reset(hdmi);
+ /* Disable HW DDC access to RxStatus register */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL);
+ reg_val &= ~(BIT(8) | BIT(9));
+ hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val);
+}
+
+static int _sde_hdmi_bridge_scrambler_status_timer_setup(struct hdmi *hdmi,
+ u32 timeout_hsync)
+{
+ u32 reg_val;
+ int rc;
+
+ _sde_hdmi_bridge_ddc_clear_irq(hdmi, "scrambler");
+
+ hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL,
+ timeout_hsync);
+ hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2,
+ timeout_hsync);
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL5);
+ reg_val |= BIT(10);
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL5, reg_val);
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL2);
+ /* Trigger an interrupt if scrambler status is 0 or on DDC failure */
+ reg_val |= BIT(10);
+ reg_val &= ~(BIT(15) | BIT(16));
+ reg_val |= BIT(16);
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val);
+
+ /* Enable DDC access */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL);
+
+ reg_val &= ~(BIT(8) | BIT(9));
+ reg_val |= BIT(8);
+ hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val);
+
+ /* Wait 200ms for the sink to respond, as per the HDMI 2.0 spec */
+ msleep(200);
+
+ /* clear the scrambler status */
+ rc = _sde_hdmi_bridge_scrambler_ddc_check_status(hdmi);
+ if (rc)
+ SDE_ERROR("scrambling ddc error %d\n", rc);
+
+ _sde_hdmi_bridge_scrambler_ddc_disable(hdmi);
+
+ return rc;
+}
+
+static int _sde_hdmi_bridge_setup_ddc_timers(struct hdmi *hdmi,
+ u32 type, u32 to_in_num_lines)
+{
+ if (type >= HDMI_TX_DDC_TIMER_MAX) {
+ SDE_ERROR("Invalid timer type %d\n", type);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS:
+ _sde_hdmi_bridge_scrambler_status_timer_setup(hdmi,
+ to_in_num_lines);
+ break;
+ default:
+ SDE_ERROR("%d type not supported\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int _sde_hdmi_bridge_get_timeout_in_hsync(
+ struct drm_display_mode *mode, u32 timeout_ms)
+{
+ /*
+ * pixel clock = h_total * v_total * fps
+ * i.e. "pixel clock" pixels are transmitted every second.
+ * time taken by one line (h_total) = 1s / (v_total * fps).
+ * lines for a given time = (time_ms * 1000) / (1000000 / (v_total * fps))
+ * = (time_ms * clock) / h_total
+ */
+
+ return (timeout_ms * mode->clock / mode->htotal);
+}
+
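
(Sanity check of the formula with an assumed 4k@60 mode, CEA VIC 97: clock = 594000 kHz, htotal = 4400. 200 ms gives (200 * 594000) / 4400 = 27000 hsync periods, the same order as HDMI_DEFAULT_TIMEOUT_HSYNC above. The ms and kHz factors of 1000 cancel, since drm_display_mode.clock is in kHz.)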
+static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int rc = 0;
+ int timeout_hsync;
+ u32 reg_val = 0;
+ u32 tmds_clock_ratio = 0;
+ bool scrambler_on = false;
+
+ struct drm_connector *connector = NULL;
+
+ if (!hdmi || !mode) {
+ SDE_ERROR("invalid input\n");
+ return -EINVAL;
+ }
+ connector = hdmi->connector;
+
+ /* Read HDMI version */
+ reg_val = hdmi_read(hdmi, REG_HDMI_VERSION);
+ reg_val = (reg_val & 0xF0000000) >> 28;
+ /* Scrambling is supported from HDMI TX 4.0 */
+ if (reg_val < HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+ DRM_INFO("scrambling not supported by tx\n");
+ return 0;
+ }
+
+ if (mode->clock > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) {
+ scrambler_on = true;
+ tmds_clock_ratio = 1;
+ } else {
+ scrambler_on = connector->supports_scramble;
+ }
+
+ DRM_INFO("scrambler %s\n", scrambler_on ? "on" : "off");
+
+ if (scrambler_on) {
+ rc = sde_hdmi_scdc_write(hdmi,
+ HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+ tmds_clock_ratio);
+ if (rc) {
+ SDE_ERROR("TMDS CLK RATIO ERR\n");
+ return rc;
+ }
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val |= BIT(28); /* Set SCRAMBLER_EN bit */
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+ rc = sde_hdmi_scdc_write(hdmi,
+ HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1);
+ if (rc) {
+ SDE_ERROR("failed to enable scrambling\n");
+ return rc;
+ }
+
+ /*
+ * Set up the hardware to periodically check the scrambler
+ * status bit on the sink. The sink should set this bit
+ * within 200ms after scrambling is enabled.
+ */
+ timeout_hsync = _sde_hdmi_bridge_get_timeout_in_hsync(
+ mode,
+ HDMI_TX_SCRAMBLER_TIMEOUT_MSEC);
+ if (timeout_hsync <= 0) {
+ SDE_ERROR("err in timeout hsync calc\n");
+ timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+ }
+ SDE_DEBUG("timeout for scrambling en: %d hsyncs\n",
+ timeout_hsync);
+
+ rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi,
+ HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync);
+ } else {
+ sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0);
+ reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+ reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */
+ hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+ }
+ return rc;
+}
+
static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
DRM_DEBUG("power up");
@@ -122,41 +388,20 @@ static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
- sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_CONNECT);
-}
-
-static void sde_hdmi_force_update_audio(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct sde_connector *c_conn = to_sde_connector(connector);
- struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
-
- if (display && display->non_pluggable) {
- display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
- display->ext_audio_data.type,
- status,
- MSM_EXT_DISP_HPD_AUDIO);
- }
+ mutex_lock(&display->display_lock);
+ if (display->codec_ready)
+ sde_hdmi_notify_clients(display, display->connected);
+ else
+ display->client_notify_pending = true;
+ mutex_unlock(&display->display_lock);
}
static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
{
- struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
-
- /* force update audio ops when there's no HPD event */
- sde_hdmi_force_update_audio(hdmi->connector,
- EXT_DISPLAY_CABLE_CONNECT);
}
static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
{
- struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
- struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
-
- /* force update audio ops when there's no HPD event */
- sde_hdmi_force_update_audio(hdmi->connector,
- EXT_DISPLAY_CABLE_DISCONNECT);
}
static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
@@ -164,6 +409,10 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
struct hdmi_phy *phy = hdmi->phy;
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ sde_hdmi_notify_clients(display, display->connected);
if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
@@ -180,8 +429,6 @@ static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
_sde_hdmi_bridge_power_off(bridge);
hdmi->power_on = false;
}
-
- sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_DISCONNECT);
}
static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
@@ -302,6 +549,15 @@ static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
}
+static inline void _sde_hdmi_save_mode(struct hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ struct sde_connector *c_conn = to_sde_connector(hdmi->connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ drm_mode_copy(&display->mode, mode);
+}
+
static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -373,6 +629,9 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
_sde_hdmi_bridge_set_spd_infoframe(hdmi, mode);
DRM_DEBUG("hdmi setup info frame\n");
}
+ _sde_hdmi_bridge_setup_scrambler(hdmi, mode);
+
+ _sde_hdmi_save_mode(hdmi, mode);
}
static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c
deleted file mode 100644
index 57c79e2aa812..000000000000
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <drm/drm_edid.h>
-
-#include "sde_kms.h"
-#include "sde_hdmi.h"
-
-/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */
-#define DBC_START_OFFSET 4
-#define EDID_DTD_LEN 18
-
-enum data_block_types {
- RESERVED,
- AUDIO_DATA_BLOCK,
- VIDEO_DATA_BLOCK,
- VENDOR_SPECIFIC_DATA_BLOCK,
- SPEAKER_ALLOCATION_DATA_BLOCK,
- VESA_DTC_DATA_BLOCK,
- RESERVED2,
- USE_EXTENDED_TAG
-};
-
-static u8 *_sde_hdmi_edid_find_cea_extension(struct edid *edid)
-{
- u8 *edid_ext = NULL;
- int i;
-
- /* No EDID or EDID extensions */
- if (edid == NULL || edid->extensions == 0)
- return NULL;
-
- /* Find CEA extension */
- for (i = 0; i < edid->extensions; i++) {
- edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == CEA_EXT)
- break;
- }
-
- if (i == edid->extensions)
- return NULL;
-
- return edid_ext;
-}
-
-static const u8 *_sde_hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
- u8 type, u8 *len)
-{
- /* the start of data block collection, start of Video Data Block */
- u32 offset = start_offset;
- u32 dbc_offset = in_buf[2];
-
- /*
- * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
- * collection present.
- * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
- * collection present and no DTD data present.
- */
- if ((dbc_offset == 0) || (dbc_offset == 4)) {
- SDE_ERROR("EDID: no DTD or non-DTD data present\n");
- return NULL;
- }
-
- while (offset < dbc_offset) {
- u8 block_len = in_buf[offset] & 0x1F;
-
- if ((offset + block_len <= dbc_offset) &&
- (in_buf[offset] >> 5) == type) {
- *len = block_len;
- SDE_DEBUG("EDID: block=%d found @ 0x%x w/ len=%d\n",
- type, offset, block_len);
-
- return in_buf + offset;
- }
- offset += 1 + block_len;
- }
-
- return NULL;
-}
-
-static void _sde_hdmi_extract_audio_data_blocks(
- struct hdmi_edid_ctrl *edid_ctrl)
-{
- u8 len = 0;
- u8 adb_max = 0;
- const u8 *adb = NULL;
- u32 offset = DBC_START_OFFSET;
- u8 *cea = NULL;
-
- if (!edid_ctrl) {
- SDE_ERROR("invalid edid_ctrl\n");
- return;
- }
-
- cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid);
- if (!cea) {
- SDE_DEBUG("CEA extension not found\n");
- return;
- }
-
- edid_ctrl->adb_size = 0;
-
- memset(edid_ctrl->audio_data_block, 0,
- sizeof(edid_ctrl->audio_data_block));
-
- do {
- len = 0;
- adb = _sde_hdmi_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
- &len);
-
- if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
- adb_max >= MAX_NUMBER_ADB)) {
- if (!edid_ctrl->adb_size) {
- SDE_DEBUG("No/Invalid Audio Data Block\n");
- return;
- }
-
- continue;
- }
-
- memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
- adb + 1, len);
- offset = (adb - cea) + 1 + len;
-
- edid_ctrl->adb_size += len;
- adb_max++;
- } while (adb);
-
-}
-
-static void _sde_hdmi_extract_speaker_allocation_data(
- struct hdmi_edid_ctrl *edid_ctrl)
-{
- u8 len;
- const u8 *sadb = NULL;
- u8 *cea = NULL;
-
- if (!edid_ctrl) {
- SDE_ERROR("invalid edid_ctrl\n");
- return;
- }
-
- cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid);
- if (!cea) {
- SDE_DEBUG("CEA extension not found\n");
- return;
- }
-
- sadb = _sde_hdmi_edid_find_block(cea, DBC_START_OFFSET,
- SPEAKER_ALLOCATION_DATA_BLOCK, &len);
- if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
- SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
- return;
- }
-
- memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
- edid_ctrl->sadb_size = len;
-
- SDE_DEBUG("EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
- sadb[1],
- (sadb[1] & BIT(0)) ? "FL/FR," : "",
- (sadb[1] & BIT(1)) ? "LFE," : "",
- (sadb[1] & BIT(2)) ? "FC," : "",
- (sadb[1] & BIT(3)) ? "RL/RR," : "",
- (sadb[1] & BIT(4)) ? "RC," : "",
- (sadb[1] & BIT(5)) ? "FLC/FRC," : "",
- (sadb[1] & BIT(6)) ? "RLC/RRC," : "");
-}
-
-int sde_hdmi_edid_init(struct sde_hdmi *display)
-{
- int rc = 0;
-
- if (!display) {
- SDE_ERROR("[%s]Invalid params\n", display->name);
- return -EINVAL;
- }
-
- memset(&display->edid, 0, sizeof(display->edid));
-
- return rc;
-}
-
-int sde_hdmi_free_edid(struct sde_hdmi *display)
-{
- struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
-
- kfree(edid_ctrl->edid);
- edid_ctrl->edid = NULL;
-
- return 0;
-}
-
-int sde_hdmi_edid_deinit(struct sde_hdmi *display)
-{
- return sde_hdmi_free_edid(display);
-}
-
-void sde_hdmi_get_edid(struct drm_connector *connector,
- struct sde_hdmi *display)
-{
- u32 hdmi_ctrl;
- struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
- struct hdmi *hdmi = display->ctrl.ctrl;
-
- /* Read EDID */
- hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
- edid_ctrl->edid = drm_get_edid(connector, hdmi->i2c);
- hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
-
- if (edid_ctrl->edid) {
- hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid_ctrl->edid);
-
- _sde_hdmi_extract_audio_data_blocks(edid_ctrl);
- _sde_hdmi_extract_speaker_allocation_data(edid_ctrl);
- }
-};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index aef20f76bf02..ea485a2ec2cd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -466,13 +466,13 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
#define REG_HDMI_CEC_RD_FILTER 0x000002b0
#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
-#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
{
return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
}
-#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__MASK 0x1fff0000
#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
{
@@ -480,13 +480,13 @@ static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
}
#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
-#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
{
return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
}
-#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
{
@@ -494,13 +494,13 @@ static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
}
#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
-#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
{
return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
}
-#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
{
@@ -508,13 +508,13 @@ static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
}
#define REG_HDMI_TOTAL 0x000002c0
-#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
#define HDMI_TOTAL_H_TOTAL__SHIFT 0
static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
{
return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
}
-#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
#define HDMI_TOTAL_V_TOTAL__SHIFT 16
static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
{
@@ -522,7 +522,7 @@ static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
}
#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
-#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
{
@@ -563,6 +563,20 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
+#define REG_HDMI_DDC_INT_CTRL0 0x00000430
+#define REG_HDMI_DDC_INT_CTRL1 0x00000434
+#define REG_HDMI_DDC_INT_CTRL2 0x00000438
+#define REG_HDMI_DDC_INT_CTRL3 0x0000043C
+#define REG_HDMI_DDC_INT_CTRL4 0x00000440
+#define REG_HDMI_DDC_INT_CTRL5 0x00000444
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL 0x00000464
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL 0x00000468
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 0x0000046C
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS 0x00000470
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS 0x00000474
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 0x00000478
+#define REG_HDMI_HW_DDC_CTRL 0x000004CC
+
#define REG_HDMI_8x60_PHY_REG0 0x00000300
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index caad2be87ae4..e5f42fe983c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -392,7 +392,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ msm_gem_get_iova(next_bo, mdp4_kms->aspace,
&iova);
/* enable cursor: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index b6cddee0cf34..40509434a913 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -184,8 +184,7 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_destroy(aspace);
}
}
@@ -202,8 +201,7 @@ static void mdp4_destroy(struct msm_kms *kms)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
+ aspace->mmu->funcs->detach(aspace->mmu);
msm_gem_address_space_put(aspace);
}
@@ -416,10 +414,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -515,15 +509,11 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
-
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- goto fail;
- }
+ config->iommu->geometry.aperture_start = 0x1000;
+ config->iommu->geometry.aperture_end = 0xffffffff;
aspace = msm_gem_address_space_create(&pdev->dev,
- mmu, "mdp4", 0x1000, 0xffffffff);
+ config->iommu, MSM_IOMMU_DOMAIN_DEFAULT, "mdp4");
if (IS_ERR(aspace)) {
ret = PTR_ERR(aspace);
goto fail;
@@ -531,8 +521,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret)
goto fail;
} else {
@@ -547,9 +536,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mutex_lock(&dev->struct_mutex);
mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index e4e69ebd116e..4dbf456504b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -22,10 +22,6 @@
#include "msm_mmu.h"
#include "mdp5_kms.h"
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -613,8 +609,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_kms->aspace = aspace;
- ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index d8791155236c..fa746d71cd3b 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -429,11 +429,21 @@ static void complete_commit(struct msm_commit *commit)
commit_destroy(commit);
}
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+ struct drm_atomic_state *state, struct msm_commit *commit);
+
static void fence_cb(struct msm_fence_cb *cb)
{
struct msm_commit *commit =
container_of(cb, struct msm_commit, fence_cb);
- complete_commit(commit);
+ int ret = -EINVAL;
+
+ ret = msm_atomic_commit_dispatch(commit->dev, commit->state, commit);
+ if (ret) {
+ DRM_ERROR("%s: atomic commit failed\n", __func__);
+ drm_atomic_state_free(commit->state);
+ commit_destroy(commit);
+ }
}
static void _msm_drm_commit_work_cb(struct kthread_work *work)
@@ -624,13 +634,7 @@ int msm_atomic_commit(struct drm_device *dev,
*/
if (async) {
- ret = msm_atomic_commit_dispatch(dev, state, commit);
- if (ret) {
- DRM_ERROR("%s: atomic commit failed\n", __func__);
- drm_atomic_state_free(state);
- commit_destroy(commit);
- goto error;
- }
+ msm_queue_fence_cb(dev, &commit->fence_cb, commit->fence);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 276329b7b10c..a3bdc30b9620 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -251,17 +251,12 @@ static int msm_unload(struct drm_device *dev)
}
#define KMS_MDP4 0
-#define KMS_MDP5 1
-#define KMS_SDE 2
+#define KMS_SDE 1
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
static const struct of_device_id match_types[] = { {
- .compatible = "qcom,mdss_mdp",
- .data = (void *)KMS_MDP5,
- },
- {
.compatible = "qcom,sde-kms",
.data = (void *)KMS_SDE,
/* end node */
@@ -327,6 +322,7 @@ static int msm_init_vram(struct drm_device *dev)
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+ spin_lock_init(&priv->vram.lock);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
@@ -395,6 +391,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
+ hash_init(priv->mn_hash);
+ mutex_init(&priv->mn_lock);
drm_mode_config_init(dev);
@@ -432,9 +430,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
case KMS_MDP4:
kms = mdp4_kms_init(dev);
break;
- case KMS_MDP5:
- kms = mdp5_kms_init(dev);
- break;
case KMS_SDE:
kms = sde_kms_init(dev);
break;
@@ -566,7 +561,8 @@ static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
return ERR_PTR(-ENOMEM);
ctx->aspace = msm_gem_address_space_create_instance(
- priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff);
+ priv->gpu->aspace->mmu, "gpu", 0x100000000ULL,
+ TASK_SIZE_64 - 1);
if (IS_ERR(ctx->aspace)) {
int ret = PTR_ERR(ctx->aspace);
@@ -639,12 +635,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
if (priv->gpu)
msm_gpu_cleanup_counters(priv->gpu, ctx);
- mutex_lock(&dev->struct_mutex);
if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
msm_gem_address_space_put(ctx->aspace);
}
- mutex_unlock(&dev->struct_mutex);
kfree(ctx);
}
@@ -1150,6 +1144,20 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
args->flags, &args->handle);
}
+static int msm_ioctl_gem_svm_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_svm_new *args = data;
+
+ if (args->flags & ~MSM_BO_FLAGS) {
+ DRM_ERROR("invalid flags: %08x\n", args->flags);
+ return -EINVAL;
+ }
+
+ return msm_gem_svm_new_handle(dev, file, args->hostptr, args->size,
+ args->flags, &args->handle);
+}
+
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
return ktime_set(timeout.tv_sec, timeout.tv_nsec);
@@ -1202,29 +1210,49 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
struct msm_file_private *ctx = file->driver_priv;
int ret = 0;
if (args->flags & ~MSM_INFO_FLAGS)
return -EINVAL;
- if (!ctx || !ctx->aspace)
- return -EINVAL;
-
obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;
+ msm_obj = to_msm_bo(obj);
if (args->flags & MSM_INFO_IOVA) {
+ struct msm_gem_address_space *aspace = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
uint64_t iova;
- ret = msm_gem_get_iova(obj, ctx->aspace, &iova);
+ if (msm_obj->flags & MSM_BO_SECURE && priv->gpu)
+ aspace = priv->gpu->secure_aspace;
+ else if (ctx)
+ aspace = ctx->aspace;
+
+ if (!aspace) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = msm_gem_get_iova(obj, aspace, &iova);
if (!ret)
args->offset = iova;
} else {
+ if (msm_obj->flags & MSM_BO_SVM) {
+ /*
+ * An mmap offset is not needed for an SVM object, since it
+ * is already mmap'ed before the SVM ioctl is invoked.
+ */
+ ret = -EACCES;
+ goto out;
+ }
args->offset = msm_gem_mmap_offset(obj);
}
+out:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -1537,6 +1565,37 @@ static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
return 0;
}
+static int msm_ioctl_gem_sync(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+
+ struct drm_msm_gem_sync *arg = data;
+ int i;
+
+ for (i = 0; i < arg->nr_ops; i++) {
+ struct drm_msm_gem_syncop syncop;
+ struct drm_gem_object *obj;
+ int ret;
+ void __user *ptr =
+ (void __user *)(uintptr_t)
+ (arg->ops + (i * sizeof(syncop)));
+
+ ret = copy_from_user(&syncop, ptr, sizeof(syncop));
+ if (ret)
+ return -EFAULT;
+
+ obj = drm_gem_object_lookup(dev, file, syncop.handle);
+ if (!obj)
+ return -ENOENT;
+
+ msm_gem_sync(obj, syncop.op);
+
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ return 0;
+}
+
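
(A hypothetical userspace view of this ioctl, inferred purely from the copy_from_user() layout above — the op constant, the BO handles, and the exact uapi field names are assumptions:

	/* Hypothetical userspace sketch: sync two BOs in one call */
	struct drm_msm_gem_syncop ops[2] = {
		{ .handle = bo_a, .op = MSM_GEM_SYNC_TO_DEV },	/* op name assumed */
		{ .handle = bo_b, .op = MSM_GEM_SYNC_TO_DEV },
	};
	struct drm_msm_gem_sync req = {
		.nr_ops = 2,
		.ops = (uintptr_t)ops,
	};

	drmIoctl(fd, DRM_IOCTL_MSM_GEM_SYNC, &req);

)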
void msm_send_crtc_notification(struct drm_crtc *crtc,
struct drm_event *event, u8 *payload)
{
@@ -1665,6 +1724,10 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read,
DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SYNC, msm_ioctl_gem_sync,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SVM_NEW, msm_ioctl_gem_svm_new,
+ DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1904,7 +1967,6 @@ static const struct platform_device_id msm_id[] = {
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp" }, /* mdp4 */
- { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
{ .compatible = "qcom,sde-kms" }, /* sde */
{}
};
@@ -1934,6 +1996,7 @@ void __exit adreno_unregister(void)
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_smmu_driver_init();
msm_dsi_register();
msm_edp_register();
hdmi_register();
@@ -1949,6 +2012,7 @@ static void __exit msm_drm_unregister(void)
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_smmu_driver_cleanup();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d2d118cf7e07..8f56a3126008 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -34,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <linux/sde_io_util.h>
+#include <linux/hashtable.h>
#include <asm/sizes.h>
#include <linux/kthread.h>
@@ -111,6 +112,7 @@ enum msm_mdp_plane_property {
PLANE_PROP_ROTATION,
PLANE_PROP_BLEND_OP,
PLANE_PROP_SRC_CONFIG,
+ PLANE_PROP_FB_TRANSLATION_MODE,
/* total # of properties */
PLANE_PROP_COUNT
@@ -129,6 +131,7 @@ enum msm_mdp_crtc_property {
CRTC_PROP_CORE_CLK,
CRTC_PROP_CORE_AB,
CRTC_PROP_CORE_IB,
+ CRTC_PROP_SECURITY_LEVEL,
/* total # of properties */
CRTC_PROP_COUNT
@@ -148,6 +151,7 @@ enum msm_mdp_conn_property {
CONNECTOR_PROP_DST_Y,
CONNECTOR_PROP_DST_W,
CONNECTOR_PROP_DST_H,
+ CONNECTOR_PROP_PLL_DELTA,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
@@ -324,6 +328,11 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[MAX_CONNECTORS];
+ /* hash to store mm_struct to msm_mmu_notifier mappings */
+ DECLARE_HASHTABLE(mn_hash, 7);
+ /* protects mn_hash and the msm_mmu_notifier for the process */
+ struct mutex mn_lock;
+
/* Properties */
struct drm_property *plane_property[PLANE_PROP_COUNT];
struct drm_property *crtc_property[CRTC_PROP_COUNT];
@@ -340,6 +349,7 @@ struct msm_drm_private {
* and position mm_node->start is in # of pages:
*/
struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
} vram;
struct msm_vblank_ctrl vblank_ctrl;
@@ -402,17 +412,22 @@ void msm_update_fence(struct drm_device *dev, uint32_t fence);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv);
+ void *priv, bool invalidated);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv, unsigned int flags);
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *domain,
+ uint64_t hostptr, uint64_t size);
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
/* For GPU and legacy display */
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name);
+ int type, const char *name);
struct msm_gem_address_space *
msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
uint64_t start, uint64_t end);
@@ -430,8 +445,6 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
@@ -452,7 +465,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb);
@@ -467,8 +479,17 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- uint32_t size, struct sg_table *sgt);
+ uint32_t size, struct sg_table *sgt, u32 flags);
+void msm_gem_sync(struct drm_gem_object *obj, u32 op);
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint64_t hostptr, uint64_t size,
+ uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+ struct drm_file *file, uint64_t hostptr,
+ uint64_t size, uint32_t flags);
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index cf127250c0d0..28b98cc1433c 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -104,10 +104,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
/* allocate backing bo */
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
- mutex_lock(&dev->struct_mutex);
fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
MSM_BO_WC | MSM_BO_STOLEN);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
fbdev->bo = NULL;
@@ -133,7 +131,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ ret = msm_gem_get_iova(fbdev->bo, 0, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
@@ -163,7 +161,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
/* FIXME: Verify paddr < 32 bits? */
dev->mode_config.fb_base = lower_32_bits(paddr);
- fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_base = msm_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = lower_32_bits(paddr);
fbi->fix.smem_len = fbdev->bo->size;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d1455fbc980e..6bb29c62378d 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -18,12 +18,154 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void msm_gem_mn_free(struct kref *refcount)
+{
+ struct msm_mmu_notifier *msm_mn = container_of(refcount,
+ struct msm_mmu_notifier, refcount);
+
+ mmu_notifier_unregister(&msm_mn->mn, msm_mn->mm);
+ hash_del(&msm_mn->node);
+
+ kfree(msm_mn);
+}
+
+static int msm_gem_mn_get(struct msm_mmu_notifier *msm_mn)
+{
+ if (msm_mn)
+ return kref_get_unless_zero(&msm_mn->refcount);
+ return 0;
+}
+
+static void msm_gem_mn_put(struct msm_mmu_notifier *msm_mn)
+{
+ if (msm_mn) {
+ struct msm_drm_private *msm_dev = msm_mn->msm_dev;
+
+ mutex_lock(&msm_dev->mn_lock);
+ kref_put(&msm_mn->refcount, msm_gem_mn_free);
+ mutex_unlock(&msm_dev->mn_lock);
+ }
+}
+
+void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start, unsigned long end);
+
+static const struct mmu_notifier_ops msm_mn_ops = {
+ .invalidate_range_start = msm_mn_invalidate_range_start,
+};
+
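
(The callback body lands elsewhere in this patch; purely to illustrate how the svm_tree interval tree initialized below in msm_gem_mn_find() is meant to be walked, a sketch — not the actual implementation — might look like:

	/* Sketch only: mark SVM objects overlapping [start, end) invalid */
	void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
			struct mm_struct *mm, unsigned long start,
			unsigned long end)
	{
		struct msm_mmu_notifier *msm_mn =
			container_of(mn, struct msm_mmu_notifier, mn);
		struct interval_tree_node *node;

		spin_lock(&msm_mn->svm_tree_lock);
		node = interval_tree_iter_first(&msm_mn->svm_tree,
				start, end - 1);
		while (node) {
			struct msm_gem_svm_object *svm_obj = container_of(node,
					struct msm_gem_svm_object, svm_node);

			svm_obj->invalid = true;
			node = interval_tree_iter_next(node, start, end - 1);
		}
		spin_unlock(&msm_mn->svm_tree_lock);
	}

)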
+static struct msm_mmu_notifier *
+msm_gem_mn_find(struct msm_drm_private *msm_dev, struct mm_struct *mm,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_mmu_notifier *msm_mn;
+ int ret = 0;
+
+ mutex_lock(&msm_dev->mn_lock);
+ hash_for_each_possible(msm_dev->mn_hash, msm_mn, node,
+ (unsigned long) mm) {
+ if (msm_mn->mm == mm) {
+ if (!msm_gem_mn_get(msm_mn)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ mutex_unlock(&msm_dev->mn_lock);
+ return msm_mn;
+ }
+ }
+
+ msm_mn = kzalloc(sizeof(*msm_mn), GFP_KERNEL);
+ if (!msm_mn) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ msm_mn->mm = current->mm;
+ msm_mn->mn.ops = &msm_mn_ops;
+ ret = mmu_notifier_register(&msm_mn->mn, msm_mn->mm);
+ if (ret) {
+ kfree(msm_mn);
+ goto fail;
+ }
+
+ msm_mn->svm_tree = RB_ROOT;
+ spin_lock_init(&msm_mn->svm_tree_lock);
+ kref_init(&msm_mn->refcount);
+ msm_mn->msm_dev = msm_dev;
+
+ /* Insert the msm_mn into the hash */
+ hash_add(msm_dev->mn_hash, &msm_mn->node, (unsigned long) msm_mn->mm);
+ mutex_unlock(&msm_dev->mn_lock);
+
+ return msm_mn;
+
+fail:
+ mutex_unlock(&msm_dev->mn_lock);
+ return ERR_PTR(ret);
+}
+
+static int msm_gem_mn_register(struct msm_gem_svm_object *msm_svm_obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct drm_gem_object *obj = &msm_svm_obj->msm_obj_base.base;
+ struct msm_drm_private *msm_dev = obj->dev->dev_private;
+ struct msm_mmu_notifier *msm_mn;
+
+ msm_svm_obj->mm = current->mm;
+ msm_svm_obj->svm_node.start = msm_svm_obj->hostptr;
+ msm_svm_obj->svm_node.last = msm_svm_obj->hostptr + obj->size - 1;
+
+ msm_mn = msm_gem_mn_find(msm_dev, msm_svm_obj->mm, aspace);
+ if (IS_ERR(msm_mn))
+ return PTR_ERR(msm_mn);
+
+ msm_svm_obj->msm_mn = msm_mn;
+
+ spin_lock(&msm_mn->svm_tree_lock);
+ interval_tree_insert(&msm_svm_obj->svm_node, &msm_mn->svm_tree);
+ spin_unlock(&msm_mn->svm_tree_lock);
+
+ return 0;
+}
+
+static void msm_gem_mn_unregister(struct msm_gem_svm_object *msm_svm_obj)
+{
+ struct msm_mmu_notifier *msm_mn = msm_svm_obj->msm_mn;
+
+ /* invalid: bo already unregistered */
+ if (!msm_mn || msm_svm_obj->invalid)
+ return;
+
+ spin_lock(&msm_mn->svm_tree_lock);
+ interval_tree_remove(&msm_svm_obj->svm_node, &msm_mn->svm_tree);
+ spin_unlock(&msm_mn->svm_tree_lock);
+}
+
+static int protect_pages(struct msm_gem_object *msm_obj)
+{
+ int perm = PERM_READ | PERM_WRITE;
+ int src = VMID_HLOS;
+ int dst = VMID_CP_PIXEL;
+
+ return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
+static int unprotect_pages(struct msm_gem_object *msm_obj)
+{
+ int perm = PERM_READ | PERM_WRITE | PERM_EXEC;
+ int src = VMID_CP_PIXEL;
+ int dst = VMID_HLOS;
+
+ return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
static void *get_dmabuf_ptr(struct drm_gem_object *obj)
{
return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
@@ -44,8 +186,7 @@ static bool use_pages(struct drm_gem_object *obj)
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
- int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
@@ -57,8 +198,10 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
if (!p)
return ERR_PTR(-ENOMEM);
+ spin_lock(&priv->vram.lock);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
npages, 0, DRM_MM_SEARCH_DEFAULT);
+ spin_unlock(&priv->vram.lock);
if (ret) {
drm_free_large(p);
return ERR_PTR(ret);
@@ -73,7 +216,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
return p;
}
-/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -94,13 +236,15 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
+ msm_obj->pages = p;
+
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
if (IS_ERR(msm_obj->sgt)) {
- dev_err(dev->dev, "failed to allocate sgt\n");
- return ERR_CAST(msm_obj->sgt);
- }
+ void *ptr = ERR_CAST(msm_obj->sgt);
- msm_obj->pages = p;
+ msm_obj->sgt = NULL;
+ return ptr;
+ }
/*
* Make sure to flush the CPU cache for newly allocated memory
@@ -109,24 +253,63 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+
+ /* Secure the pages if we need to */
+ if (use_pages(obj) && msm_obj->flags & MSM_BO_SECURE) {
+ int ret = protect_pages(msm_obj);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * Set a flag to indicate the pages are locked by us and
+ * need to be unlocked when the pages get freed
+ */
+ msm_obj->flags |= MSM_BO_LOCKED;
+ }
}
return msm_obj->pages;
}
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ spin_lock(&priv->vram.lock);
+ drm_mm_remove_node(msm_obj->vram_node);
+ spin_unlock(&priv->vram.lock);
+
+ drm_free_large(msm_obj->pages);
+}
+
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (msm_obj->pages) {
- sg_free_table(msm_obj->sgt);
+ if (msm_obj->flags & MSM_BO_LOCKED) {
+ unprotect_pages(msm_obj);
+ msm_obj->flags &= ~MSM_BO_LOCKED;
+ }
+
+ if (msm_obj->sgt)
+ sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else {
- drm_mm_remove_node(msm_obj->vram_node);
- drm_free_large(msm_obj->pages);
+ if (use_pages(obj)) {
+ if (msm_obj->flags & MSM_BO_SVM) {
+ int npages = obj->size >> PAGE_SHIFT;
+
+ release_pages(msm_obj->pages, npages, 0);
+ kfree(msm_obj->pages);
+ } else {
+ drm_gem_put_pages(obj, msm_obj->pages,
+ true, false);
+ }
+ } else {
+ put_pages_vram(obj);
}
msm_obj->pages = NULL;
@@ -135,11 +318,12 @@ static void put_pages(struct drm_gem_object *obj)
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- mutex_lock(&dev->struct_mutex);
+
+ mutex_lock(&msm_obj->lock);
p = get_pages(obj);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&msm_obj->lock);
return p;
}
@@ -153,6 +337,12 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /* We can't mmap secure objects or SVM objects */
+ if (msm_obj->flags & (MSM_BO_SECURE | MSM_BO_SVM)) {
+ drm_gem_vm_close(vma);
+ return -EACCES;
+ }
+
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
@@ -193,16 +383,17 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int ret;
- /* Make sure we don't parallel update on a fault, nor move or remove
- * something from beneath our feet
+ /*
+ * vm_ops.open and close get and put a reference on obj.
+ * So, we don't need to hold one here.
*/
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&msm_obj->lock);
if (ret)
goto out;
@@ -225,7 +416,7 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
out_unlock:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&msm_obj->lock);
out:
switch (ret) {
case -EAGAIN:
@@ -249,9 +440,10 @@ out:
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -267,9 +459,11 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
- mutex_lock(&obj->dev->struct_mutex);
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
offset = mmap_offset(obj);
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&msm_obj->lock);
return offset;
}
@@ -281,19 +475,26 @@ static void obj_remove_domain(struct msm_gem_vma *domain)
}
}
+/* Called with msm_obj->lock held */
static void
put_iova(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_svm_object *msm_svm_obj;
struct msm_gem_vma *domain, *tmp;
+ bool invalid = false;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+ if (msm_obj->flags & MSM_BO_SVM) {
+ msm_svm_obj = to_msm_svm_obj(msm_obj);
+ invalid = msm_svm_obj->invalid;
+ }
list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
if (iommu_present(&platform_bus_type)) {
msm_gem_unmap_vma(domain->aspace, domain,
- msm_obj->sgt, get_dmabuf_ptr(obj));
+ msm_obj->sgt, get_dmabuf_ptr(obj), invalid);
}
obj_remove_domain(domain);
@@ -334,14 +535,8 @@ static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
#define IOMMU_PRIV 0
#endif
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+/* A reference to obj must be held before calling this function. */
+int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -349,13 +544,18 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_vma *domain;
int ret = 0;
+ mutex_lock(&msm_obj->lock);
+
if (!iommu_present(&platform_bus_type)) {
pages = get_pages(obj);
- if (IS_ERR(pages))
+ if (IS_ERR(pages)) {
+ mutex_unlock(&msm_obj->lock);
return PTR_ERR(pages);
+ }
*iova = (uint64_t) physaddr(obj);
+ mutex_unlock(&msm_obj->lock);
return 0;
}
@@ -363,12 +563,15 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
if (!domain) {
domain = obj_add_domain(obj, aspace);
- if (IS_ERR(domain))
+ if (IS_ERR(domain)) {
+ mutex_unlock(&msm_obj->lock);
return PTR_ERR(domain);
+ }
pages = get_pages(obj);
if (IS_ERR(pages)) {
obj_remove_domain(domain);
+ mutex_unlock(&msm_obj->lock);
return PTR_ERR(pages);
}
@@ -381,26 +584,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj,
else
obj_remove_domain(domain);
- return ret;
-}
-
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
-{
- struct msm_gem_vma *domain;
- int ret;
-
- domain = obj_get_domain(obj, aspace);
- if (domain) {
- *iova = domain->iova;
- return 0;
- }
-
- mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, aspace, iova);
- mutex_unlock(&obj->dev->struct_mutex);
- return ret;
+ mutex_unlock(&msm_obj->lock);
+ return 0;
}
/* get iova without taking a reference, used in places where you have
@@ -409,11 +594,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
- struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+ uint64_t iova;
+ mutex_lock(&msm_obj->lock);
+ domain = obj_get_domain(obj, aspace);
WARN_ON(!domain);
+ iova = domain ? domain->iova : 0;
+ mutex_unlock(&msm_obj->lock);
- return domain ? domain->iova : 0;
+ return iova;
}
void msm_gem_put_iova(struct drm_gem_object *obj,
@@ -457,27 +648,23 @@ fail:
return ret;
}
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_vaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ mutex_lock(&msm_obj->lock);
if (!msm_obj->vaddr) {
struct page **pages = get_pages(obj);
- if (IS_ERR(pages))
+ if (IS_ERR(pages)) {
+ mutex_unlock(&msm_obj->lock);
return ERR_CAST(pages);
+ }
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
- return msm_obj->vaddr;
-}
+ mutex_unlock(&msm_obj->lock);
-void *msm_gem_vaddr(struct drm_gem_object *obj)
-{
- void *ret;
- mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_vaddr_locked(obj);
- mutex_unlock(&obj->dev->struct_mutex);
- return ret;
+ return msm_obj->vaddr;
}
/* setup callback for when bo is no longer busy..
@@ -546,6 +733,26 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
return 0;
}
+void msm_gem_sync(struct drm_gem_object *obj, u32 op)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ return;
+
+ switch (op) {
+ case MSM_GEM_SYNC_TO_CPU:
+ dma_sync_sg_for_cpu(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ break;
+ case MSM_GEM_SYNC_TO_DEV:
+ dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ break;
+ }
+}
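/*
 * Illustrative usage (not part of this patch): a caller working on a
 * CPU-cached bo would bracket its access with the two sync ops, e.g.:
 *
 *   msm_gem_sync(obj, MSM_GEM_SYNC_TO_CPU);  // invalidate before reading
 *   memcpy(data, msm_gem_vaddr(obj), size);  // CPU access
 *   msm_gem_sync(obj, MSM_GEM_SYNC_TO_DEV);  // flush before GPU reuse
 *
 * WC/uncached objects return early above, since they need no maintenance.
 */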
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
@@ -590,21 +797,32 @@ void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_svm_object *msm_svm_obj = NULL;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* object should not be on active list: */
WARN_ON(is_active(msm_obj));
+ if (msm_obj->flags & MSM_BO_SVM)
+ msm_svm_obj = to_msm_svm_obj(msm_obj);
+
list_del(&msm_obj->mm_list);
+ /* Unregister SVM object from mmu notifications */
+ if (msm_obj->flags & MSM_BO_SVM) {
+ msm_gem_mn_unregister(msm_svm_obj);
+ msm_gem_mn_put(msm_svm_obj->msm_mn);
+ msm_svm_obj->msm_mn = NULL;
+ }
+
+ mutex_lock(&msm_obj->lock);
put_iova(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
dma_buf_vunmap(obj->import_attach->dmabuf,
msm_obj->vaddr);
-
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
@@ -621,8 +839,12 @@ void msm_gem_free_object(struct drm_gem_object *obj)
reservation_object_fini(msm_obj->resv);
drm_gem_object_release(obj);
+ mutex_unlock(&msm_obj->lock);
- kfree(msm_obj);
+ if (msm_obj->flags & MSM_BO_SVM)
+ kfree(msm_svm_obj);
+ else
+ kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
@@ -632,13 +854,28 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
struct drm_gem_object *obj;
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
obj = msm_gem_new(dev, size, flags);
- mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+/* convenience method to construct an SVM buffer object, and userspace handle */
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint64_t hostptr, uint64_t size,
+ uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = msm_gem_svm_new(dev, file, hostptr, size, flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -651,12 +888,11 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
-static int msm_gem_new_impl(struct drm_device *dev,
+static int msm_gem_obj_init(struct drm_device *dev,
uint32_t size, uint32_t flags,
- struct drm_gem_object **obj)
+ struct msm_gem_object *msm_obj, bool struct_mutex_locked)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_gem_object *msm_obj;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -678,9 +914,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
- if (!msm_obj)
- return -ENOMEM;
+ mutex_init(&msm_obj->lock);
if (use_vram) {
struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
@@ -694,29 +928,58 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->resv = &msm_obj->_resv;
reservation_object_init(msm_obj->resv);
+ INIT_LIST_HEAD(&msm_obj->mm_list);
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->domains);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-
- *obj = &msm_obj->base;
+ if (struct_mutex_locked) {
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ } else {
+ mutex_lock(&dev->struct_mutex);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ mutex_unlock(&dev->struct_mutex);
+ }
return 0;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags)
+static struct drm_gem_object *msm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
- struct drm_gem_object *obj = NULL;
+ struct msm_gem_object *msm_obj;
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ if (!msm_obj)
+ return ERR_PTR(-ENOMEM);
+
+ ret = msm_gem_obj_init(dev, size, flags, msm_obj, struct_mutex_locked);
+ if (ret) {
+ kfree(msm_obj);
+ return ERR_PTR(ret);
+ }
+
+ return &msm_obj->base;
+}
+
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags, bool struct_mutex_locked)
+{
+ struct drm_gem_object *obj;
+ int ret;
size = PAGE_ALIGN(size);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
- if (ret)
- goto fail;
+ /*
+ * Disallow zero sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ obj = msm_gem_new_impl(dev, size, flags, struct_mutex_locked);
+ if (IS_ERR(obj))
+ return obj;
if (use_pages(obj)) {
ret = drm_gem_object_init(dev, obj, size);
@@ -729,14 +992,162 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ return _msm_gem_new(dev, size, flags, true);
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ return _msm_gem_new(dev, size, flags, false);
+}
+
+static struct drm_gem_object *msm_svm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct msm_gem_object *msm_obj;
+ int ret;
+
+ msm_svm_obj = kzalloc(sizeof(*msm_svm_obj), GFP_KERNEL);
+ if (!msm_svm_obj)
+ return ERR_PTR(-ENOMEM);
+
+ msm_obj = &msm_svm_obj->msm_obj_base;
+
+ ret = msm_gem_obj_init(dev, size, flags | MSM_BO_SVM, msm_obj, false);
+ if (ret) {
+ kfree(msm_svm_obj);
+ return ERR_PTR(ret);
+ }
+
+ return &msm_obj->base;
+}
+
+/* convenience method to construct an SVM GEM bo for a user address range */
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+ struct drm_file *file, uint64_t hostptr,
+ uint64_t size, uint32_t flags)
+{
+ struct drm_gem_object *obj;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_gem_address_space *aspace = ctx->aspace;
+ struct msm_gem_object *msm_obj;
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct msm_gem_vma *domain = NULL;
+ struct page **p;
+ int npages;
+ int num_pinned = 0;
+ int write;
+ int ret;
+
+ /* if we don't have IOMMU, don't bother pretending we can import: */
+ if (!iommu_present(&platform_bus_type)) {
+ dev_err_once(dev->dev, "cannot import without IOMMU\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* hostptr and size must be page-aligned */
+ if (offset_in_page(hostptr | size))
+ return ERR_PTR(-EINVAL);
+
+ /* Only CPU cached SVM objects are allowed */
+ if ((flags & MSM_BO_CACHE_MASK) != MSM_BO_CACHED)
+ return ERR_PTR(-EINVAL);
+
+ /* Allocate and initialize a new msm_gem_object */
+ obj = msm_svm_gem_new_impl(dev, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ msm_obj = to_msm_bo(obj);
+ domain = obj_add_domain(&msm_obj->base, aspace);
+ if (IS_ERR(domain)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_CAST(domain);
+ }
+
+ /* Reserve iova if not already in use, else fail */
+ ret = msm_gem_reserve_iova(aspace, domain, hostptr, size);
+ if (ret) {
+ obj_remove_domain(domain);
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+ }
+
+ msm_svm_obj = to_msm_svm_obj(msm_obj);
+ msm_svm_obj->hostptr = hostptr;
+ msm_svm_obj->invalid = false;
+
+ ret = msm_gem_mn_register(msm_svm_obj, aspace);
+ if (ret)
+ goto fail;
+
+ /*
+ * Get physical pages and map into smmu in the ioctl itself.
+ * The driver handles iova allocation, physical page allocation and
+ * SMMU map all in one go. If we break this, then we have to maintain
+ * state to tell if physical pages allocation/map needs to happen.
+ * For SVM, iova reservation needs to happen in the ioctl itself,
+ * so do the rest right here as well.
+ */
+ npages = size >> PAGE_SHIFT;
+ p = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ write = (msm_obj->flags & MSM_BO_GPU_READONLY) ? 0 : 1;
+ /* This may hold mm->mmap_sem */
+ num_pinned = get_user_pages_fast(hostptr, npages, write, p);
+ if (num_pinned != npages) {
+ ret = -EINVAL;
+ goto free_pages;
+ }
+
+ msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ if (IS_ERR(msm_obj->sgt)) {
+ ret = PTR_ERR(msm_obj->sgt);
+ goto free_pages;
+ }
+
+ msm_obj->pages = p;
+
+ ret = aspace->mmu->funcs->map(aspace->mmu, domain->iova,
+ msm_obj->sgt, msm_obj->flags, get_dmabuf_ptr(obj));
+ if (ret)
+ goto free_pages;
+
+ kref_get(&aspace->kref);
+
+ return obj;
+
+free_pages:
+ release_pages(p, num_pinned, 0);
+ kfree(p);
+
+fail:
+ if (domain)
+ msm_gem_release_iova(aspace, domain);
+
+ obj_remove_domain(domain);
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
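/*
 * Illustrative sketch (not part of this patch): the corresponding ioctl
 * glue is expected to be a thin wrapper around msm_gem_svm_new_handle();
 * the ioctl name and uapi struct below are assumptions, not taken from
 * this hunk:
 *
 *   static int msm_ioctl_gem_svm_new(struct drm_device *dev, void *data,
 *                                    struct drm_file *file)
 *   {
 *       struct drm_msm_gem_svm_new *args = data;  // assumed uapi struct
 *
 *       return msm_gem_svm_new_handle(dev, file, args->hostptr,
 *                                     args->size, args->flags,
 *                                     &args->handle);
 *   }
 */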
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- uint32_t size, struct sg_table *sgt)
+ uint32_t size, struct sg_table *sgt, u32 flags)
{
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
@@ -750,34 +1161,119 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(size);
- mutex_lock(&dev->struct_mutex);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
- mutex_unlock(&dev->struct_mutex);
-
- if (ret)
- goto fail;
+ obj = msm_gem_new_impl(dev, size, MSM_BO_WC, false);
+ if (IS_ERR(obj))
+ return obj;
drm_gem_private_object_init(dev, obj, size);
npages = size / PAGE_SIZE;
msm_obj = to_msm_bo(obj);
+ mutex_lock(&msm_obj->lock);
msm_obj->sgt = sgt;
msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
if (!msm_obj->pages) {
+ mutex_unlock(&msm_obj->lock);
ret = -ENOMEM;
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
- if (ret)
+ /* OR the passed in flags */
+ msm_obj->flags |= flags;
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages,
+ NULL, npages);
+ if (ret) {
+ mutex_unlock(&msm_obj->lock);
goto fail;
+ }
+
+ mutex_unlock(&msm_obj->lock);
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
+
+/* Timeout in ms, long enough to be sure the GPU is hung */
+#define SVM_OBJ_WAIT_TIMEOUT 10000
+static void invalidate_svm_object(struct msm_gem_svm_object *msm_svm_obj)
+{
+ struct msm_gem_object *msm_obj = &msm_svm_obj->msm_obj_base;
+ struct drm_device *dev = msm_obj->base.dev;
+ struct msm_gem_vma *domain, *tmp;
+ uint32_t fence;
+ int ret;
+
+ if (is_active(msm_obj)) {
+ ktime_t timeout = ktime_add_ms(ktime_get(),
+ SVM_OBJ_WAIT_TIMEOUT);
+
+ /* Get the most recent fence that touches the object */
+ fence = msm_gem_fence(msm_obj, MSM_PREP_READ | MSM_PREP_WRITE);
+
+ /* Wait for the fence to retire */
+ ret = msm_wait_fence(dev, fence, &timeout, true);
+ if (ret)
+ /* The GPU could be hung! Not much we can do */
+ dev_err(dev->dev, "drm: Error (%d) waiting for svm object: 0x%llx\n",
+ ret, msm_svm_obj->hostptr);
+ }
+
+ /* GPU is done, unmap object from SMMU */
+ mutex_lock(&msm_obj->lock);
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ struct msm_gem_address_space *aspace = domain->aspace;
+
+ if (domain->iova)
+ aspace->mmu->funcs->unmap(aspace->mmu,
+ domain->iova, msm_obj->sgt,
+ get_dmabuf_ptr(&msm_obj->base));
+ }
+ /* Let go of the physical pages */
+ put_pages(&msm_obj->base);
+ mutex_unlock(&msm_obj->lock);
+}
+
+void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ struct msm_mmu_notifier *msm_mn =
+ container_of(mn, struct msm_mmu_notifier, mn);
+ struct interval_tree_node *itn = NULL;
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct drm_gem_object *obj;
+ LIST_HEAD(inv_list);
+
+ if (!msm_gem_mn_get(msm_mn))
+ return;
+
+ spin_lock(&msm_mn->svm_tree_lock);
+ itn = interval_tree_iter_first(&msm_mn->svm_tree, start, end - 1);
+ while (itn) {
+ msm_svm_obj = container_of(itn,
+ struct msm_gem_svm_object, svm_node);
+ obj = &msm_svm_obj->msm_obj_base.base;
+
+ if (kref_get_unless_zero(&obj->refcount))
+ list_add(&msm_svm_obj->lnode, &inv_list);
+
+ itn = interval_tree_iter_next(itn, start, end - 1);
+ }
+ spin_unlock(&msm_mn->svm_tree_lock);
+
+ list_for_each_entry(msm_svm_obj, &inv_list, lnode) {
+ obj = &msm_svm_obj->msm_obj_base.base;
+ /* Unregister SVM object from mmu notifications */
+ msm_gem_mn_unregister(msm_svm_obj);
+ msm_svm_obj->invalid = true;
+ invalidate_svm_object(msm_svm_obj);
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ msm_gem_mn_put(msm_mn);
+}
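/*
 * Sketch (not part of this patch): the callback above is wired into the
 * core mm through a struct mmu_notifier_ops instance, presumably when
 * msm_gem_mn_find() creates the per-mm msm_mmu_notifier (that helper is
 * not in this hunk), roughly:
 *
 *   static const struct mmu_notifier_ops msm_mn_ops = {
 *       .invalidate_range_start = msm_mn_invalidate_range_start,
 *   };
 *
 *   msm_mn->mn.ops = &msm_mn_ops;
 *   ret = mmu_notifier_register(&msm_mn->mn, mm);
 */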
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index ac46c473791f..04e6c658b5f3 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -20,26 +20,22 @@
#include <linux/kref.h>
#include <linux/reservation.h>
+#include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
-
-struct msm_gem_aspace_ops {
- int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
- struct sg_table *sgt, void *priv, unsigned int flags);
-
- void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
- struct sg_table *sgt, void *priv);
-
- void (*destroy)(struct msm_gem_address_space *);
-};
+#define MSM_BO_LOCKED 0x20000000 /* Pages have been securely locked */
+#define MSM_BO_SVM 0x40000000 /* bo is SVM */
struct msm_gem_address_space {
const char *name;
struct msm_mmu *mmu;
- const struct msm_gem_aspace_ops *ops;
struct kref kref;
+ struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
+ u64 va_len;
};
struct msm_gem_vma {
@@ -88,9 +84,36 @@ struct msm_gem_object {
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
+ struct mutex lock; /* Protects resources associated with bo */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+struct msm_mmu_notifier {
+ struct mmu_notifier mn;
+ struct mm_struct *mm; /* mm_struct owning the mmu notifier mn */
+ struct hlist_node node;
+ struct rb_root svm_tree; /* interval tree holding all svm bos */
+ spinlock_t svm_tree_lock; /* Protects svm_tree */
+ struct msm_drm_private *msm_dev;
+ struct kref refcount;
+};
+
+struct msm_gem_svm_object {
+ struct msm_gem_object msm_obj_base;
+ uint64_t hostptr;
+ struct mm_struct *mm; /* mm_struct the svm bo belongs to */
+ struct interval_tree_node svm_node;
+ struct msm_mmu_notifier *msm_mn;
+ struct list_head lnode;
+ /* bo has been unmapped on CPU, cannot be part of GPU submits */
+ bool invalid;
+};
+
+#define to_msm_svm_obj(x) \
+ ((struct msm_gem_svm_object *) \
+ container_of(x, struct msm_gem_svm_object, msm_obj_base))
+
+
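/*
 * Note (not part of this patch): the base object is embedded, so a single
 * allocation can be viewed as all three types via container_of():
 *
 *   struct msm_gem_svm_object *svm_obj = ...;
 *   struct msm_gem_object *msm_obj = &svm_obj->msm_obj_base;
 *   struct drm_gem_object *obj = &msm_obj->base;
 *
 *   WARN_ON(to_msm_svm_obj(to_msm_bo(obj)) != svm_obj);
 *
 * msm_gem_free_object() keys off MSM_BO_SVM to kfree() the correct
 * containing structure.
 */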
static inline bool is_active(struct msm_gem_object *msm_obj)
{
return msm_obj->gpu != NULL;
@@ -109,8 +132,6 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
return fence;
}
-#define MAX_CMDS 4
-
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
* make it easier to unwind when things go wrong, etc). This only
@@ -125,6 +146,9 @@ struct msm_gem_submit {
uint32_t fence;
int ring;
bool valid;
+ uint64_t profile_buf_iova;
+ void *profile_buf_vaddr;
+ bool secure;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
@@ -132,7 +156,7 @@ struct msm_gem_submit {
uint32_t size; /* in dwords */
uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
- } cmd[MAX_CMDS];
+ } *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 121975b07cd4..678018804f3a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -19,6 +19,7 @@
#include "msm_gem.h"
#include <linux/dma-buf.h>
+#include <linux/ion.h>
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -55,7 +56,16 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
- return msm_gem_import(dev, attach->dmabuf->size, sg);
+ u32 flags = 0;
+
+ /*
+ * Check to see if this is a secure buffer by way of Ion and set the
+ * appropriate flag if so.
+ */
+ if (ion_dma_buf_is_secure(attach->dmabuf))
+ flags |= MSM_BO_SECURE;
+
+ return msm_gem_import(dev, attach->dmabuf->size, sg, flags);
}
int msm_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 0566cefaae81..c861bfd77537 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,10 +34,15 @@ static inline void __user *to_user_ptr(u64 address)
}
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gem_address_space *aspace, int nr)
+ struct msm_gem_address_space *aspace,
+ uint32_t nr_bos, uint32_t nr_cmds)
{
struct msm_gem_submit *submit;
- int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+ uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
+ (nr_cmds * sizeof(submit->cmd[0]));
+
+ if (sz > SIZE_MAX)
+ return NULL;
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
@@ -48,6 +53,12 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->nr_bos = 0;
submit->nr_cmds = 0;
+ submit->profile_buf_vaddr = NULL;
+ submit->profile_buf_iova = 0;
+ submit->cmd = (void *)&submit->bos[nr_bos];
+
+ submit->secure = false;
+
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
}
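/*
 * Layout sketch (not part of this patch): submit_create() carves both
 * variable-length arrays out of a single allocation:
 *
 *   [ struct msm_gem_submit | bos[0..nr_bos-1] | cmd[0..nr_cmds-1] ]
 *
 *   submit->cmd = (void *)&submit->bos[nr_bos];
 *
 * cmd[] starts right after the last bos[] entry, so one kmalloc()/kfree()
 * pair covers everything and the old MAX_CMDS limit goes away. The
 * SIZE_MAX check guards 32-bit builds, where the 64-bit sz could
 * overflow size_t.
 */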
@@ -63,7 +74,8 @@ copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return -EFAULT;
}
-static int submit_lookup_objects(struct msm_gem_submit *submit,
+static int submit_lookup_objects(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
@@ -79,13 +91,16 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
to_user_ptr(args->bos + (i * sizeof(submit_bo)));
- ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
- if (unlikely(ret)) {
+ if (copy_from_user_inatomic(&submit_bo, userptr,
+ sizeof(submit_bo))) {
pagefault_enable();
spin_unlock(&file->table_lock);
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret)
+ if (copy_from_user(&submit_bo, userptr,
+ sizeof(submit_bo))) {
+ ret = -EFAULT;
goto out;
+ }
+
spin_lock(&file->table_lock);
pagefault_disable();
}
@@ -113,6 +128,20 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
msm_obj = to_msm_bo(obj);
+ /*
+ * If the buffer is marked as secure, make sure that we can
+ * handle secure buffers, and then mark the submission as secure.
+ */
+ if (msm_obj->flags & MSM_BO_SECURE) {
+ if (!gpu->secure_aspace) {
+ DRM_ERROR("Cannot handle secure buffers\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->secure = true;
+ }
+
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
submit_bo.handle, i);
@@ -137,12 +166,17 @@ out:
return ret;
}
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+static void submit_unlock_unpin_bo(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, int i)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct msm_gem_address_space *aspace;
+
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->aspace);
+ msm_gem_put_iova(&msm_obj->base, aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -154,7 +188,8 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
}
/* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_validate_objects(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit)
{
int contended, slow_locked = -1, i, ret = 0;
@@ -163,8 +198,12 @@ retry:
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct msm_gem_address_space *aspace;
uint64_t iova;
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
+
if (slow_locked == i)
slow_locked = -1;
@@ -178,10 +217,21 @@ retry:
submit->bos[i].flags |= BO_LOCKED;
}
+ /*
+ * Fail if an invalid SVM object is part of
+ * this submit's buffer list.
+ */
+ if (msm_obj->flags & MSM_BO_SVM) {
+ struct msm_gem_svm_object *msm_svm_obj =
+ to_msm_svm_obj(msm_obj);
+ if (msm_svm_obj->invalid) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->aspace, &iova);
+ ret = msm_gem_get_iova(&msm_obj->base, aspace, &iova);
/* this would break the logic in the fail path.. there is no
* reason for this to happen, but just to be on the safe side
@@ -209,10 +259,10 @@ retry:
fail:
for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(gpu, submit, i);
if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked);
+ submit_unlock_unpin_bo(gpu, submit, slow_locked);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -261,10 +311,15 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EINVAL;
}
+ if (obj->flags & MSM_BO_SECURE) {
+ DRM_ERROR("cannot do relocs on a secure buffer\n");
+ return -EINVAL;
+ }
+
/* For now, just map the entire thing. Eventually we probably
* want to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr_locked(&obj->base);
+ ptr = msm_gem_vaddr(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -280,8 +335,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t off;
bool valid;
- ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
- if (ret)
+ if (copy_from_user(&submit_reloc, userptr,
+ sizeof(submit_reloc)))
return -EFAULT;
if (submit_reloc.submit_offset % 4) {
@@ -321,13 +376,14 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return 0;
}
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ bool fail)
{
unsigned i;
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(gpu, submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -356,22 +412,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;
- if (args->nr_cmds > MAX_CMDS)
- return -EINVAL;
-
mutex_lock(&dev->struct_mutex);
- submit = submit_create(dev, ctx->aspace, args->nr_bos);
+ submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds);
if (!submit) {
ret = -ENOMEM;
goto out;
}
- ret = submit_lookup_objects(submit, args, file);
+ ret = submit_lookup_objects(gpu, submit, args, file);
if (ret)
goto out;
- ret = submit_validate_objects(submit);
+ ret = submit_validate_objects(gpu, submit);
if (ret)
goto out;
@@ -393,6 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
case MSM_SUBMIT_CMD_BUF:
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ case MSM_SUBMIT_CMD_PROFILE_BUF:
break;
default:
DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
@@ -425,6 +479,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->cmd[i].iova = iova + submit_cmd.submit_offset;
submit->cmd[i].idx = submit_cmd.submit_idx;
+ if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
+ submit->profile_buf_iova = submit->cmd[i].iova;
+ submit->profile_buf_vaddr =
+ msm_gem_vaddr(&msm_obj->base);
+ }
+
if (submit->valid)
continue;
@@ -447,7 +507,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
- submit_cleanup(submit, !!ret);
+ submit_cleanup(gpu, submit, !!ret);
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index a227f1ba0573..f399d24019e4 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -25,8 +25,10 @@ msm_gem_address_space_destroy(struct kref *kref)
struct msm_gem_address_space *aspace = container_of(kref,
struct msm_gem_address_space, kref);
- if (aspace->ops->destroy)
- aspace->ops->destroy(aspace);
+ if (aspace->va_len)
+ drm_mm_takedown(&aspace->mm);
+
+ aspace->mmu->funcs->destroy(aspace->mmu);
kfree(aspace);
}
@@ -37,57 +39,9 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-/* SDE address space operations */
-static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv)
-{
- struct dma_buf *buf = priv;
-
- if (buf)
- aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
- sgt, buf, DMA_BIDIRECTIONAL);
- else
- aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
- DMA_BIDIRECTIONAL);
-
- vma->iova = 0;
-
- msm_gem_address_space_put(aspace);
-}
-
-
-static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv, unsigned int flags)
-{
- struct dma_buf *buf = priv;
- int ret;
-
- if (buf)
- ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
- DMA_BIDIRECTIONAL);
- else
- ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
- DMA_BIDIRECTIONAL);
-
- if (!ret)
- vma->iova = sg_dma_address(sgt->sgl);
-
- /* Get a reference to the aspace to keep it around */
- kref_get(&aspace->kref);
-
- return ret;
-}
-
-static const struct msm_gem_aspace_ops smmu_aspace_ops = {
- .map = smmu_aspace_map_vma,
- .unmap = smmu_aspace_unmap_vma,
-};
-
-struct msm_gem_address_space *
-msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
- const char *name)
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+ uint64_t start, uint64_t end)
{
struct msm_gem_address_space *aspace;
@@ -98,144 +52,148 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
if (!aspace)
return ERR_PTR(-ENOMEM);
+ spin_lock_init(&aspace->lock);
aspace->name = name;
aspace->mmu = mmu;
- aspace->ops = &smmu_aspace_ops;
-
- kref_init(&aspace->kref);
-
- return aspace;
-}
-
-/* GPU address space operations */
-struct msm_iommu_aspace {
- struct msm_gem_address_space base;
- struct drm_mm mm;
-};
-
-#define to_iommu_aspace(aspace) \
- ((struct msm_iommu_aspace *) \
- container_of(aspace, struct msm_iommu_aspace, base))
-
-static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
-{
- if (!vma->iova)
- return;
- if (aspace->mmu)
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+ aspace->va_len = end - start;
- drm_mm_remove_node(&vma->node);
+ if (aspace->va_len)
+ drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
+ (end >> PAGE_SHIFT) - 1);
- vma->iova = 0;
+ kref_init(&aspace->kref);
- msm_gem_address_space_put(aspace);
+ return aspace;
}
-static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
- unsigned int flags)
+static int allocate_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ u64 *iova)
{
- struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
- size_t size = 0;
struct scatterlist *sg;
+ size_t size = 0;
int ret, i;
- int iommu_flags = IOMMU_READ;
-
- if (!(flags & MSM_BO_GPU_READONLY))
- iommu_flags |= IOMMU_WRITE;
-
- if (flags & MSM_BO_PRIVILEGED)
- iommu_flags |= IOMMU_PRIV;
- if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(aspace->mmu))
- iommu_flags |= IOMMU_CACHE;
-
- if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ if (!aspace->va_len)
return 0;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
size += sg->length + sg->offset;
- ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
- 0, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- return ret;
+ spin_lock(&aspace->lock);
- vma->iova = vma->node.start << PAGE_SHIFT;
+ if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+ spin_unlock(&aspace->lock);
+ return 0;
+ }
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node,
+ size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_BOTTOM_UP);
- if (aspace->mmu)
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- iommu_flags);
+ spin_unlock(&aspace->lock);
- /* Get a reference to the aspace to keep it around */
- kref_get(&aspace->kref);
+ if (!ret && iova)
+ *iova = vma->node.start << PAGE_SHIFT;
return ret;
}
-static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma,
+ uint64_t hostptr, uint64_t size)
{
- struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+ struct drm_mm *mm = &aspace->mm;
+ uint64_t start = hostptr >> PAGE_SHIFT;
+ uint64_t last = (hostptr + size - 1) >> PAGE_SHIFT;
+ int ret;
- drm_mm_takedown(&local->mm);
- aspace->mmu->funcs->destroy(aspace->mmu);
+ spin_lock(&aspace->lock);
+
+ if (drm_mm_interval_first(mm, start, last)) {
+ /* iova already in use, fail */
+ spin_unlock(&aspace->lock);
+ return -EADDRINUSE;
+ }
+
+ vma->node.start = hostptr >> PAGE_SHIFT;
+ vma->node.size = size >> PAGE_SHIFT;
+ vma->node.color = 0;
+
+ ret = drm_mm_reserve_node(mm, &vma->node);
+ if (!ret)
+ vma->iova = hostptr;
+
+ spin_unlock(&aspace->lock);
+
+ return ret;
}
-static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
- .map = iommu_aspace_map_vma,
- .unmap = iommu_aspace_unmap_vma,
- .destroy = iommu_aspace_destroy,
-};
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ spin_lock(&aspace->lock);
+ if (drm_mm_node_allocated(&vma->node))
+ drm_mm_remove_node(&vma->node);
+ spin_unlock(&aspace->lock);
+}
-static struct msm_gem_address_space *
-msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
- uint64_t start, uint64_t end)
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
{
- struct msm_iommu_aspace *local;
+ u64 iova = 0;
+ int ret;
- if (!mmu)
- return ERR_PTR(-EINVAL);
+ if (!aspace)
+ return -EINVAL;
- local = kzalloc(sizeof(*local), GFP_KERNEL);
- if (!local)
- return ERR_PTR(-ENOMEM);
+ ret = allocate_iova(aspace, vma, sgt, &iova);
+ if (ret)
+ return ret;
- drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
- (end >> PAGE_SHIFT) - 1);
+ ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
+ flags, priv);
- local->base.name = name;
- local->base.mmu = mmu;
- local->base.ops = &msm_iommu_aspace_ops;
+ if (ret) {
+ msm_gem_release_iova(aspace, vma);
+ return ret;
+ }
- kref_init(&local->base.kref);
+ vma->iova = sg_dma_address(sgt->sgl);
+ kref_get(&aspace->kref);
- return &local->base;
+ return 0;
}
-int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv, unsigned int flags)
+ void *priv, bool invalidated)
{
- if (aspace && aspace->ops->map)
- return aspace->ops->map(aspace, vma, sgt, priv, flags);
+ if (!aspace || !vma->iova)
+ return;
+
+ if (!invalidated)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
+
+ msm_gem_release_iova(aspace, vma);
+
+ vma->iova = 0;
- return -EINVAL;
+ msm_gem_address_space_put(aspace);
}
-void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name)
{
- if (aspace && aspace->ops->unmap)
- aspace->ops->unmap(aspace, vma, sgt, priv);
+ return msm_gem_address_space_new(mmu, name, 0, 0);
}
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name)
+ int type, const char *name)
{
- struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+ struct msm_mmu *mmu = msm_iommu_new(dev, type, domain);
if (IS_ERR(mmu))
return (struct msm_gem_address_space *) mmu;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5a505a8bf328..81bab9cc22af 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -183,6 +183,9 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (gpu->aspace && gpu->aspace->mmu)
+ msm_mmu_enable(gpu->aspace->mmu);
+
return 0;
}
@@ -203,6 +206,9 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (WARN_ON(gpu->active_cnt < 0))
return -EINVAL;
+ if (gpu->aspace && gpu->aspace->mmu)
+ msm_mmu_disable(gpu->aspace->mmu);
+
ret = disable_axi(gpu);
if (ret)
return ret;
@@ -541,7 +547,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->rb[submit->ring];
- int i, ret;
+ int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -566,12 +572,15 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
if (!is_active(msm_obj)) {
+ struct msm_gem_address_space *aspace;
uint64_t iova;
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
+
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&msm_obj->base);
- msm_gem_get_iova_locked(&msm_obj->base,
- submit->aspace, &iova);
+ msm_gem_get_iova(&msm_obj->base, aspace, &iova);
}
if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -580,11 +589,11 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
- ret = gpu->funcs->submit(gpu, submit);
+ gpu->funcs->submit(gpu, submit);
hangcheck_timer_reset(gpu);
- return ret;
+ return 0;
}
struct msm_context_counter {
@@ -751,11 +760,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
+ int type, u64 start, u64 end, const char *name)
+{
+ struct msm_gem_address_space *aspace;
+ struct iommu_domain *iommu;
+
+ /*
+ * If start == end then assume we don't want an address space; this is
+ * mainly so targets can opt out of the secure address space
+ */
+ if (start == end)
+ return NULL;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu) {
+ dev_info(gpu->dev->dev,
+ "%s: no IOMMU, fallback to VRAM carveout!\n",
+ gpu->name);
+ return NULL;
+ }
+
+ iommu->geometry.aperture_start = start;
+ iommu->geometry.aperture_end = end;
+
+ dev_info(gpu->dev->dev, "%s: using IOMMU '%s'\n", gpu->name, name);
+
+ aspace = msm_gem_address_space_create(dev, iommu, type, name);
+ if (IS_ERR(aspace)) {
+ dev_err(gpu->dev->dev, "%s: failed to init IOMMU '%s': %ld\n",
+ gpu->name, name, PTR_ERR(aspace));
+
+ iommu_domain_free(iommu);
+ aspace = NULL;
+ }
+
+ return aspace;
+}
+
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
{
- struct iommu_domain *iommu;
int i, ret, nr_rings;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
@@ -825,30 +872,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- /* Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (iommu) {
- /* TODO 32b vs 64b address space.. */
- iommu->geometry.aperture_start = config->va_start;
- iommu->geometry.aperture_end = config->va_end;
-
- dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->aspace = msm_gem_address_space_create(&pdev->dev,
- iommu, "gpu");
- if (IS_ERR(gpu->aspace)) {
- ret = PTR_ERR(gpu->aspace);
- dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->aspace = NULL;
- iommu_domain_free(iommu);
- goto fail;
- }
+ gpu->aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+ MSM_IOMMU_DOMAIN_USER, config->va_start, config->va_end,
+ "gpu");
- } else {
- dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
- }
+ gpu->secure_aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+ MSM_IOMMU_DOMAIN_SECURE, config->secure_va_start,
+ config->secure_va_end, "gpu_secure");
nr_rings = config->nr_rings;
@@ -859,10 +889,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Create ringbuffer(s): */
for (i = 0; i < nr_rings; i++) {
- mutex_lock(&drm->struct_mutex);
- gpu->rb[i] = msm_ringbuffer_new(gpu, i);
- mutex_unlock(&drm->struct_mutex);
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i);
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
gpu->rb[i] = NULL;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 3fac423929c5..a47eae68dd9b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -29,12 +29,17 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+#define MSM_GPU_DEFAULT_IONAME "kgsl_3d0_reg_memory"
+#define MSM_GPU_DEFAULT_IRQNAME "kgsl_3d0_irq"
+
struct msm_gpu_config {
const char *ioname;
const char *irqname;
int nr_rings;
uint64_t va_start;
uint64_t va_end;
+ uint64_t secure_va_start;
+ uint64_t secure_va_end;
};
/* So far, with hardware that I've seen to date, we can have:
@@ -56,7 +61,7 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+ void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
uint32_t (*last_fence)(struct msm_gpu *gpu,
@@ -111,6 +116,7 @@ struct msm_gpu {
int irq;
struct msm_gem_address_space *aspace;
+ struct msm_gem_address_space *secure_aspace;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 3af24646f4f1..b52c4752c5fe 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
#include "msm_iommu.h"
@@ -27,31 +28,17 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
return 0;
}
-/*
- * Get and enable the IOMMU clocks so that we can make
- * sure they stay on the entire duration so that we can
- * safely change the pagetable from the GPU
- */
-static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+static void iommu_get_clocks(struct msm_iommu *iommu, struct device *dev)
{
- struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct device *dev;
struct property *prop;
const char *name;
int i = 0;
- if (WARN_ON(!pdev))
- return;
-
- dev = &pdev->dev;
-
iommu->nr_clocks =
of_property_count_strings(dev->of_node, "clock-names");
- if (iommu->nr_clocks < 0) {
- iommu->nr_clocks = 0;
+ if (iommu->nr_clocks < 0)
return;
- }
if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
@@ -60,78 +47,58 @@ static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
if (i == iommu->nr_clocks)
break;
- iommu->clocks[i] = clk_get(dev, name);
- if (iommu->clocks[i])
- clk_prepare_enable(iommu->clocks[i]);
-
- i++;
+ iommu->clocks[i++] = clk_get(dev, name);
}
}
-static int _attach_iommu_device(struct msm_mmu *mmu,
- struct iommu_domain *domain, const char **names, int cnt)
+
+static void msm_iommu_clocks_enable(struct msm_mmu *mmu)
{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
int i;
- /* See if there is a iommus member in the current device. If not, look
- * for the names and see if there is one in there.
- */
+ if (!iommu->nr_clocks)
+ iommu_get_clocks(iommu, mmu->dev->parent);
- if (of_find_property(mmu->dev->of_node, "iommus", NULL))
- return iommu_attach_device(domain, mmu->dev);
-
- /* Look through the list of names for a target */
- for (i = 0; i < cnt; i++) {
- struct device_node *node =
- of_find_node_by_name(mmu->dev->of_node, names[i]);
-
- if (!node)
- continue;
-
- if (of_find_property(node, "iommus", NULL)) {
- struct platform_device *pdev;
-
- /* Get the platform device for the node */
- of_platform_populate(node->parent, NULL, NULL,
- mmu->dev);
-
- pdev = of_find_device_by_node(node);
-
- if (!pdev)
- continue;
-
- _get_iommu_clocks(mmu,
- of_find_device_by_node(node->parent));
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_prepare_enable(iommu->clocks[i]);
+ }
+}
- mmu->dev = &pdev->dev;
+static void msm_iommu_clocks_disable(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
- return iommu_attach_device(domain, mmu->dev);
- }
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_disable_unprepare(iommu->clocks[i]);
}
+}
- dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
- return -ENODEV;
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static int msm_iommu_attach_user(struct msm_mmu *mmu, const char **names,
+ int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int val = 1, ret;
+ int ret, val = 1;
/* Hope springs eternal */
- iommu->allow_dynamic = true;
-
- /* per-instance pagetables need TTBR1 support in the IOMMU driver */
- ret = iommu_domain_set_attr(iommu->domain,
- DOMAIN_ATTR_ENABLE_TTBR1, &val);
- if (ret)
- iommu->allow_dynamic = false;
+ iommu->allow_dynamic = !iommu_domain_set_attr(iommu->domain,
+ DOMAIN_ATTR_ENABLE_TTBR1, &val) ? true : false;
/* Mark the GPU as I/O coherent if it is supported */
iommu->is_coherent = of_dma_is_coherent(mmu->dev->of_node);
- /* Attach the device to the domain */
- ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
if (ret)
return ret;
@@ -176,17 +143,25 @@ static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
return 0;
}
+static int msm_iommu_attach_secure(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int ret, vmid = VMID_CP_PIXEL;
+
+ ret = iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_SECURE_VMID,
+ &vmid);
+ if (ret)
+ return ret;
+
+ return iommu_attach_device(iommu->domain, mmu->dev);
+}
+
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i;
iommu_detach_device(iommu->domain, mmu->dev);
-
- for (i = 0; i < iommu->nr_clocks; i++) {
- if (iommu->clocks[i])
- clk_disable(iommu->clocks[i]);
- }
}
static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
@@ -196,69 +171,50 @@ static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, int prot)
+ struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- uint64_t da = iova;
- unsigned int i, j;
int ret;
+ u32 prot = IOMMU_READ;
if (!domain || !sgt)
return -EINVAL;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- phys_addr_t pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
+ if (!(flags & MSM_BO_GPU_READONLY))
+ prot |= IOMMU_WRITE;
- VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
+ if (flags & MSM_BO_PRIVILEGED)
+ prot |= IOMMU_PRIV;
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
+ if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
+ prot |= IOMMU_CACHE;
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
+ /* iommu_map_sg returns the number of bytes mapped */
+ ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
+ if (ret)
+ sgt->sgl->dma_address = iova;
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
+ return ret ? 0 : -ENOMEM;
}
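/*
 * Note (not part of this patch): iommu_map_sg() returns the number of
 * bytes it mapped and 0 on failure, hence the inverted error handling
 * above. On success the iova is stashed in the first scatterlist entry;
 * that is the value msm_gem_map_vma() later reads back:
 *
 *   vma->iova = sg_dma_address(sgt->sgl);
 */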
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt)
+static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- uint64_t da = iova;
- int i;
+ size_t len = 0;
+ int ret, i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ len += sg->length;
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
+ ret = iommu_unmap(domain, iova, len);
+ if (ret != len)
+ dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);
- VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
-
- BUG_ON(!PAGE_ALIGNED(bytes));
-
- da += bytes;
- }
-
- return 0;
+ sgt->sgl->dma_address = 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
@@ -268,7 +224,30 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
kfree(iommu);
}
-static const struct msm_mmu_funcs funcs = {
+static struct device *find_context_bank(const char *name)
+{
+ struct device_node *node = of_find_node_by_name(NULL, name);
+ struct platform_device *pdev, *parent;
+
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_find_property(node, "iommus", NULL))
+ return ERR_PTR(-ENODEV);
+
+ /* Get the parent device */
+ parent = of_find_device_by_node(node->parent);
+
+ /* Populate the sub nodes */
+ of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
+
+ /* Get the context bank device */
+ pdev = of_find_device_by_node(node);
+
+ return pdev ? &pdev->dev : ERR_PTR(-ENODEV);
+}
+
+static const struct msm_mmu_funcs default_funcs = {
.attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
@@ -276,6 +255,24 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
+static const struct msm_mmu_funcs user_funcs = {
+ .attach = msm_iommu_attach_user,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+ .enable = msm_iommu_clocks_enable,
+ .disable = msm_iommu_clocks_disable,
+};
+
+static const struct msm_mmu_funcs secure_funcs = {
+ .attach = msm_iommu_attach_secure,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
static const struct msm_mmu_funcs dynamic_funcs = {
.attach = msm_iommu_attach_dynamic,
.detach = msm_iommu_detach_dynamic,
@@ -284,8 +281,26 @@ static const struct msm_mmu_funcs dynamic_funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
- const struct msm_mmu_funcs *funcs)
+static const struct {
+ const char *cbname;
+ const struct msm_mmu_funcs *funcs;
+} msm_iommu_domains[] = {
+ [MSM_IOMMU_DOMAIN_DEFAULT] = {
+ .cbname = NULL,
+ .funcs = &default_funcs,
+ },
+ [MSM_IOMMU_DOMAIN_USER] = {
+ .cbname = "gfx3d_user",
+ .funcs = &user_funcs,
+ },
+ [MSM_IOMMU_DOMAIN_SECURE] = {
+ .cbname = "gfx3d_secure",
+ .funcs = &secure_funcs
+ },
+};
+
+static struct msm_mmu *iommu_create(struct device *dev,
+ struct iommu_domain *domain, const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@@ -299,9 +314,23 @@ struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
return &iommu->base;
}
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+
+struct msm_mmu *msm_iommu_new(struct device *parent,
+ enum msm_iommu_domain_type type, struct iommu_domain *domain)
{
- return _msm_iommu_new(dev, domain, &funcs);
+ struct device *dev = parent;
+
+ if (type >= ARRAY_SIZE(msm_iommu_domains) ||
+ !msm_iommu_domains[type].funcs)
+ return ERR_PTR(-ENODEV);
+
+ if (msm_iommu_domains[type].cbname) {
+ dev = find_context_bank(msm_iommu_domains[type].cbname);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+ }
+
+ return iommu_create(dev, domain, msm_iommu_domains[type].funcs);
}
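
For illustration only, not part of this patch: with the domain table above, a GPU probe path could create the user-domain MMU roughly as follows (pdev and gpu_domain are hypothetical caller names):

	struct msm_mmu *mmu;

	/* hypothetical caller; pdev/gpu_domain are not from this patch */
	mmu = msm_iommu_new(&pdev->dev, MSM_IOMMU_DOMAIN_USER, gpu_domain);
	if (IS_ERR(mmu))
		return PTR_ERR(mmu);
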
/*
@@ -326,7 +355,7 @@ struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
if (!domain)
return ERR_PTR(-ENODEV);
- mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+ mmu = iommu_create(base->dev, domain, &dynamic_funcs);
if (IS_ERR(mmu)) {
if (domain)
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 501f12bef00d..8148d3e9e850 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -30,21 +30,22 @@ enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_MAX,
};
+enum msm_iommu_domain_type {
+ MSM_IOMMU_DOMAIN_DEFAULT,
+ MSM_IOMMU_DOMAIN_USER,
+ MSM_IOMMU_DOMAIN_SECURE,
+};
+
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
- int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir);
- void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir);
- int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir);
- void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir);
+ u32 flags, void *priv);
+ void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ void *priv);
void (*destroy)(struct msm_mmu *mmu);
+ void (*enable)(struct msm_mmu *mmu);
+ void (*disable)(struct msm_mmu *mmu);
};
struct msm_mmu {
@@ -59,9 +60,31 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
mmu->funcs = funcs;
}
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+/* Create a new SDE mmu device */
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+
+/* Create a new legacy MDP4 or GPU mmu device */
+struct msm_mmu *msm_iommu_new(struct device *parent,
+ enum msm_iommu_domain_type type, struct iommu_domain *domain);
+
+/* Create a new dynamic domain for GPU */
struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
+static inline void msm_mmu_enable(struct msm_mmu *mmu)
+{
+ if (mmu->funcs->enable)
+ mmu->funcs->enable(mmu);
+}
+
+static inline void msm_mmu_disable(struct msm_mmu *mmu)
+{
+ if (mmu->funcs->disable)
+ mmu->funcs->disable(mmu);
+}
+
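+
+As a usage sketch, not part of this patch: callers that touch the SMMU while its clocks may be gated can bracket the operation with the new hooks; both helpers are no-ops for backends that leave .enable/.disable unset (only user_funcs wires them up here):
+
+	/* sketch: mmu/iova/sgt/flags come from the caller's context */
+	msm_mmu_enable(mmu);
+	ret = mmu->funcs->map(mmu, iova, sgt, flags, NULL);
+	msm_mmu_disable(mmu);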
+/* SDE smmu driver initialize and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
index 5a9e472ea59b..5f3d1b6356aa 100644
--- a/drivers/gpu/drm/msm/msm_prop.c
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -183,6 +183,55 @@ static void _msm_property_install_integer(struct msm_property_info *info,
}
}
+/**
+ * _msm_property_install_signed_integer - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
+ */
+static void _msm_property_install_signed_integer(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx, bool force_dirty)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || (property_idx >= info->property_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (!*prop) {
+ *prop = drm_property_create_signed_range(info->dev,
+ flags, name, min, max);
+ if (!*prop)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = I642U64(init);
+ info->property_data[property_idx].force_dirty = force_dirty;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, init);
+ ++info->install_count;
+ }
+ }
+}
+
void msm_property_install_range(struct msm_property_info *info,
const char *name, int flags, uint64_t min, uint64_t max,
uint64_t init, uint32_t property_idx)
@@ -199,6 +248,22 @@ void msm_property_install_volatile_range(struct msm_property_info *info,
min, max, init, property_idx, true);
}
+void msm_property_install_signed_range(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx)
+{
+ _msm_property_install_signed_integer(info, name, flags,
+ min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx)
+{
+ _msm_property_install_signed_integer(info, name, flags,
+ min, max, init, property_idx, true);
+}
+
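+
+For illustration only, not part of this patch: a caller installs a signed range property the same way sde_connector.c does for PLL_DELTA further down; the property name and index here are hypothetical:
+
+	/* hypothetical property name/index, mirroring the PLL_DELTA install */
+	msm_property_install_signed_range(&c_conn->property_info,
+			"EXAMPLE_DELTA", 0x0, INT_MIN, INT_MAX, 0,
+			CONNECTOR_PROP_EXAMPLE_DELTA);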
void msm_property_install_rotation(struct msm_property_info *info,
unsigned int supported_rotations, uint32_t property_idx)
{
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
index dbe28bdf5638..1430551700c7 100644
--- a/drivers/gpu/drm/msm/msm_prop.h
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -208,6 +208,45 @@ void msm_property_install_volatile_range(struct msm_property_info *info,
uint32_t property_idx);
/**
+ * msm_property_install_signed_range - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_signed_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int64_t min,
+ int64_t max,
+ int64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_signed_range - install signed range property
+ * This function is similar to msm_property_install_signed_range, but assumes
+ * that the property is meant for holding user pointers or descriptors
+ * that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int64_t min,
+ int64_t max,
+ int64_t init,
+ uint32_t property_idx);
+
+/**
* msm_property_install_rotation - install standard drm rotation property
* @info: Pointer to property info container struct
* @supported_rotations: Bitmask of supported rotation values (see
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 6dbb516cd4dc..2d112f24a902 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -310,7 +310,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
struct msm_gem_object *obj = submit->bos[idx].obj;
- const char *buf = msm_gem_vaddr_locked(&obj->base);
+ const char *buf = msm_gem_vaddr(&obj->base);
buf += iova - submit->bos[idx].iova;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 14a16c4578d9..382c71bb0ebe 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -34,14 +34,15 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
ring->gpu = gpu;
ring->id = id;
- ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
+ ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+ MSM_BO_WC);
if (IS_ERR(ring->bo)) {
ret = PTR_ERR(ring->bo);
ring->bo = NULL;
goto fail;
}
- ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->start = msm_gem_vaddr(ring->bo);
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index 172aba387982..7d0dda032c59 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -58,7 +58,7 @@ static int msm_smmu_fault_handler(struct iommu_domain *iommu,
struct device *dev, unsigned long iova, int flags, void *arg)
{
- dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%p\n", __func__,
+ dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n", __func__,
iova, flags, iommu);
return 0;
}
@@ -116,106 +116,34 @@ static void msm_smmu_detach(struct msm_mmu *mmu)
}
static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, int prot)
+ struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- struct iommu_domain *domain;
- struct scatterlist *sg;
- uint64_t da = iova;
- unsigned int i, j;
int ret;
- if (!client)
- return -ENODEV;
-
- domain = client->mmu_mapping->domain;
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
-
- VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
-
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
-
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
-
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
-}
-
-static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- int ret;
-
- ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
- if (ret != sgt->nents)
- return -ENOMEM;
-
- return 0;
-}
-
-static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ if (priv)
+ ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL, priv);
+ else
+ ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
- dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
+ return (ret != sgt->nents) ? -ENOMEM : 0;
}
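
A caller-side sketch, not part of this patch: the priv argument selects the lazy dma_buf path, while NULL falls back to a plain dma_map_sg(); the dma_buf variable below is hypothetical:

	ret = mmu->funcs->map(mmu, iova, sgt, flags, dma_buf); /* lazy path */
	ret = mmu->funcs->map(mmu, iova, sgt, flags, NULL);    /* plain path */
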
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt)
+static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, void *priv)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- struct iommu_domain *domain;
- struct scatterlist *sg;
- uint64_t da = iova;
- int i;
-
- if (!client)
- return -ENODEV;
-
- domain = client->mmu_mapping->domain;
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
-
- VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
- WARN_ON(!PAGE_ALIGNED(bytes));
-
- da += bytes;
- }
-
- return 0;
+ if (priv)
+ msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL, priv);
+ else
+ dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
}
static void msm_smmu_destroy(struct msm_mmu *mmu)
@@ -228,42 +156,11 @@ static void msm_smmu_destroy(struct msm_mmu *mmu)
kfree(smmu);
}
-static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
- int ret;
-
- ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
- dma_buf);
- if (ret != sgt->nents) {
- DRM_ERROR("dma map sg failed\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-
-static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
- struct dma_buf *dma_buf, int dir)
-{
- struct msm_smmu *smmu = to_msm_smmu(mmu);
- struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-
- msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
-}
-
static const struct msm_mmu_funcs funcs = {
.attach = msm_smmu_attach,
.detach = msm_smmu_detach,
.map = msm_smmu_map,
- .map_sg = msm_smmu_map_sg,
- .unmap_sg = msm_smmu_unmap_sg,
.unmap = msm_smmu_unmap,
- .map_dma_buf = msm_smmu_map_dma_buf,
- .unmap_dma_buf = msm_smmu_unmap_dma_buf,
.destroy = msm_smmu_destroy,
};
@@ -295,13 +192,13 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
};
static const struct of_device_id msm_smmu_dt_match[] = {
- { .compatible = "qcom,smmu_mdp_unsec",
+ { .compatible = "qcom,smmu_sde_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
- { .compatible = "qcom,smmu_mdp_sec",
+ { .compatible = "qcom,smmu_sde_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
- { .compatible = "qcom,smmu_rot_unsec",
+ { .compatible = "qcom,smmu_sde_nrt_unsec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
- { .compatible = "qcom,smmu_rot_sec",
+ { .compatible = "qcom,smmu_sde_nrt_sec",
.data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
{}
};
@@ -497,7 +394,7 @@ static struct platform_driver msm_smmu_driver = {
},
};
-static int __init msm_smmu_driver_init(void)
+int __init msm_smmu_driver_init(void)
{
int ret;
@@ -507,13 +404,11 @@ static int __init msm_smmu_driver_init(void)
return ret;
}
-module_init(msm_smmu_driver_init);
-static void __exit msm_smmu_driver_cleanup(void)
+void __exit msm_smmu_driver_cleanup(void)
{
platform_driver_unregister(&msm_smmu_driver);
}
-module_exit(msm_smmu_driver_cleanup);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
index 247e1358c885..fd560b2129f1 100644
--- a/drivers/gpu/drm/msm/msm_snapshot.h
+++ b/drivers/gpu/drm/msm/msm_snapshot.h
@@ -71,8 +71,8 @@ static inline bool _snapshot_header(struct msm_snapshot *snapshot,
*/
#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
_snapshot_header((_snapshot), \
- (struct msm_snapshot_section_header *) &(header), \
- sizeof(header), (_dwords) << 2, (_id))
+ (struct msm_snapshot_section_header *) &(_header), \
+ sizeof(_header), (_dwords) << 2, (_id))
struct msm_gpu;
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
index 9f0adb9ee784..7ad6f0498423 100644
--- a/drivers/gpu/drm/msm/msm_snapshot_api.h
+++ b/drivers/gpu/drm/msm/msm_snapshot_api.h
@@ -118,4 +118,17 @@ struct msm_snapshot_shader {
__u32 size;
} __packed;
+#define SNAPSHOT_GPU_OBJECT_SHADER 1
+#define SNAPSHOT_GPU_OBJECT_IB 2
+#define SNAPSHOT_GPU_OBJECT_GENERIC 3
+#define SNAPSHOT_GPU_OBJECT_DRAW 4
+#define SNAPSHOT_GPU_OBJECT_GLOBAL 5
+
+struct msm_snapshot_gpu_object {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u64 gpuaddr;
+ __u64 pt_base;
+ __u64 size;
+} __packed;
#endif
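
For illustration only, not part of this patch: a snapshot producer would fill the new section roughly like this before emitting it with the SNAPSHOT_HEADER() machinery; all field values below are hypothetical:

	struct msm_snapshot_gpu_object obj = {
		.type = SNAPSHOT_GPU_OBJECT_IB,
		.gpuaddr = 0x0000000100000000ULL, /* hypothetical GPU VA */
		.pt_base = 0x0000000080000000ULL, /* hypothetical pagetable base */
		.size = 64, /* dwords */
	};
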
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 12546c059f6f..7538927a4993 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -578,6 +578,10 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+ msm_property_install_volatile_signed_range(&c_conn->property_info,
+ "PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0,
+ CONNECTOR_PROP_PLL_DELTA);
+
/* enum/bitmask properties */
msm_property_install_enum(&c_conn->property_info, "topology_name",
DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 41322045ced3..cb5f7d3cf19f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -57,6 +57,7 @@
static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
+
return to_sde_kms(priv->kms);
}
@@ -183,9 +184,6 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
pstate = to_sde_plane_state(plane->state);
- flush_mask = ctl->ops.get_bitmask_sspp(ctl,
- sde_plane_pipe(plane));
-
/* always stage plane on either left or right lm */
if (plane->state->crtc_x >= crtc_split_width) {
lm_idx = RIGHT_MIXER;
@@ -195,20 +193,36 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
idx = left_crtc_zpos_cnt[pstate->stage]++;
}
+ flush_mask = 0;
+
+ /*
+ * program each mixer with two hw pipes in dual mixer mode.
+ */
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+ stage_cfg->stage[LEFT_MIXER][pstate->stage][1] =
+ sde_plane_pipe(plane, 1);
+
+ flush_mask = ctl->ops.get_bitmask_sspp(ctl,
+ sde_plane_pipe(plane, 1));
+ }
+
+ flush_mask |= ctl->ops.get_bitmask_sspp(ctl,
+ sde_plane_pipe(plane, lm_idx ? 1 : 0));
+
/* stage plane on right LM if it crosses the boundary */
lm_right = (lm_idx == LEFT_MIXER) &&
(plane->state->crtc_x + plane->state->crtc_w >
crtc_split_width);
stage_cfg->stage[lm_idx][pstate->stage][idx] =
- sde_plane_pipe(plane);
+ sde_plane_pipe(plane, lm_idx ? 1 : 0);
+
mixer[lm_idx].flush_mask |= flush_mask;
SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
crtc->base.id,
pstate->stage,
plane->base.id,
- sde_plane_pipe(plane) - SSPP_VIG0,
+ sde_plane_pipe(plane,
+ lm_idx ? 1 : 0) - SSPP_VIG0,
plane->state->fb ?
plane->state->fb->base.id : -1);
@@ -230,8 +244,19 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (lm_right) {
idx = right_crtc_zpos_cnt[pstate->stage]++;
- stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx] =
- sde_plane_pipe(plane);
+
+ /*
+ * program each mixer with two hw pipes
+ * in dual mixer mode.
+ */
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+ stage_cfg->stage[RIGHT_MIXER][pstate->stage][1]
+ = sde_plane_pipe(plane, 0);
+ }
+
+ stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx]
+ = sde_plane_pipe(plane, 1);
+
mixer[RIGHT_MIXER].flush_mask |= flush_mask;
/* blend config update */
@@ -1134,12 +1159,12 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
/* get plane state for all drm planes associated with crtc state */
drm_atomic_crtc_state_for_each_plane(plane, state) {
- pstate = drm_atomic_get_plane_state(state->state, plane);
+ pstate = drm_atomic_get_existing_plane_state(
+ state->state, plane);
if (IS_ERR_OR_NULL(pstate)) {
- rc = PTR_ERR(pstate);
- SDE_ERROR("%s: failed to get plane%d state, %d\n",
- sde_crtc->name, plane->base.id, rc);
+ SDE_DEBUG("%s: failed to get plane%d state\n",
+ sde_crtc->name, plane->base.id);
- goto end;
+ continue;
}
if (cnt >= ARRAY_SIZE(pstates))
continue;
@@ -1256,7 +1281,8 @@ int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
return 0;
}
-void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc,
+ struct drm_file *file)
{
struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
@@ -1275,6 +1301,10 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
struct drm_device *dev;
struct sde_kms_info *info;
struct sde_kms *sde_kms;
+ static const struct drm_prop_enum_list e_secure_level[] = {
+ {SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
+ {SDE_DRM_SEC_ONLY, "sec_only"},
+ };
SDE_DEBUG("\n");
@@ -1320,6 +1350,12 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
msm_property_install_blob(&sde_crtc->property_info, "capabilities",
DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
+
+ msm_property_install_enum(&sde_crtc->property_info, "security_level",
+ 0x0, 0, e_secure_level,
+ ARRAY_SIZE(e_secure_level),
+ CRTC_PROP_SECURITY_LEVEL);
+
sde_kms_info_reset(info);
sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
@@ -1665,7 +1701,8 @@ static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
#endif
/* initialize crtc */
-struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+struct drm_crtc *sde_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane)
{
struct drm_crtc *crtc = NULL;
struct sde_crtc *sde_crtc = NULL;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index e61ff97d2ca4..0b6ee302e231 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -73,12 +73,6 @@ static void drm_mode_to_intf_timing_params(
timing->underflow_clr = 0xff;
timing->hsync_skew = mode->hskew;
- /* DSI controller cannot handle active-low sync signals. */
- if (vid_enc->hw_intf->cap->type == INTF_DSI) {
- timing->hsync_polarity = 0;
- timing->vsync_polarity = 0;
- }
-
/*
* For edp only:
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 519288f0dda2..17b678cfca46 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
#include "sde_hw_mdss.h"
#include "sde_hw_catalog.h"
#include "sde_hw_catalog_format.h"
@@ -715,6 +717,7 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
sblk->pcc_blk.len = 0;
set_bit(SDE_SSPP_PCC, &sspp->features);
}
+ snprintf(sspp->name, sizeof(sspp->name), "vig%d", *vig_count-1);
}
static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
@@ -753,6 +756,7 @@ static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
sblk->pcc_blk.len = 0;
set_bit(SDE_SSPP_PCC, &sspp->features);
}
+ snprintf(sspp->name, sizeof(sspp->name), "rgb%d", *rgb_count-1);
}
static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
@@ -766,6 +770,7 @@ static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
sblk->format_list = plane_formats;
(*cursor_count)++;
+ snprintf(sspp->name, sizeof(sspp->name), "cursor%d", *cursor_count-1);
}
static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
@@ -779,6 +784,7 @@ static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
sblk->format_list = plane_formats;
set_bit(SDE_SSPP_QOS, &sspp->features);
(*dma_count)++;
+ snprintf(sspp->name, sizeof(sspp->name), "dma%d", *dma_count-1);
}
static int sde_sspp_parse_dt(struct device_node *np,
@@ -1200,7 +1206,8 @@ end:
return rc;
}
-static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
+static int sde_wb_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
{
int rc, prop_count[WB_PROP_MAX], i, j;
struct sde_prop_value *prop_value = NULL;
@@ -1686,7 +1693,8 @@ end:
return rc;
}
-static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
+static int sde_pp_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
{
int rc, prop_count[PP_PROP_MAX], i;
struct sde_prop_value *prop_value = NULL;
@@ -1760,6 +1768,94 @@ end:
return rc;
}
+static inline u32 _sde_parse_sspp_id(struct sde_mdss_cfg *cfg,
+ const char *name)
+{
+ int i;
+
+ for (i = 0; i < cfg->sspp_count; i++) {
+ if (!strcmp(cfg->sspp[i].name, name))
+ return cfg->sspp[i].id;
+ }
+
+ return SSPP_NONE;
+}
+
+static int _sde_vp_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *cfg)
+{
+ int rc = 0, i = 0;
+ struct device_node *node = NULL;
+ struct device_node *root_node = NULL;
+ struct sde_vp_cfg *vp;
+ struct sde_vp_sub_blks *vp_sub, *vp_sub_next;
+ struct property *prop;
+ const char *cname;
+
+ root_node = of_get_child_by_name(np, "qcom,sde-plane-id-map");
+ if (!root_node) {
+ root_node = of_parse_phandle(np, "qcom,sde-plane-id-map", 0);
+ if (!root_node) {
+ SDE_ERROR("No entry present for qcom,sde-plane-id-map");
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ for_each_child_of_node(root_node, node) {
+ if (i >= MAX_BLOCKS) {
+ SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n",
+ i, MAX_BLOCKS);
+ rc = -EINVAL;
+ goto end;
+ }
+ cfg->vp_count++;
+ vp = &(cfg->vp[i]);
+ vp->id = i;
+ rc = of_property_read_string(node, "qcom,display-type",
+ &(vp->display_type));
+ if (rc) {
+ SDE_ERROR("failed to read display-type, rc = %d\n", rc);
+ goto end;
+ }
+
+ rc = of_property_read_string(node, "qcom,plane-type",
+ &(vp->plane_type));
+ if (rc) {
+ SDE_ERROR("failed to read plane-type, rc = %d\n", rc);
+ goto end;
+ }
+
+ INIT_LIST_HEAD(&vp->sub_blks);
+ of_property_for_each_string(node, "qcom,plane-name",
+ prop, cname) {
+ vp_sub = kzalloc(sizeof(*vp_sub), GFP_KERNEL);
+ if (!vp_sub) {
+ rc = -ENOMEM;
+ goto end;
+ }
+ vp_sub->sspp_id = _sde_parse_sspp_id(cfg, cname);
+ list_add_tail(&vp_sub->pipeid_list, &vp->sub_blks);
+ }
+ i++;
+ }
+
+end:
+ if (rc && cfg->vp_count) {
+ for (i = 0; i < cfg->vp_count; i++) {
+ vp = &(cfg->vp[i]);
+ list_for_each_entry_safe(vp_sub, vp_sub_next,
+ &vp->sub_blks, pipeid_list) {
+ list_del(&vp_sub->pipeid_list);
+ kfree(vp_sub);
+ }
+ }
+ memset(&(cfg->vp[0]), 0, sizeof(cfg->vp));
+ cfg->vp_count = 0;
+ }
+ return rc;
+}
+
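+
+An illustrative devicetree shape consumed by _sde_vp_parse_dt() above; the node and property names match the strings parsed in the code, while the layout and values are hypothetical:
+
+	/*
+	 *	qcom,sde-plane-id-map {
+	 *		vp@0 {
+	 *			qcom,display-type = "primary";
+	 *			qcom,plane-type = "primary";
+	 *			qcom,plane-name = "vig0", "rgb0";
+	 *		};
+	 *	};
+	 */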
static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
{
int rc, len, prop_count[SDE_PROP_MAX];
@@ -1851,7 +1947,8 @@ end:
return rc;
}
-static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+static int sde_perf_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *cfg)
{
int rc, len, prop_count[PERF_PROP_MAX];
struct sde_prop_value *prop_value = NULL;
@@ -1891,7 +1988,8 @@ end:
return rc;
}
-static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg,
+ uint32_t hw_rev)
{
switch (hw_rev) {
case SDE_HW_VER_170:
@@ -1909,6 +2007,7 @@ static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
{
int i;
+ struct sde_vp_sub_blks *vp_sub, *vp_sub_next;
if (!sde_cfg)
return;
@@ -1932,13 +2031,23 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
}
+
+ for (i = 0; i < sde_cfg->vp_count; i++) {
+ list_for_each_entry_safe(vp_sub, vp_sub_next,
+ &sde_cfg->vp[i].sub_blks, pipeid_list) {
+ list_del(&vp_sub->pipeid_list);
+ kfree(vp_sub);
+ }
+ }
+
kfree(sde_cfg);
}
/*************************************************************
* hardware catalog init
*************************************************************/
-struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev,
+ u32 hw_rev)
{
int rc;
struct sde_mdss_cfg *sde_cfg;
@@ -1996,6 +2105,10 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
if (rc)
goto end;
+ rc = _sde_vp_parse_dt(np, sde_cfg);
+ if (rc)
+ SDE_DEBUG("virtual plane is not supported.\n");
+
sde_hardware_caps(sde_cfg, hw_rev);
return sde_cfg;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index a8f9169aaf35..bca221d2a959 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -57,6 +57,8 @@
#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+#define SSPP_NAME_SIZE 12
+
/**
* MDP TOP BLOCK features
* @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
@@ -455,12 +457,14 @@ struct sde_ctl_cfg {
* @sblk: SSPP sub-blocks information
* @xin_id: bus client identifier
* @clk_ctrl clock control identifier
+ * @name: source pipe name
*/
struct sde_sspp_cfg {
SDE_HW_BLK_INFO;
const struct sde_sspp_sub_blks *sblk;
u32 xin_id;
enum sde_clk_ctrl_type clk_ctrl;
+ char name[SSPP_NAME_SIZE];
};
/**
@@ -608,6 +612,31 @@ struct sde_perf_cfg {
};
/**
+ * struct sde_vp_sub_blks - Virtual Plane sub-blocks
+ * @pipeid_list: list for hw pipe id
+ * @sspp_id: SSPP ID, refer to enum sde_sspp.
+ */
+struct sde_vp_sub_blks {
+ struct list_head pipeid_list;
+ u32 sspp_id;
+};
+
+/**
+ * struct sde_vp_cfg - information of Virtual Plane SW blocks
+ * @id: enum identifying this block
+ * @sub_blks: list head for virtual plane sub blocks
+ * @plane_type: plane type, such as primary, overlay or cursor
+ * @display_type: which display the plane is bound to, such as primary,
+ * secondary or tertiary
+ */
+struct sde_vp_cfg {
+ u32 id;
+ struct list_head sub_blks;
+ const char *plane_type;
+ const char *display_type;
+};
+
+/**
* struct sde_mdss_cfg - information of MDSS HW
* This is the main catalog data structure representing
* this HW version. Contains number of instances,
@@ -672,6 +701,9 @@ struct sde_mdss_cfg {
/* Add additional block data structures here */
struct sde_perf_cfg perf;
+
+ u32 vp_count;
+ struct sde_vp_cfg vp[MAX_BLOCKS];
};
struct sde_mdss_hw_cfg_handler {
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index 3b34719e9971..042b0ee7909a 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -157,13 +157,8 @@ static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
den_polarity = 0;
- if (ctx->cap->type == INTF_HDMI) {
- hsync_polarity = p->yres >= 720 ? 0 : 1;
- vsync_polarity = p->yres >= 720 ? 0 : 1;
- } else {
- hsync_polarity = 0;
- vsync_polarity = 0;
- }
+ hsync_polarity = p->hsync_polarity;
+ vsync_polarity = p->vsync_polarity;
polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
(vsync_polarity << 1) | /* VSYNC Polarity */
(hsync_polarity << 0); /* HSYNC Polarity */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index 1a5d469e6e7e..d6d2e41ff5aa 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -42,6 +42,10 @@ static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
if (!mdp || !cfg)
return;
+ /* The SPLIT registers are only for DSI interfaces */
+ if ((cfg->intf != INTF_1) && (cfg->intf != INTF_2))
+ return;
+
if (cfg->en) {
if (cfg->mode == INTF_MODE_CMD) {
lower_pipe = FLD_SPLIT_DISPLAY_CMD;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 195eadc2e5fd..544fef90ef6b 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -41,10 +41,6 @@
#define CREATE_TRACE_POINTS
#include "sde_trace.h"
-static const char * const iommu_ports[] = {
- "mdp_0",
-};
-
/**
* Controls size of event log buffer. Specified as a power of 2.
*/
@@ -604,6 +600,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.get_modes = sde_hdmi_connector_get_modes,
.mode_valid = sde_hdmi_mode_valid,
.get_info = sde_hdmi_get_info,
+ .set_property = sde_hdmi_set_property,
};
struct msm_display_info info = {0};
struct drm_encoder *encoder;
@@ -800,6 +797,16 @@ static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
_sde_kms_release_displays(sde_kms);
}
+static inline int sde_get_crtc_id(const char *display_type)
+{
+ if (!strcmp(display_type, "primary"))
+ return 0;
+ else if (!strcmp(display_type, "secondary"))
+ return 1;
+ else
+ return 2;
+}
+
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
struct drm_device *dev;
@@ -832,28 +839,57 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
(void)_sde_kms_setup_displays(dev, priv, sde_kms);
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
- max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
/* Create the planes */
primary_planes_idx = 0;
- for (i = 0; i < max_plane_count; i++) {
- bool primary = true;
-
- if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
- || primary_planes_idx >= max_crtc_count)
- primary = false;
-
- plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
- (1UL << max_crtc_count) - 1);
- if (IS_ERR(plane)) {
- SDE_ERROR("sde_plane_init failed\n");
- ret = PTR_ERR(plane);
- goto fail;
+ if (catalog->vp_count) {
+ max_plane_count = min_t(u32, catalog->vp_count, MAX_PLANES);
+
+ for (i = 0; i < max_plane_count; i++) {
+ bool primary = true;
+ int crtc_id =
+ sde_get_crtc_id(catalog->vp[i].display_type);
+
+ if (strcmp(catalog->vp[i].plane_type, "primary"))
+ primary = false;
+
+ plane = sde_plane_init(dev, catalog->vp[i].id,
+ primary, 1UL << crtc_id, true);
+ if (IS_ERR(plane)) {
+ SDE_ERROR("sde_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary) {
+ primary_planes[crtc_id] = plane;
+ primary_planes_idx++;
+ }
+ }
+ } else {
+ max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
+
+ for (i = 0; i < max_plane_count; i++) {
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
+ || primary_planes_idx >= max_crtc_count)
+ primary = false;
+
+ plane = sde_plane_init(dev, catalog->sspp[i].id,
+ primary, (1UL << max_crtc_count) - 1,
+ false);
+ if (IS_ERR(plane)) {
+ SDE_ERROR("sde_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
}
- priv->planes[priv->num_planes++] = plane;
-
- if (primary)
- primary_planes[primary_planes_idx++] = plane;
}
max_crtc_count = min(max_crtc_count, primary_planes_idx);
@@ -1082,8 +1118,7 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
sde_kms->aspace[i] = aspace;
- ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ ret = mmu->funcs->attach(mmu, NULL, 0);
if (ret) {
SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
msm_gem_address_space_put(aspace);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 114acfd7a173..3ccad0c0c08d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -78,20 +78,22 @@ enum sde_plane_qos {
};
/*
- * struct sde_plane - local sde plane structure
+ * struct sde_phy_plane - physical plane structure
+ * @sde_plane: Points to virtual plane
+ * @phy_plane_list: list of hw pipe(physical plane)
+ * @index: index of physical plane (starts from 0, order from left to right)
+ * @features: capabilities from catalog
* @csc_cfg: Decoded user configuration for csc
* @csc_usr_ptr: Points to csc_cfg if valid user config available
* @csc_ptr: Points to sde_csc_cfg structure to use for current
*/
-struct sde_plane {
- struct drm_plane base;
-
- struct msm_gem_address_space *aspace;
-
- struct mutex lock;
-
+struct sde_phy_plane {
+ struct sde_plane *sde_plane;
+ struct list_head phy_plane_list;
enum sde_sspp pipe;
- uint32_t features; /* capabilities from catalog */
+ uint32_t index;
+
+ uint32_t features;
uint32_t nformats;
uint32_t formats[64];
@@ -101,7 +103,6 @@ struct sde_plane {
struct sde_hw_scaler3_cfg *scaler3_cfg;
struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
uint32_t color_fill;
- bool is_error;
bool is_rt_pipe;
struct sde_hw_pixel_ext pixel_ext;
@@ -112,9 +113,22 @@ struct sde_plane {
struct sde_csc_cfg *csc_ptr;
const struct sde_sspp_sub_blks *pipe_sblk;
+};
+
+/*
+ * struct sde_plane - local sde plane structure
+ */
+struct sde_plane {
+ struct drm_plane base;
+ struct msm_gem_address_space *aspace;
+ struct mutex lock;
+ bool is_error;
char pipe_name[SDE_NAME_SIZE];
+ struct list_head phy_plane_head;
+ u32 num_of_phy_planes;
+
struct msm_property_info property_info;
struct msm_property_data property_data[PLANE_PROP_COUNT];
struct drm_property_blob *blob_info;
@@ -140,20 +154,20 @@ static bool sde_plane_enabled(struct drm_plane_state *state)
* @src_width: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
-static inline int _sde_plane_calc_fill_level(struct drm_plane *plane,
+static inline int _sde_plane_calc_fill_level(struct sde_phy_plane *pp,
const struct sde_format *fmt, u32 src_width)
{
struct sde_plane *psde;
u32 fixed_buff_size;
u32 total_fl;
- if (!plane || !fmt) {
+ if (!pp || !fmt) {
SDE_ERROR("invalid arguments\n");
return 0;
}
- psde = to_sde_plane(plane);
- fixed_buff_size = psde->pipe_sblk->pixel_ram_size;
+ psde = pp->sde_plane;
+ fixed_buff_size = pp->pipe_sblk->pixel_ram_size;
if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
if (fmt->chroma_sample == SDE_CHROMA_420) {
@@ -171,7 +185,7 @@ static inline int _sde_plane_calc_fill_level(struct drm_plane *plane,
}
SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
- plane->base.id, psde->pipe - SSPP_VIG0,
+ psde->base.base.id, pp->pipe - SSPP_VIG0,
fmt->base.pixel_format, src_width, total_fl);
return total_fl;
@@ -236,7 +250,7 @@ static inline u32 _sde_plane_get_qos_lut_macrotile(u32 total_fl)
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
*/
-static void _sde_plane_set_qos_lut(struct drm_plane *plane,
+static void _sde_plane_set_qos_lut(struct sde_phy_plane *pp,
struct drm_framebuffer *fb)
{
struct sde_plane *psde;
@@ -244,30 +258,30 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
u32 qos_lut;
u32 total_fl = 0;
- if (!plane || !fb) {
- SDE_ERROR("invalid arguments plane %d fb %d\n",
- plane != 0, fb != 0);
+ if (!pp || !fb) {
+ SDE_ERROR("invalid arguments phy_plane %d fb %d\n",
+ pp != NULL, fb != NULL);
return;
}
- psde = to_sde_plane(plane);
+ psde = pp->sde_plane;
- if (!psde->pipe_hw || !psde->pipe_sblk) {
+ if (!pp->pipe_hw || !pp->pipe_sblk) {
SDE_ERROR("invalid arguments\n");
return;
- } else if (!psde->pipe_hw->ops.setup_creq_lut) {
+ } else if (!pp->pipe_hw->ops.setup_creq_lut) {
return;
}
- if (!psde->is_rt_pipe) {
- qos_lut = psde->pipe_sblk->creq_lut_nrt;
+ if (!pp->is_rt_pipe) {
+ qos_lut = pp->pipe_sblk->creq_lut_nrt;
} else {
fmt = sde_get_sde_format_ext(
fb->pixel_format,
fb->modifier,
drm_format_num_planes(fb->pixel_format));
- total_fl = _sde_plane_calc_fill_level(plane, fmt,
- psde->pipe_cfg.src_rect.w);
+ total_fl = _sde_plane_calc_fill_level(pp, fmt,
+ pp->pipe_cfg.src_rect.w);
if (SDE_FORMAT_IS_LINEAR(fmt))
qos_lut = _sde_plane_get_qos_lut_linear(total_fl);
@@ -275,20 +289,20 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
qos_lut = _sde_plane_get_qos_lut_macrotile(total_fl);
}
- psde->pipe_qos_cfg.creq_lut = qos_lut;
+ pp->pipe_qos_cfg.creq_lut = qos_lut;
- trace_sde_perf_set_qos_luts(psde->pipe - SSPP_VIG0,
+ trace_sde_perf_set_qos_luts(pp->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
- psde->is_rt_pipe, total_fl, qos_lut,
+ pp->is_rt_pipe, total_fl, qos_lut,
(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
- plane->base.id,
- psde->pipe - SSPP_VIG0,
+ psde->base.base.id,
+ pp->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
- psde->is_rt_pipe, total_fl, qos_lut);
+ pp->is_rt_pipe, total_fl, qos_lut);
- psde->pipe_hw->ops.setup_creq_lut(psde->pipe_hw, &psde->pipe_qos_cfg);
+ pp->pipe_hw->ops.setup_creq_lut(pp->pipe_hw, &pp->pipe_qos_cfg);
}
/**
@@ -296,30 +310,30 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane,
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
*/
-static void _sde_plane_set_danger_lut(struct drm_plane *plane,
+static void _sde_plane_set_danger_lut(struct sde_phy_plane *pp,
struct drm_framebuffer *fb)
{
struct sde_plane *psde;
const struct sde_format *fmt = NULL;
u32 danger_lut, safe_lut;
- if (!plane || !fb) {
+ if (!pp || !fb) {
SDE_ERROR("invalid arguments\n");
return;
}
- psde = to_sde_plane(plane);
+ psde = pp->sde_plane;
- if (!psde->pipe_hw || !psde->pipe_sblk) {
+ if (!pp->pipe_hw || !pp->pipe_sblk) {
SDE_ERROR("invalid arguments\n");
return;
- } else if (!psde->pipe_hw->ops.setup_danger_safe_lut) {
+ } else if (!pp->pipe_hw->ops.setup_danger_safe_lut) {
return;
}
- if (!psde->is_rt_pipe) {
- danger_lut = psde->pipe_sblk->danger_lut_nrt;
- safe_lut = psde->pipe_sblk->safe_lut_nrt;
+ if (!pp->is_rt_pipe) {
+ danger_lut = pp->pipe_sblk->danger_lut_nrt;
+ safe_lut = pp->pipe_sblk->safe_lut_nrt;
} else {
fmt = sde_get_sde_format_ext(
fb->pixel_format,
@@ -327,33 +341,33 @@ static void _sde_plane_set_danger_lut(struct drm_plane *plane,
drm_format_num_planes(fb->pixel_format));
if (SDE_FORMAT_IS_LINEAR(fmt)) {
- danger_lut = psde->pipe_sblk->danger_lut_linear;
- safe_lut = psde->pipe_sblk->safe_lut_linear;
+ danger_lut = pp->pipe_sblk->danger_lut_linear;
+ safe_lut = pp->pipe_sblk->safe_lut_linear;
} else {
- danger_lut = psde->pipe_sblk->danger_lut_tile;
- safe_lut = psde->pipe_sblk->safe_lut_tile;
+ danger_lut = pp->pipe_sblk->danger_lut_tile;
+ safe_lut = pp->pipe_sblk->safe_lut_tile;
}
}
- psde->pipe_qos_cfg.danger_lut = danger_lut;
- psde->pipe_qos_cfg.safe_lut = safe_lut;
+ pp->pipe_qos_cfg.danger_lut = danger_lut;
+ pp->pipe_qos_cfg.safe_lut = safe_lut;
- trace_sde_perf_set_danger_luts(psde->pipe - SSPP_VIG0,
+ trace_sde_perf_set_danger_luts(pp->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
(fmt) ? fmt->fetch_mode : 0,
- psde->pipe_qos_cfg.danger_lut,
- psde->pipe_qos_cfg.safe_lut);
+ pp->pipe_qos_cfg.danger_lut,
+ pp->pipe_qos_cfg.safe_lut);
SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
- plane->base.id,
- psde->pipe - SSPP_VIG0,
+ psde->base.base.id,
+ pp->pipe - SSPP_VIG0,
fmt ? fmt->base.pixel_format : 0,
fmt ? fmt->fetch_mode : -1,
- psde->pipe_qos_cfg.danger_lut,
- psde->pipe_qos_cfg.safe_lut);
+ pp->pipe_qos_cfg.danger_lut,
+ pp->pipe_qos_cfg.safe_lut);
- psde->pipe_hw->ops.setup_danger_safe_lut(psde->pipe_hw,
- &psde->pipe_qos_cfg);
+ pp->pipe_hw->ops.setup_danger_safe_lut(pp->pipe_hw,
+ &pp->pipe_qos_cfg);
}
/**
@@ -362,85 +376,90 @@ static void _sde_plane_set_danger_lut(struct drm_plane *plane,
* @enable: true to enable QoS control
* @flags: QoS control mode (enum sde_plane_qos)
*/
-static void _sde_plane_set_qos_ctrl(struct drm_plane *plane,
+static void _sde_plane_set_qos_ctrl(struct sde_phy_plane *pp,
bool enable, u32 flags)
{
struct sde_plane *psde;
- if (!plane) {
+ if (!pp) {
SDE_ERROR("invalid arguments\n");
return;
}
- psde = to_sde_plane(plane);
+ psde = pp->sde_plane;
- if (!psde->pipe_hw || !psde->pipe_sblk) {
+ if (!pp->pipe_hw || !pp->pipe_sblk) {
SDE_ERROR("invalid arguments\n");
return;
- } else if (!psde->pipe_hw->ops.setup_qos_ctrl) {
+ } else if (!pp->pipe_hw->ops.setup_qos_ctrl) {
return;
}
if (flags & SDE_PLANE_QOS_VBLANK_CTRL) {
- psde->pipe_qos_cfg.creq_vblank = psde->pipe_sblk->creq_vblank;
- psde->pipe_qos_cfg.danger_vblank =
- psde->pipe_sblk->danger_vblank;
- psde->pipe_qos_cfg.vblank_en = enable;
+ pp->pipe_qos_cfg.creq_vblank = pp->pipe_sblk->creq_vblank;
+ pp->pipe_qos_cfg.danger_vblank =
+ pp->pipe_sblk->danger_vblank;
+ pp->pipe_qos_cfg.vblank_en = enable;
}
if (flags & SDE_PLANE_QOS_VBLANK_AMORTIZE) {
/* this feature overrules previous VBLANK_CTRL */
- psde->pipe_qos_cfg.vblank_en = false;
- psde->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ pp->pipe_qos_cfg.vblank_en = false;
+ pp->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
}
if (flags & SDE_PLANE_QOS_PANIC_CTRL)
- psde->pipe_qos_cfg.danger_safe_en = enable;
+ pp->pipe_qos_cfg.danger_safe_en = enable;
- if (!psde->is_rt_pipe) {
- psde->pipe_qos_cfg.vblank_en = false;
- psde->pipe_qos_cfg.danger_safe_en = false;
+ if (!pp->is_rt_pipe) {
+ pp->pipe_qos_cfg.vblank_en = false;
+ pp->pipe_qos_cfg.danger_safe_en = false;
}
SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
- plane->base.id,
- psde->pipe - SSPP_VIG0,
- psde->pipe_qos_cfg.danger_safe_en,
- psde->pipe_qos_cfg.vblank_en,
- psde->pipe_qos_cfg.creq_vblank,
- psde->pipe_qos_cfg.danger_vblank,
- psde->is_rt_pipe);
-
- psde->pipe_hw->ops.setup_qos_ctrl(psde->pipe_hw,
- &psde->pipe_qos_cfg);
+ psde->base.base.id,
+ pp->pipe - SSPP_VIG0,
+ pp->pipe_qos_cfg.danger_safe_en,
+ pp->pipe_qos_cfg.vblank_en,
+ pp->pipe_qos_cfg.creq_vblank,
+ pp->pipe_qos_cfg.danger_vblank,
+ pp->is_rt_pipe);
+
+ pp->pipe_hw->ops.setup_qos_ctrl(pp->pipe_hw,
+ &pp->pipe_qos_cfg);
}
-int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+static int sde_plane_danger_signal_ctrl(struct sde_phy_plane *pp, bool enable)
{
struct sde_plane *psde;
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
- if (!plane || !plane->dev) {
+ if (!pp) {
+ SDE_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+ psde = pp->sde_plane;
+
+ if (!psde->base.dev) {
SDE_ERROR("invalid arguments\n");
return -EINVAL;
}
- priv = plane->dev->dev_private;
+ priv = psde->base.dev->dev_private;
if (!priv || !priv->kms) {
SDE_ERROR("invalid KMS reference\n");
return -EINVAL;
}
sde_kms = to_sde_kms(priv->kms);
- psde = to_sde_plane(plane);
- if (!psde->is_rt_pipe)
+ if (!pp->is_rt_pipe)
goto end;
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
- _sde_plane_set_qos_ctrl(plane, enable, SDE_PLANE_QOS_PANIC_CTRL);
+ _sde_plane_set_qos_ctrl(pp, enable, SDE_PLANE_QOS_PANIC_CTRL);
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
@@ -453,7 +472,7 @@ end:
* @plane: Pointer to drm plane
* @crtc: Pointer to drm crtc
*/
-static void _sde_plane_set_ot_limit(struct drm_plane *plane,
+static void _sde_plane_set_ot_limit(struct sde_phy_plane *pp,
struct drm_crtc *crtc)
{
struct sde_plane *psde;
@@ -461,34 +480,38 @@ static void _sde_plane_set_ot_limit(struct drm_plane *plane,
struct msm_drm_private *priv;
struct sde_kms *sde_kms;
- if (!plane || !plane->dev || !crtc) {
- SDE_ERROR("invalid arguments plane %d crtc %d\n",
- plane != 0, crtc != 0);
+ if (!pp || !crtc) {
+ SDE_ERROR("invalid arguments phy_plane %d crtc %d\n",
+ pp != NULL, crtc != NULL);
+ return;
+ }
+ psde = pp->sde_plane;
+ if (!psde->base.dev) {
+ SDE_ERROR("invalid DRM device\n");
return;
}
- priv = plane->dev->dev_private;
+ priv = psde->base.dev->dev_private;
if (!priv || !priv->kms) {
SDE_ERROR("invalid KMS reference\n");
return;
}
sde_kms = to_sde_kms(priv->kms);
- psde = to_sde_plane(plane);
- if (!psde->pipe_hw) {
+ if (!pp->pipe_hw) {
SDE_ERROR("invalid pipe reference\n");
return;
}
memset(&ot_params, 0, sizeof(ot_params));
- ot_params.xin_id = psde->pipe_hw->cap->xin_id;
- ot_params.num = psde->pipe_hw->idx - SSPP_NONE;
- ot_params.width = psde->pipe_cfg.src_rect.w;
- ot_params.height = psde->pipe_cfg.src_rect.h;
- ot_params.is_wfd = !psde->is_rt_pipe;
+ ot_params.xin_id = pp->pipe_hw->cap->xin_id;
+ ot_params.num = pp->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = pp->pipe_cfg.src_rect.w;
+ ot_params.height = pp->pipe_cfg.src_rect.h;
+ ot_params.is_wfd = !pp->is_rt_pipe;
ot_params.frame_rate = crtc->mode.vrefresh;
ot_params.vbif_idx = VBIF_RT;
- ot_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
+ ot_params.clk_ctrl = pp->pipe_hw->cap->clk_ctrl;
ot_params.rd = true;
sde_vbif_set_ot_limit(sde_kms, &ot_params);
@@ -559,7 +582,7 @@ int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
return ret;
}
-static inline void _sde_plane_set_scanout(struct drm_plane *plane,
+static inline void _sde_plane_set_scanout(struct sde_phy_plane *pp,
struct sde_plane_state *pstate,
struct sde_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
@@ -567,15 +590,15 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
struct sde_plane *psde;
int ret;
- if (!plane || !pstate || !pipe_cfg || !fb) {
+ if (!pp || !pstate || !pipe_cfg || !fb) {
SDE_ERROR(
- "invalid arg(s), plane %d state %d cfg %d fb %d\n",
- plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+ "invalid arg(s), phy_plane %d state %d cfg %d fb %d\n",
+ pp != NULL, pstate != NULL, pipe_cfg != NULL, fb != NULL);
return;
}
- psde = to_sde_plane(plane);
- if (!psde->pipe_hw) {
+ psde = pp->sde_plane;
+ if (!pp->pipe_hw) {
SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
return;
}
@@ -585,14 +608,15 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
- else if (psde->pipe_hw->ops.setup_sourceaddress)
- psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
+ else if (pp->pipe_hw && pp->pipe_hw->ops.setup_sourceaddress)
+ pp->pipe_hw->ops.setup_sourceaddress(pp->pipe_hw, pipe_cfg);
}
-static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
+static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
struct sde_plane_state *pstate)
{
- struct sde_hw_scaler3_cfg *cfg = psde->scaler3_cfg;
+ struct sde_plane *psde = pp->sde_plane;
+ struct sde_hw_scaler3_cfg *cfg = pp->scaler3_cfg;
int ret = 0;
cfg->dir_lut = msm_property_get_blob(
@@ -612,7 +636,7 @@ static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
return ret;
}
-static void _sde_plane_setup_scaler3(struct sde_plane *psde,
+static void _sde_plane_setup_scaler3(struct sde_phy_plane *pp,
uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
struct sde_hw_scaler3_cfg *scale_cfg,
const struct sde_format *fmt,
@@ -620,10 +644,10 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde,
{
uint32_t decimated, i;
- if (!psde || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ if (!pp || !scale_cfg || !fmt || !chroma_subsmpl_h ||
!chroma_subsmpl_v) {
SDE_ERROR("psde %pK scale_cfg %pK fmt %pK smp_h %d smp_v %d\n"
- , psde, scale_cfg, fmt, chroma_subsmpl_h,
+ , pp, scale_cfg, fmt, chroma_subsmpl_h,
chroma_subsmpl_v);
return;
}
@@ -631,11 +655,11 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde,
memset(scale_cfg, 0, sizeof(*scale_cfg));
decimated = DECIMATED_DIMENSION(src_w,
- psde->pipe_cfg.horz_decimation);
+ pp->pipe_cfg.horz_decimation);
scale_cfg->phase_step_x[SDE_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_w);
decimated = DECIMATED_DIMENSION(src_h,
- psde->pipe_cfg.vert_decimation);
+ pp->pipe_cfg.vert_decimation);
scale_cfg->phase_step_y[SDE_SSPP_COMP_0] =
mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_h);
@@ -657,9 +681,9 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde,
for (i = 0; i < SDE_MAX_PLANES; i++) {
scale_cfg->src_width[i] = DECIMATED_DIMENSION(src_w,
- psde->pipe_cfg.horz_decimation);
+ pp->pipe_cfg.horz_decimation);
scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
- psde->pipe_cfg.vert_decimation);
+ pp->pipe_cfg.vert_decimation);
if (SDE_FORMAT_IS_YUV(fmt))
scale_cfg->src_width[i] &= ~0x1;
if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
@@ -668,9 +692,9 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde,
}
scale_cfg->preload_x[i] = SDE_QSEED3_DEFAULT_PRELOAD_H;
scale_cfg->preload_y[i] = SDE_QSEED3_DEFAULT_PRELOAD_V;
- psde->pixel_ext.num_ext_pxls_top[i] =
+ pp->pixel_ext.num_ext_pxls_top[i] =
scale_cfg->src_height[i];
- psde->pixel_ext.num_ext_pxls_left[i] =
+ pp->pixel_ext.num_ext_pxls_left[i] =
scale_cfg->src_width[i];
}
if (!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
@@ -835,7 +859,7 @@ static void _sde_plane_setup_pixel_ext(struct sde_plane *psde,
}
}
-static inline void _sde_plane_setup_csc(struct sde_plane *psde)
+static inline void _sde_plane_setup_csc(struct sde_phy_plane *pp)
{
static const struct sde_csc_cfg sde_csc_YUV2RGB_601L = {
{
@@ -866,26 +890,30 @@ static inline void _sde_plane_setup_csc(struct sde_plane *psde)
{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
};
- if (!psde) {
+ struct sde_plane *psde;
+
+ if (!pp) {
SDE_ERROR("invalid plane\n");
return;
}
+ psde = pp->sde_plane;
/* revert to kernel default if override not available */
- if (psde->csc_usr_ptr)
- psde->csc_ptr = psde->csc_usr_ptr;
- else if (BIT(SDE_SSPP_CSC_10BIT) & psde->features)
- psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
+ if (pp->csc_usr_ptr)
+ pp->csc_ptr = pp->csc_usr_ptr;
+ else if (BIT(SDE_SSPP_CSC_10BIT) & pp->features)
+ pp->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
else
- psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
+ pp->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
SDE_DEBUG_PLANE(psde, "using 0x%X 0x%X 0x%X...\n",
- psde->csc_ptr->csc_mv[0],
- psde->csc_ptr->csc_mv[1],
- psde->csc_ptr->csc_mv[2]);
+ pp->csc_ptr->csc_mv[0],
+ pp->csc_ptr->csc_mv[1],
+ pp->csc_ptr->csc_mv[2]);
}
-static void sde_color_process_plane_setup(struct drm_plane *plane)
+static void sde_color_process_plane_setup(struct drm_plane *plane,
+ struct sde_phy_plane *pp)
{
struct sde_plane *psde;
struct sde_plane_state *pstate;
@@ -893,32 +921,32 @@ static void sde_color_process_plane_setup(struct drm_plane *plane)
struct drm_msm_memcol *memcol = NULL;
size_t memcol_sz = 0;
- psde = to_sde_plane(plane);
+ psde = pp->sde_plane;
pstate = to_sde_plane_state(plane->state);
hue = (uint32_t) sde_plane_get_property(pstate, PLANE_PROP_HUE_ADJUST);
- if (psde->pipe_hw->ops.setup_pa_hue)
- psde->pipe_hw->ops.setup_pa_hue(psde->pipe_hw, &hue);
+ if (pp->pipe_hw->ops.setup_pa_hue)
+ pp->pipe_hw->ops.setup_pa_hue(pp->pipe_hw, &hue);
saturation = (uint32_t) sde_plane_get_property(pstate,
PLANE_PROP_SATURATION_ADJUST);
- if (psde->pipe_hw->ops.setup_pa_sat)
- psde->pipe_hw->ops.setup_pa_sat(psde->pipe_hw, &saturation);
+ if (pp->pipe_hw->ops.setup_pa_sat)
+ pp->pipe_hw->ops.setup_pa_sat(pp->pipe_hw, &saturation);
value = (uint32_t) sde_plane_get_property(pstate,
PLANE_PROP_VALUE_ADJUST);
- if (psde->pipe_hw->ops.setup_pa_val)
- psde->pipe_hw->ops.setup_pa_val(psde->pipe_hw, &value);
+ if (pp->pipe_hw->ops.setup_pa_val)
+ pp->pipe_hw->ops.setup_pa_val(pp->pipe_hw, &value);
contrast = (uint32_t) sde_plane_get_property(pstate,
PLANE_PROP_CONTRAST_ADJUST);
- if (psde->pipe_hw->ops.setup_pa_cont)
- psde->pipe_hw->ops.setup_pa_cont(psde->pipe_hw, &contrast);
+ if (pp->pipe_hw->ops.setup_pa_cont)
+ pp->pipe_hw->ops.setup_pa_cont(pp->pipe_hw, &contrast);
- if (psde->pipe_hw->ops.setup_pa_memcolor) {
+ if (pp->pipe_hw->ops.setup_pa_memcolor) {
/* Skin memory color setup */
memcol = msm_property_get_blob(&psde->property_info,
pstate->property_blobs,
&memcol_sz,
PLANE_PROP_SKIN_COLOR);
- psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+ pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
MEMCOLOR_SKIN, memcol);
/* Sky memory color setup */
@@ -926,7 +954,7 @@ static void sde_color_process_plane_setup(struct drm_plane *plane)
pstate->property_blobs,
&memcol_sz,
PLANE_PROP_SKY_COLOR);
- psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+ pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
MEMCOLOR_SKY, memcol);
/* Foliage memory color setup */
@@ -934,87 +962,89 @@ static void sde_color_process_plane_setup(struct drm_plane *plane)
pstate->property_blobs,
&memcol_sz,
PLANE_PROP_FOLIAGE_COLOR);
- psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+ pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
MEMCOLOR_FOLIAGE, memcol);
}
}
-static void _sde_plane_setup_scaler(struct sde_plane *psde,
+static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
const struct sde_format *fmt,
struct sde_plane_state *pstate)
{
struct sde_hw_pixel_ext *pe;
uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+ struct sde_plane *psde;
- if (!psde || !fmt) {
- SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
- psde != 0, fmt != 0, pstate != 0);
+ if (!pp || !fmt || !pstate || !pp->sde_plane) {
+ SDE_ERROR("invalid arg(s), phy_plane %d fmt %d\n",
+ pp != NULL, fmt != NULL);
return;
}
+ psde = pp->sde_plane;
- pe = &(psde->pixel_ext);
+ pe = &(pp->pixel_ext);
- psde->pipe_cfg.horz_decimation =
+ pp->pipe_cfg.horz_decimation =
sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
- psde->pipe_cfg.vert_decimation =
+ pp->pipe_cfg.vert_decimation =
sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
/* don't chroma subsample if decimating */
- chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 :
+ chroma_subsmpl_h = pp->pipe_cfg.horz_decimation ? 1 :
drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
- chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 :
+ chroma_subsmpl_v = pp->pipe_cfg.vert_decimation ? 1 :
drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
/* update scaler */
- if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+ if (pp->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
int error;
- error = _sde_plane_setup_scaler3_lut(psde, pstate);
- if (error || !psde->pixel_ext_usr) {
+ error = _sde_plane_setup_scaler3_lut(pp, pstate);
+ if (error || !pp->pixel_ext_usr) {
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
/* calculate default config for QSEED3 */
- _sde_plane_setup_scaler3(psde,
- psde->pipe_cfg.src_rect.w,
- psde->pipe_cfg.src_rect.h,
- psde->pipe_cfg.dst_rect.w,
- psde->pipe_cfg.dst_rect.h,
- psde->scaler3_cfg, fmt,
+ _sde_plane_setup_scaler3(pp,
+ pp->pipe_cfg.src_rect.w,
+ pp->pipe_cfg.src_rect.h,
+ pp->pipe_cfg.dst_rect.w,
+ pp->pipe_cfg.dst_rect.h,
+ pp->scaler3_cfg, fmt,
chroma_subsmpl_h, chroma_subsmpl_v);
}
- } else if (!psde->pixel_ext_usr) {
+ } else if (!pp->pixel_ext_usr) {
uint32_t deci_dim, i;
/* calculate default configuration for QSEED2 */
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
SDE_DEBUG_PLANE(psde, "default config\n");
- deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.w,
- psde->pipe_cfg.horz_decimation);
+ deci_dim = DECIMATED_DIMENSION(pp->pipe_cfg.src_rect.w,
+ pp->pipe_cfg.horz_decimation);
_sde_plane_setup_scaler2(psde,
deci_dim,
- psde->pipe_cfg.dst_rect.w,
+ pp->pipe_cfg.dst_rect.w,
pe->phase_step_x,
pe->horz_filter, fmt, chroma_subsmpl_h);
if (SDE_FORMAT_IS_YUV(fmt))
deci_dim &= ~0x1;
- _sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.w,
- psde->pipe_cfg.dst_rect.w, deci_dim,
+ _sde_plane_setup_pixel_ext(psde, pp->pipe_cfg.src_rect.w,
+ pp->pipe_cfg.dst_rect.w, deci_dim,
pe->phase_step_x,
pe->roi_w,
pe->num_ext_pxls_left,
pe->num_ext_pxls_right, pe->horz_filter, fmt,
chroma_subsmpl_h, 0);
- deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.h,
- psde->pipe_cfg.vert_decimation);
+ deci_dim = DECIMATED_DIMENSION(pp->pipe_cfg.src_rect.h,
+ pp->pipe_cfg.vert_decimation);
_sde_plane_setup_scaler2(psde,
deci_dim,
- psde->pipe_cfg.dst_rect.h,
+ pp->pipe_cfg.dst_rect.h,
pe->phase_step_y,
pe->vert_filter, fmt, chroma_subsmpl_v);
- _sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.h,
- psde->pipe_cfg.dst_rect.h, deci_dim,
+ _sde_plane_setup_pixel_ext(psde, pp->pipe_cfg.src_rect.h,
+ pp->pipe_cfg.dst_rect.h, deci_dim,
pe->phase_step_y,
pe->roi_h,
pe->num_ext_pxls_top,
@@ -1052,22 +1082,22 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde,
* @alpha: 8-bit fill alpha value, 255 selects 100% alpha
* Returns: 0 on success
*/
-static int _sde_plane_color_fill(struct sde_plane *psde,
+static int _sde_plane_color_fill(struct sde_phy_plane *pp,
uint32_t color, uint32_t alpha)
{
const struct sde_format *fmt;
- if (!psde) {
+ if (!pp) {
SDE_ERROR("invalid plane\n");
return -EINVAL;
}
- if (!psde->pipe_hw) {
- SDE_ERROR_PLANE(psde, "invalid plane h/w pointer\n");
+ if (!pp->pipe_hw) {
+ SDE_ERROR_PLANE(pp->sde_plane, "invalid plane h/w pointer\n");
return -EINVAL;
}
- SDE_DEBUG_PLANE(psde, "\n");
+ SDE_DEBUG_PLANE(pp->sde_plane, "\n");
/*
* select fill format to match user property expectation,
@@ -1076,26 +1106,26 @@ static int _sde_plane_color_fill(struct sde_plane *psde,
fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
/* update sspp */
- if (fmt && psde->pipe_hw->ops.setup_solidfill) {
- psde->pipe_hw->ops.setup_solidfill(psde->pipe_hw,
+ if (fmt && pp->pipe_hw->ops.setup_solidfill) {
+ pp->pipe_hw->ops.setup_solidfill(pp->pipe_hw,
(color & 0xFFFFFF) | ((alpha & 0xFF) << 24));
/* override scaler/decimation if solid fill */
- psde->pipe_cfg.src_rect.x = 0;
- psde->pipe_cfg.src_rect.y = 0;
- psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
- psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
+ pp->pipe_cfg.src_rect.x = 0;
+ pp->pipe_cfg.src_rect.y = 0;
+ pp->pipe_cfg.src_rect.w = pp->pipe_cfg.dst_rect.w;
+ pp->pipe_cfg.src_rect.h = pp->pipe_cfg.dst_rect.h;
- _sde_plane_setup_scaler(psde, fmt, 0);
+ _sde_plane_setup_scaler(pp, fmt, NULL);
- if (psde->pipe_hw->ops.setup_format)
- psde->pipe_hw->ops.setup_format(psde->pipe_hw,
+ if (pp->pipe_hw->ops.setup_format)
+ pp->pipe_hw->ops.setup_format(pp->pipe_hw,
fmt, SDE_SSPP_SOLID_FILL);
- if (psde->pipe_hw->ops.setup_rects)
- psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
- &psde->pipe_cfg, &psde->pixel_ext,
- psde->scaler3_cfg);
+ if (pp->pipe_hw->ops.setup_rects)
+ pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+ &pp->pipe_cfg, &pp->pixel_ext,
+ pp->scaler3_cfg);
}
return 0;
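
For reference, the solid-fill word programmed through setup_solidfill() packs the 24-bit RGB value into the low bytes and the 8-bit alpha into bits 31:24. A stand-alone sketch with a worked value:

	static inline uint32_t pack_solidfill(uint32_t rgb, uint32_t alpha)
	{
		return (rgb & 0xFFFFFF) | ((alpha & 0xFF) << 24);
	}

	/* pack_solidfill(0xFFFFFF, 0x0) == 0x00FFFFFF, the 0%-alpha white
	 * frame that sde_plane_flush() programs on error. */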
@@ -1113,6 +1143,8 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
struct sde_rect src, dst;
bool q16_data = true;
int idx;
+ struct sde_phy_plane *pp;
+ uint32_t num_of_phy_planes = 0, maxlinewidth = 0xFFFF;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -1170,19 +1202,24 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
}
}
- if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
- memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
+ memset(&(pp->pipe_cfg), 0,
+ sizeof(struct sde_hw_pipe_cfg));
- _sde_plane_set_scanout(plane, pstate, &psde->pipe_cfg, fb);
+ _sde_plane_set_scanout(pp, pstate, &pp->pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ pp->is_rt_pipe = sde_crtc_is_rt(crtc);
+ _sde_plane_set_qos_ctrl(pp, false, SDE_PLANE_QOS_PANIC_CTRL);
+ }
/* early out if nothing dirty */
if (!pstate->dirty)
return 0;
- pstate->pending = true;
-
- psde->is_rt_pipe = sde_crtc_is_rt(crtc);
- _sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
+ memset(&src, 0, sizeof(struct sde_rect));
/* update roi config */
if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
POPULATE_RECT(&src, state->src_x, state->src_y,
@@ -1201,72 +1238,99 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
BIT(SDE_DRM_DEINTERLACE)) {
SDE_DEBUG_PLANE(psde, "deinterlace\n");
for (idx = 0; idx < SDE_MAX_PLANES; ++idx)
- psde->pipe_cfg.layout.plane_pitch[idx] <<= 1;
+ pp->pipe_cfg.layout.plane_pitch[idx] <<= 1;
src.h /= 2;
src.y = DIV_ROUND_UP(src.y, 2);
src.y &= ~0x1;
}
+ }
+
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (maxlinewidth > pp->pipe_sblk->maxlinewidth)
+ maxlinewidth = pp->pipe_sblk->maxlinewidth;
+ num_of_phy_planes++;
+ }
+
+ /*
+	 * A single physical plane is enough if the plane width is still
+	 * within the per-pipe line-width limit.
+ */
+ if (maxlinewidth >= (src.x + src.w))
+ num_of_phy_planes = 1;
- psde->pipe_cfg.src_rect = src;
- psde->pipe_cfg.dst_rect = dst;
+ if (num_of_phy_planes > 1) {
+ /* Adjust width for multi-pipe */
+ src.w /= num_of_phy_planes;
+ dst.w /= num_of_phy_planes;
+ }
+
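
The loop below assigns each pipe an equal-width stripe offset by its index. An equivalent stand-alone sketch of the intended arithmetic (struct and helper names are illustrative):

	struct rect { uint32_t x, y, w, h; };

	static struct rect slice_for_pipe(struct rect full, uint32_t n_pipes,
					  uint32_t index)
	{
		struct rect r = full;

		r.w = full.w / n_pipes;		/* equal-width stripes */
		r.x = full.x + r.w * index;	/* fetch offset for this pipe */
		return r;
	}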
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		/*
+		 * Adjust offset for multi-pipe: derive each slice from the
+		 * base rectangles so offsets do not accumulate across the
+		 * loop iterations.
+		 */
+		pp->pipe_cfg.src_rect = src;
+		pp->pipe_cfg.dst_rect = dst;
+		pp->pipe_cfg.src_rect.x = src.x + src.w * pp->index;
+		pp->pipe_cfg.dst_rect.x = dst.x + dst.w * pp->index;
/* check for color fill */
- psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
+ pp->color_fill = (uint32_t)sde_plane_get_property(pstate,
PLANE_PROP_COLOR_FILL);
- if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+ if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
/* skip remaining processing on color fill */
pstate->dirty = 0x0;
- } else if (psde->pipe_hw->ops.setup_rects) {
- _sde_plane_setup_scaler(psde, fmt, pstate);
+ } else if (pp->pipe_hw->ops.setup_rects) {
+ _sde_plane_setup_scaler(pp, fmt, pstate);
- psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
- &psde->pipe_cfg, &psde->pixel_ext,
- psde->scaler3_cfg);
+ pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+ &pp->pipe_cfg, &pp->pixel_ext,
+ pp->scaler3_cfg);
}
- }
- if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
- psde->pipe_hw->ops.setup_format) {
- src_flags = 0x0;
- SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
+ if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
+ pp->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+ SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
sde_plane_get_property(pstate, PLANE_PROP_ROTATION));
- if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
- BIT(DRM_REFLECT_X))
- src_flags |= SDE_SSPP_FLIP_LR;
- if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
- BIT(DRM_REFLECT_Y))
- src_flags |= SDE_SSPP_FLIP_UD;
-
- /* update format */
- psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags);
-
- /* update csc */
- if (SDE_FORMAT_IS_YUV(fmt))
- _sde_plane_setup_csc(psde);
- else
- psde->csc_ptr = 0;
- }
+ if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION)
+ & BIT(DRM_REFLECT_X))
+ src_flags |= SDE_SSPP_FLIP_LR;
+ if (sde_plane_get_property(pstate,
+ PLANE_PROP_ROTATION) & BIT(DRM_REFLECT_Y))
+ src_flags |= SDE_SSPP_FLIP_UD;
+
+ /* update format */
+ pp->pipe_hw->ops.setup_format(pp->pipe_hw,
+ fmt, src_flags);
+
+ /* update csc */
+ if (SDE_FORMAT_IS_YUV(fmt))
+ _sde_plane_setup_csc(pp);
+ else
+ pp->csc_ptr = NULL;
+ }
- sde_color_process_plane_setup(plane);
+ sde_color_process_plane_setup(plane, pp);
- /* update sharpening */
- if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
- psde->pipe_hw->ops.setup_sharpening) {
- psde->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
- psde->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
- psde->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
- psde->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+ /* update sharpening */
+ if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
+ pp->pipe_hw->ops.setup_sharpening) {
+ pp->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+ pp->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+ pp->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+ pp->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
- psde->pipe_hw->ops.setup_sharpening(psde->pipe_hw,
- &psde->sharp_cfg);
- }
+ pp->pipe_hw->ops.setup_sharpening(pp->pipe_hw,
+ &pp->sharp_cfg);
+ }
- _sde_plane_set_qos_lut(plane, fb);
- _sde_plane_set_danger_lut(plane, fb);
+ _sde_plane_set_qos_lut(pp, fb);
+ _sde_plane_set_danger_lut(pp, fb);
- if (plane->type != DRM_PLANE_TYPE_CURSOR) {
- _sde_plane_set_qos_ctrl(plane, true, SDE_PLANE_QOS_PANIC_CTRL);
- _sde_plane_set_ot_limit(plane, crtc);
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _sde_plane_set_qos_ctrl(pp, true,
+ SDE_PLANE_QOS_PANIC_CTRL);
+ _sde_plane_set_ot_limit(pp, crtc);
+ }
}
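
The whole per-pipe sequence above is driven by pstate->dirty: each programming step runs only when its state category changed, and the mask is cleared once every pipe is programmed. A sketch of the gating, with illustrative flag names:

	#define DIRTY_RECTS	(1u << 0)
	#define DIRTY_FORMAT	(1u << 1)

	static void program_pipes(uint32_t *dirty)
	{
		if (*dirty & DIRTY_RECTS) {
			/* setup_rects() runs on every physical pipe */
		}
		if (*dirty & DIRTY_FORMAT) {
			/* setup_format(), then the CSC programming */
		}
		*dirty = 0;	/* nothing pending until the next atomic update */
	}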
/* clear dirty */
@@ -1393,10 +1457,12 @@ static int sde_plane_atomic_check(struct drm_plane *plane,
uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
uint32_t max_upscale, max_downscale, min_src_size, max_linewidth;
bool q16_data = true;
+ struct sde_phy_plane *pp;
+ uint32_t num_of_phy_planes = 0;
if (!plane || !state) {
- SDE_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != NULL, state != NULL);
ret = -EINVAL;
goto exit;
}
@@ -1404,11 +1470,8 @@ static int sde_plane_atomic_check(struct drm_plane *plane,
psde = to_sde_plane(plane);
pstate = to_sde_plane_state(state);
- if (!psde->pipe_sblk) {
- SDE_ERROR_PLANE(psde, "invalid catalog\n");
- ret = -EINVAL;
- goto exit;
- }
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+ num_of_phy_planes++;
deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
@@ -1422,10 +1485,6 @@ static int sde_plane_atomic_check(struct drm_plane *plane,
src_deci_w = DECIMATED_DIMENSION(src.w, deci_w);
src_deci_h = DECIMATED_DIMENSION(src.h, deci_h);
- max_upscale = psde->pipe_sblk->maxupscale;
- max_downscale = psde->pipe_sblk->maxdwnscale;
- max_linewidth = psde->pipe_sblk->maxlinewidth;
-
SDE_DEBUG_PLANE(psde, "check %d -> %d\n",
sde_plane_enabled(plane->state), sde_plane_enabled(state));
@@ -1436,73 +1495,87 @@ static int sde_plane_atomic_check(struct drm_plane *plane,
min_src_size = SDE_FORMAT_IS_YUV(fmt) ? 2 : 1;
- if (SDE_FORMAT_IS_YUV(fmt) &&
- (!(psde->features & SDE_SSPP_SCALER) ||
- !(psde->features & (BIT(SDE_SSPP_CSC)
- | BIT(SDE_SSPP_CSC_10BIT))))) {
- SDE_ERROR_PLANE(psde,
- "plane doesn't have scaler/csc for yuv\n");
- ret = -EINVAL;
-
- /* check src bounds */
- } else if (state->fb->width > MAX_IMG_WIDTH ||
- state->fb->height > MAX_IMG_HEIGHT ||
- src.w < min_src_size || src.h < min_src_size ||
- CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
- CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
- SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
- src.x, src.y, src.w, src.h);
- ret = -E2BIG;
-
- /* valid yuv image */
- } else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1) || (src.y & 0x1) ||
- (src.w & 0x1) || (src.h & 0x1))) {
- SDE_ERROR_PLANE(psde, "invalid yuv source %u, %u, %ux%u\n",
- src.x, src.y, src.w, src.h);
- ret = -EINVAL;
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (!pp->pipe_sblk) {
+ SDE_ERROR("invalid plane catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
- /* min dst support */
- } else if (dst.w < 0x1 || dst.h < 0x1) {
- SDE_ERROR_PLANE(psde, "invalid dest rect %u, %u, %ux%u\n",
- dst.x, dst.y, dst.w, dst.h);
- ret = -EINVAL;
+ max_upscale = pp->pipe_sblk->maxupscale;
+ max_downscale = pp->pipe_sblk->maxdwnscale;
+ max_linewidth = pp->pipe_sblk->maxlinewidth;
- /* decimation validation */
- } else if (deci_w || deci_h) {
- if ((deci_w > psde->pipe_sblk->maxhdeciexp) ||
- (deci_h > psde->pipe_sblk->maxvdeciexp)) {
+ if (SDE_FORMAT_IS_YUV(fmt) &&
+ (!(pp->features & SDE_SSPP_SCALER) ||
+ !(pp->features & (BIT(SDE_SSPP_CSC)
+ | BIT(SDE_SSPP_CSC_10BIT))))) {
SDE_ERROR_PLANE(psde,
- "too much decimation requested\n");
+ "plane doesn't have scaler/csc for yuv\n");
ret = -EINVAL;
- } else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
- SDE_ERROR_PLANE(psde,
- "decimation requires linear fetch\n");
+
+ /* check src bounds */
+ } else if (state->fb->width > MAX_IMG_WIDTH ||
+ state->fb->height > MAX_IMG_HEIGHT ||
+ src.w < min_src_size || src.h < min_src_size ||
+ CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
+ CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
+ SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
+ src.x, src.y, src.w, src.h);
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1)
+ || (src.y & 0x1) || (src.w & 0x1)
+ || (src.h & 0x1))) {
+			SDE_ERROR_PLANE(psde,
+				"invalid yuv source %u, %u, %ux%u\n",
+				src.x, src.y, src.w, src.h);
ret = -EINVAL;
- }
- } else if (!(psde->features & SDE_SSPP_SCALER) &&
- ((src.w != dst.w) || (src.h != dst.h))) {
- SDE_ERROR_PLANE(psde,
- "pipe doesn't support scaling %ux%u->%ux%u\n",
- src.w, src.h, dst.w, dst.h);
- ret = -EINVAL;
+ /* min dst support */
+ } else if (dst.w < 0x1 || dst.h < 0x1) {
+			SDE_ERROR_PLANE(psde,
+				"invalid dest rect %u, %u, %ux%u\n",
+				dst.x, dst.y, dst.w, dst.h);
+ ret = -EINVAL;
- /* check decimated source width */
- } else if (src_deci_w > max_linewidth) {
- SDE_ERROR_PLANE(psde,
- "invalid src w:%u, deci w:%u, line w:%u\n",
- src.w, src_deci_w, max_linewidth);
- ret = -E2BIG;
+ /* decimation validation */
+ } else if (deci_w || deci_h) {
+ if ((deci_w > pp->pipe_sblk->maxhdeciexp) ||
+ (deci_h > pp->pipe_sblk->maxvdeciexp)) {
+ SDE_ERROR_PLANE(psde,
+ "too much decimation requested\n");
+ ret = -EINVAL;
+ } else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+ SDE_ERROR_PLANE(psde,
+ "decimation requires linear fetch\n");
+ ret = -EINVAL;
+ }
- /* check max scaler capability */
- } else if (((src_deci_w * max_upscale) < dst.w) ||
- ((src_deci_h * max_upscale) < dst.h) ||
- ((dst.w * max_downscale) < src_deci_w) ||
- ((dst.h * max_downscale) < src_deci_h)) {
- SDE_ERROR_PLANE(psde,
- "too much scaling requested %ux%u->%ux%u\n",
- src_deci_w, src_deci_h, dst.w, dst.h);
- ret = -E2BIG;
+ } else if (!(pp->features & SDE_SSPP_SCALER) &&
+ ((src.w != dst.w) || (src.h != dst.h))) {
+ SDE_ERROR_PLANE(psde,
+ "pipe doesn't support scaling %ux%u->%ux%u\n",
+ src.w, src.h, dst.w, dst.h);
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (src_deci_w > max_linewidth * num_of_phy_planes) {
+ SDE_ERROR_PLANE(psde,
+ "invalid src w:%u, deci w:%u, line w:%u, num_phy_planes:%u\n",
+ src.w, src_deci_w, max_linewidth,
+ num_of_phy_planes);
+ ret = -E2BIG;
+
+ /* check max scaler capability */
+ } else if (((src_deci_w * max_upscale) < dst.w) ||
+ ((src_deci_h * max_upscale) < dst.h) ||
+ ((dst.w * max_downscale) < src_deci_w) ||
+ ((dst.h * max_downscale) < src_deci_h)) {
+ SDE_ERROR_PLANE(psde,
+ "too much scaling requested %ux%u->%ux%u\n",
+ src_deci_w, src_deci_h, dst.w, dst.h);
+ ret = -E2BIG;
+ }
}
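
A worked example of the relaxed line-width check above, assuming DECIMATED_DIMENSION(w, d) rounds up while shifting right by the decimation exponent:

	static bool linewidth_ok(uint32_t src_w, uint32_t deci_w,
				 uint32_t max_lw, uint32_t num_pipes)
	{
		/* assumed: DECIMATED_DIMENSION(w, d) == (w + (1 << d) - 1) >> d */
		uint32_t src_deci_w = (src_w + (1u << deci_w) - 1) >> deci_w;

		return src_deci_w <= max_lw * num_pipes;
	}

	/* linewidth_ok(4096, 0, 2048, 1) fails, but the same source passes
	 * once two physical pipes share it: linewidth_ok(4096, 0, 2048, 2). */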
modeset_update:
@@ -1519,6 +1592,7 @@ exit:
void sde_plane_flush(struct drm_plane *plane)
{
struct sde_plane *psde;
+ struct sde_phy_plane *pp;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -1531,14 +1605,17 @@ void sde_plane_flush(struct drm_plane *plane)
* These updates have to be done immediately before the plane flush
* timing, and may not be moved to the atomic_update/mode_set functions.
*/
- if (psde->is_error)
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (psde->is_error)
/* force white frame with 0% alpha pipe output on error */
- _sde_plane_color_fill(psde, 0xFFFFFF, 0x0);
- else if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
- /* force 100% alpha */
- _sde_plane_color_fill(psde, psde->color_fill, 0xFF);
- else if (psde->pipe_hw && psde->csc_ptr && psde->pipe_hw->ops.setup_csc)
- psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
+ _sde_plane_color_fill(pp, 0xFFFFFF, 0x0);
+ else if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _sde_plane_color_fill(pp, pp->color_fill, 0xFF);
+ else if (pp->pipe_hw && pp->csc_ptr &&
+ pp->pipe_hw->ops.setup_csc)
+ pp->pipe_hw->ops.setup_csc(pp->pipe_hw, pp->csc_ptr);
+ }
/* flag h/w flush complete */
if (plane->state)
@@ -1592,25 +1669,60 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
static const struct drm_prop_enum_list e_src_config[] = {
{SDE_DRM_DEINTERLACE, "deinterlace"}
};
+ static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+ {SDE_DRM_FB_NON_SEC, "non_sec"},
+ {SDE_DRM_FB_SEC, "sec"},
+ {SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
+ {SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
+ };
const struct sde_format_extended *format_list;
struct sde_kms_info *info;
struct sde_plane *psde = to_sde_plane(plane);
int zpos_max = 255;
int zpos_def = 0;
char feature_name[256];
+ struct sde_phy_plane *pp;
+ uint32_t features = 0xFFFFFFFF, nformats = 64;
+	u32 maxlinewidth = 0, maxupscale = 0, maxdwnscale = 0;
+	u32 maxhdeciexp = 0, maxvdeciexp = 0;
if (!plane || !psde) {
SDE_ERROR("invalid plane\n");
return;
- } else if (!psde->pipe_hw || !psde->pipe_sblk) {
- SDE_ERROR("invalid plane, pipe_hw %d pipe_sblk %d\n",
- psde->pipe_hw != 0, psde->pipe_sblk != 0);
- return;
- } else if (!catalog) {
+ }
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (!pp->pipe_hw || !pp->pipe_sblk) {
+			SDE_ERROR("invalid phy_plane, pipe_hw %d pipe_sblk %d\n",
+				pp->pipe_hw != NULL,
+				pp->pipe_sblk != NULL);
+ return;
+ }
+ }
+ if (!catalog) {
SDE_ERROR("invalid catalog\n");
return;
}
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ /* Get common features for all pipes */
+ features &= pp->features;
+ if (nformats > pp->nformats) {
+ nformats = pp->nformats;
+ format_list = pp->pipe_sblk->format_list;
+ }
+ if (maxlinewidth < pp->pipe_sblk->maxlinewidth)
+ maxlinewidth = pp->pipe_sblk->maxlinewidth;
+ if (maxupscale < pp->pipe_sblk->maxupscale)
+ maxupscale = pp->pipe_sblk->maxupscale;
+ if (maxdwnscale < pp->pipe_sblk->maxdwnscale)
+ maxdwnscale = pp->pipe_sblk->maxdwnscale;
+ if (maxhdeciexp < pp->pipe_sblk->maxhdeciexp)
+ maxhdeciexp = pp->pipe_sblk->maxhdeciexp;
+ if (maxvdeciexp < pp->pipe_sblk->maxvdeciexp)
+ maxvdeciexp = pp->pipe_sblk->maxvdeciexp;
+ break;
+ }
+
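
Since one DRM plane now fronts several physical pipes, only features shared by all of them can be exposed as properties; the features &= pp->features intersection above is equivalent to this stand-alone sketch:

	static uint32_t common_features(const uint32_t *pipe_features, int count)
	{
		uint32_t common = 0xFFFFFFFF;
		int i;

		for (i = 0; i < count; i++)
			common &= pipe_features[i];	/* keep only shared bits */
		return common;
	}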
if (sde_is_custom_client()) {
if (catalog->mixer_count && catalog->mixer &&
catalog->mixer[0].sblk->maxblendstages) {
@@ -1633,19 +1745,24 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
msm_property_install_range(&psde->property_info, "input_fence",
0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
- if (psde->pipe_sblk->maxhdeciexp) {
- msm_property_install_range(&psde->property_info, "h_decimate",
- 0x0, 0, psde->pipe_sblk->maxhdeciexp, 0,
- PLANE_PROP_H_DECIMATE);
- }
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (pp->pipe_sblk->maxhdeciexp) {
+ msm_property_install_range(&psde->property_info,
+ "h_decimate", 0x0, 0,
+ pp->pipe_sblk->maxhdeciexp, 0,
+ PLANE_PROP_H_DECIMATE);
+ }
- if (psde->pipe_sblk->maxvdeciexp) {
- msm_property_install_range(&psde->property_info, "v_decimate",
- 0x0, 0, psde->pipe_sblk->maxvdeciexp, 0,
+ if (pp->pipe_sblk->maxvdeciexp) {
+ msm_property_install_range(&psde->property_info,
+ "v_decimate", 0x0, 0,
+ pp->pipe_sblk->maxvdeciexp, 0,
PLANE_PROP_V_DECIMATE);
+ }
+ break;
}
- if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+ if (features & BIT(SDE_SSPP_SCALER_QSEED3)) {
msm_property_install_volatile_range(&psde->property_info,
"scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
msm_property_install_blob(&psde->property_info, "lut_ed", 0,
@@ -1654,38 +1771,38 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
PLANE_PROP_SCALER_LUT_CIR);
msm_property_install_blob(&psde->property_info, "lut_sep", 0,
PLANE_PROP_SCALER_LUT_SEP);
- } else if (psde->features & SDE_SSPP_SCALER) {
+ } else if (features & SDE_SSPP_SCALER) {
msm_property_install_volatile_range(&psde->property_info,
"scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
}
- if (psde->features & BIT(SDE_SSPP_CSC)) {
+ if (features & BIT(SDE_SSPP_CSC)) {
msm_property_install_volatile_range(&psde->property_info,
"csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
}
- if (psde->features & BIT(SDE_SSPP_HSIC)) {
+ if (features & BIT(SDE_SSPP_HSIC)) {
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_HUE_V",
- psde->pipe_sblk->hsic_blk.version >> 16);
+ pp->pipe_sblk->hsic_blk.version >> 16);
msm_property_install_range(&psde->property_info,
feature_name, 0, 0, 0xFFFFFFFF, 0,
PLANE_PROP_HUE_ADJUST);
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_SATURATION_V",
- psde->pipe_sblk->hsic_blk.version >> 16);
+ pp->pipe_sblk->hsic_blk.version >> 16);
msm_property_install_range(&psde->property_info,
feature_name, 0, 0, 0xFFFFFFFF, 0,
PLANE_PROP_SATURATION_ADJUST);
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_VALUE_V",
- psde->pipe_sblk->hsic_blk.version >> 16);
+ pp->pipe_sblk->hsic_blk.version >> 16);
msm_property_install_range(&psde->property_info,
feature_name, 0, 0, 0xFFFFFFFF, 0,
PLANE_PROP_VALUE_ADJUST);
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_CONTRAST_V",
- psde->pipe_sblk->hsic_blk.version >> 16);
+ pp->pipe_sblk->hsic_blk.version >> 16);
msm_property_install_range(&psde->property_info,
feature_name, 0, 0, 0xFFFFFFFF, 0,
PLANE_PROP_CONTRAST_ADJUST);
@@ -1701,9 +1818,13 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
msm_property_install_enum(&psde->property_info, "src_config", 0x0, 1,
e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG);
- if (psde->pipe_hw->ops.setup_solidfill)
- msm_property_install_range(&psde->property_info, "color_fill",
- 0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ if (pp->pipe_hw->ops.setup_solidfill)
+ msm_property_install_range(&psde->property_info,
+ "color_fill", 0, 0, 0xFFFFFFFF, 0,
+ PLANE_PROP_COLOR_FILL);
+ break;
+ }
info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
if (!info) {
@@ -1715,7 +1836,6 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_INFO);
sde_kms_info_reset(info);
- format_list = psde->pipe_sblk->format_list;
if (format_list) {
sde_kms_info_start(info, "pixel_formats");
while (format_list->fourcc_format) {
@@ -1727,51 +1847,55 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
sde_kms_info_stop(info);
}
- sde_kms_info_add_keyint(info, "max_linewidth",
- psde->pipe_sblk->maxlinewidth);
- sde_kms_info_add_keyint(info, "max_upscale",
- psde->pipe_sblk->maxupscale);
- sde_kms_info_add_keyint(info, "max_downscale",
- psde->pipe_sblk->maxdwnscale);
- sde_kms_info_add_keyint(info, "max_horizontal_deci",
- psde->pipe_sblk->maxhdeciexp);
- sde_kms_info_add_keyint(info, "max_vertical_deci",
- psde->pipe_sblk->maxvdeciexp);
+ sde_kms_info_add_keyint(info, "max_linewidth", maxlinewidth);
+ sde_kms_info_add_keyint(info, "max_upscale", maxupscale);
+ sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale);
+ sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp);
+ sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp);
msm_property_set_blob(&psde->property_info, &psde->blob_info,
info->data, info->len, PLANE_PROP_INFO);
kfree(info);
- if (psde->features & BIT(SDE_SSPP_MEMCOLOR)) {
+ if (features & BIT(SDE_SSPP_MEMCOLOR)) {
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_SKIN_COLOR_V",
- psde->pipe_sblk->memcolor_blk.version >> 16);
+ pp->pipe_sblk->memcolor_blk.version >> 16);
msm_property_install_blob(&psde->property_info, feature_name, 0,
PLANE_PROP_SKIN_COLOR);
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_SKY_COLOR_V",
- psde->pipe_sblk->memcolor_blk.version >> 16);
+ pp->pipe_sblk->memcolor_blk.version >> 16);
msm_property_install_blob(&psde->property_info, feature_name, 0,
PLANE_PROP_SKY_COLOR);
snprintf(feature_name, sizeof(feature_name), "%s%d",
"SDE_SSPP_FOLIAGE_COLOR_V",
- psde->pipe_sblk->memcolor_blk.version >> 16);
+ pp->pipe_sblk->memcolor_blk.version >> 16);
msm_property_install_blob(&psde->property_info, feature_name, 0,
PLANE_PROP_FOLIAGE_COLOR);
}
+
+ msm_property_install_enum(&psde->property_info, "fb_translation_mode",
+			0x0, 0, e_fb_translation_mode,
+ ARRAY_SIZE(e_fb_translation_mode),
+ PLANE_PROP_FB_TRANSLATION_MODE);
}
-static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
+static inline void _sde_plane_set_csc_v1(struct sde_phy_plane *pp,
+ void *usr_ptr)
{
struct sde_drm_csc_v1 csc_v1;
+ struct sde_plane *psde;
int i;
- if (!psde) {
- SDE_ERROR("invalid plane\n");
+ if (!pp) {
+ SDE_ERROR("invalid phy_plane\n");
return;
}
+ psde = pp->sde_plane;
- psde->csc_usr_ptr = NULL;
+ pp->csc_usr_ptr = NULL;
if (!usr_ptr) {
SDE_DEBUG_PLANE(psde, "csc data removed\n");
return;
@@ -1784,30 +1908,33 @@ static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
/* populate from user space */
for (i = 0; i < SDE_CSC_MATRIX_COEFF_SIZE; ++i)
- psde->csc_cfg.csc_mv[i] = csc_v1.ctm_coeff[i] >> 16;
+ pp->csc_cfg.csc_mv[i] = csc_v1.ctm_coeff[i] >> 16;
for (i = 0; i < SDE_CSC_BIAS_SIZE; ++i) {
- psde->csc_cfg.csc_pre_bv[i] = csc_v1.pre_bias[i];
- psde->csc_cfg.csc_post_bv[i] = csc_v1.post_bias[i];
+ pp->csc_cfg.csc_pre_bv[i] = csc_v1.pre_bias[i];
+ pp->csc_cfg.csc_post_bv[i] = csc_v1.post_bias[i];
}
for (i = 0; i < SDE_CSC_CLAMP_SIZE; ++i) {
- psde->csc_cfg.csc_pre_lv[i] = csc_v1.pre_clamp[i];
- psde->csc_cfg.csc_post_lv[i] = csc_v1.post_clamp[i];
+ pp->csc_cfg.csc_pre_lv[i] = csc_v1.pre_clamp[i];
+ pp->csc_cfg.csc_post_lv[i] = csc_v1.post_clamp[i];
}
- psde->csc_usr_ptr = &psde->csc_cfg;
+ pp->csc_usr_ptr = &pp->csc_cfg;
}
-static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr)
+static inline void _sde_plane_set_scaler_v1(struct sde_phy_plane *pp,
+ void *usr)
{
struct sde_drm_scaler_v1 scale_v1;
struct sde_hw_pixel_ext *pe;
+ struct sde_plane *psde;
int i;
- if (!psde) {
- SDE_ERROR("invalid plane\n");
+ if (!pp) {
+ SDE_ERROR("invalid phy_plane\n");
return;
}
+ psde = pp->sde_plane;
- psde->pixel_ext_usr = false;
+ pp->pixel_ext_usr = false;
if (!usr) {
SDE_DEBUG_PLANE(psde, "scale data removed\n");
return;
@@ -1819,7 +1946,7 @@ static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr)
}
/* populate from user space */
- pe = &(psde->pixel_ext);
+ pe = &(pp->pixel_ext);
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
for (i = 0; i < SDE_MAX_PLANES; i++) {
pe->init_phase_x[i] = scale_v1.init_phase_x[i];
@@ -1844,26 +1971,28 @@ static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr)
pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
}
- psde->pixel_ext_usr = true;
+ pp->pixel_ext_usr = true;
SDE_DEBUG_PLANE(psde, "user property data copied\n");
}
-static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde,
+static inline void _sde_plane_set_scaler_v2(struct sde_phy_plane *pp,
struct sde_plane_state *pstate, void *usr)
{
struct sde_drm_scaler_v2 scale_v2;
struct sde_hw_pixel_ext *pe;
int i;
struct sde_hw_scaler3_cfg *cfg;
+ struct sde_plane *psde;
- if (!psde) {
- SDE_ERROR("invalid plane\n");
+ if (!pp) {
+ SDE_ERROR("invalid phy_plane\n");
return;
}
+ psde = pp->sde_plane;
- cfg = psde->scaler3_cfg;
- psde->pixel_ext_usr = false;
+ cfg = pp->scaler3_cfg;
+ pp->pixel_ext_usr = false;
if (!usr) {
SDE_DEBUG_PLANE(psde, "scale data removed\n");
return;
@@ -1875,7 +2004,7 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde,
}
/* populate from user space */
- pe = &(psde->pixel_ext);
+ pe = &(pp->pixel_ext);
memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
cfg->enable = scale_v2.enable;
cfg->dir_en = scale_v2.dir_en;
@@ -1933,7 +2062,7 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde,
pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
}
- psde->pixel_ext_usr = true;
+ pp->pixel_ext_usr = true;
SDE_DEBUG_PLANE(psde, "user property data copied\n");
}
@@ -1945,6 +2074,7 @@ static int sde_plane_atomic_set_property(struct drm_plane *plane,
struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
struct sde_plane_state *pstate;
int idx, ret = -EINVAL;
+ struct sde_phy_plane *pp;
SDE_DEBUG_PLANE(psde, "\n");
@@ -1965,14 +2095,24 @@ static int sde_plane_atomic_set_property(struct drm_plane *plane,
_sde_plane_set_input_fence(psde, pstate, val);
break;
case PLANE_PROP_CSC_V1:
- _sde_plane_set_csc_v1(psde, (void *)val);
+ list_for_each_entry(pp, &psde->phy_plane_head,
+ phy_plane_list) {
+ _sde_plane_set_csc_v1(pp, (void *)val);
+ }
break;
case PLANE_PROP_SCALER_V1:
- _sde_plane_set_scaler_v1(psde, (void *)val);
+ list_for_each_entry(pp, &psde->phy_plane_head,
+ phy_plane_list) {
+ _sde_plane_set_scaler_v1(pp,
+ (void *)val);
+ }
break;
case PLANE_PROP_SCALER_V2:
- _sde_plane_set_scaler_v2(psde, pstate,
- (void *)val);
+ list_for_each_entry(pp, &psde->phy_plane_head,
+ phy_plane_list) {
+ _sde_plane_set_scaler_v2(pp, pstate,
+ (void *)val);
+ }
break;
default:
/* nothing to do */
@@ -2019,12 +2159,15 @@ static int sde_plane_atomic_get_property(struct drm_plane *plane,
static void sde_plane_destroy(struct drm_plane *plane)
{
struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+ struct sde_phy_plane *pp, *n;
SDE_DEBUG_PLANE(psde, "\n");
if (psde) {
- _sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
-
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ _sde_plane_set_qos_ctrl(pp,
+ false, SDE_PLANE_QOS_PANIC_CTRL);
+ }
debugfs_remove_recursive(psde->debugfs_root);
if (psde->blob_info)
@@ -2037,8 +2180,13 @@ static void sde_plane_destroy(struct drm_plane *plane)
/* this will destroy the states as well */
drm_plane_cleanup(plane);
- if (psde->pipe_hw)
- sde_hw_sspp_destroy(psde->pipe_hw);
+ list_for_each_entry_safe(pp, n,
+ &psde->phy_plane_head, phy_plane_list) {
+ if (pp->pipe_hw)
+ sde_hw_sspp_destroy(pp->pipe_hw);
+ list_del(&pp->phy_plane_list);
+ kfree(pp);
+ }
kfree(psde);
}
@@ -2174,9 +2322,22 @@ static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
.atomic_update = sde_plane_atomic_update,
};
-enum sde_sspp sde_plane_pipe(struct drm_plane *plane)
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane, uint32_t index)
{
- return plane ? to_sde_plane(plane)->pipe : SSPP_NONE;
+ struct sde_plane *sde_plane = to_sde_plane(plane);
+ struct sde_phy_plane *pp;
+ int i = 0;
+ enum sde_sspp default_sspp = SSPP_NONE;
+
+ list_for_each_entry(pp, &sde_plane->phy_plane_head, phy_plane_list) {
+ if (i == 0)
+ default_sspp = pp->pipe;
+ if (i == index)
+ return pp->pipe;
+ i++;
+ }
+
+ return default_sspp;
}
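
With the new index argument, callers that only care about the first physical pipe keep the old behavior by passing 0; an out-of-range index falls back to the first pipe. A hypothetical call site:

	enum sde_sspp id = sde_plane_pipe(plane, 0);	/* first physical pipe */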
static ssize_t _sde_plane_danger_read(struct file *file,
@@ -2208,10 +2369,16 @@ static ssize_t _sde_plane_danger_read(struct file *file,
static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
{
struct drm_plane *plane;
+ struct sde_plane *psde;
+ struct sde_phy_plane *pp;
drm_for_each_plane(plane, kms->dev) {
if (plane->fb && plane->state) {
- sde_plane_danger_signal_ctrl(plane, enable);
+ psde = to_sde_plane(plane);
+ list_for_each_entry(pp, &psde->phy_plane_head,
+ phy_plane_list) {
+ sde_plane_danger_signal_ctrl(pp, enable);
+ }
SDE_DEBUG("plane:%d img:%dx%d ",
plane->base.id, plane->fb->width,
plane->fb->height);
@@ -2229,7 +2396,7 @@ static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
}
static ssize_t _sde_plane_danger_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
+ const char __user *user_buf, size_t count, loff_t *ppos)
{
struct sde_kms *kms = file->private_data;
struct sde_mdss_cfg *cfg = kms->catalog;
@@ -2271,85 +2438,166 @@ static const struct file_operations sde_plane_danger_enable = {
.write = _sde_plane_danger_write,
};
-static void _sde_plane_init_debugfs(struct sde_plane *psde, struct sde_kms *kms)
+static void _sde_plane_init_debugfs(struct sde_plane *psde,
+ struct sde_kms *kms)
{
const struct sde_sspp_sub_blks *sblk = 0;
const struct sde_sspp_cfg *cfg = 0;
+ struct sde_phy_plane *pp;
+
+ if (!psde || !kms) {
+ SDE_ERROR("invalid arg(s), psde %d kms %d\n",
+ psde != NULL, kms != NULL);
+ return;
+ }
- if (psde && psde->pipe_hw)
- cfg = psde->pipe_hw->cap;
- if (cfg)
+ /* create overall sub-directory for the pipe */
+ psde->debugfs_root = debugfs_create_dir(psde->pipe_name,
+ sde_debugfs_get_root(kms));
+ if (!psde->debugfs_root)
+ return;
+
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+ debugfs_create_u32("pipe", S_IRUGO | S_IWUSR,
+ psde->debugfs_root, &pp->pipe);
+
+ if (!pp->pipe_hw || !pp->pipe_hw->cap ||
+ !pp->pipe_hw->cap->sblk)
+ continue;
+ cfg = pp->pipe_hw->cap;
sblk = cfg->sblk;
- if (kms && sblk) {
- /* create overall sub-directory for the pipe */
- psde->debugfs_root =
- debugfs_create_dir(psde->pipe_name,
- sde_debugfs_get_root(kms));
- if (psde->debugfs_root) {
- /* don't error check these */
- debugfs_create_x32("features", S_IRUGO | S_IWUSR,
- psde->debugfs_root, &psde->features);
-
- /* add register dump support */
- sde_debugfs_setup_regset32(&psde->debugfs_src,
- sblk->src_blk.base + cfg->base,
- sblk->src_blk.len,
- kms);
- sde_debugfs_create_regset32("src_blk", S_IRUGO,
- psde->debugfs_root, &psde->debugfs_src);
-
- sde_debugfs_setup_regset32(&psde->debugfs_scaler,
- sblk->scaler_blk.base + cfg->base,
- sblk->scaler_blk.len,
- kms);
- sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
- psde->debugfs_root,
- &psde->debugfs_scaler);
-
- sde_debugfs_setup_regset32(&psde->debugfs_csc,
- sblk->csc_blk.base + cfg->base,
- sblk->csc_blk.len,
- kms);
- sde_debugfs_create_regset32("csc_blk", S_IRUGO,
- psde->debugfs_root, &psde->debugfs_csc);
-
- debugfs_create_u32("xin_id",
- S_IRUGO,
- psde->debugfs_root,
- (u32 *) &cfg->xin_id);
- debugfs_create_u32("clk_ctrl",
- S_IRUGO,
- psde->debugfs_root,
- (u32 *) &cfg->clk_ctrl);
- debugfs_create_x32("creq_vblank",
- S_IRUGO | S_IWUSR,
- psde->debugfs_root,
- (u32 *) &sblk->creq_vblank);
- debugfs_create_x32("danger_vblank",
- S_IRUGO | S_IWUSR,
- psde->debugfs_root,
- (u32 *) &sblk->danger_vblank);
-
- debugfs_create_file("disable_danger",
- S_IRUGO | S_IWUSR,
- psde->debugfs_root,
- kms, &sde_plane_danger_enable);
+ /* don't error check these */
+ debugfs_create_x32("features", S_IRUGO | S_IWUSR,
+ psde->debugfs_root, &pp->features);
+
+ /* add register dump support */
+ sde_debugfs_setup_regset32(&psde->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ sde_debugfs_create_regset32("src_blk", S_IRUGO,
+ psde->debugfs_root, &psde->debugfs_src);
+
+ sde_debugfs_setup_regset32(&psde->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
+ psde->debugfs_root,
+ &psde->debugfs_scaler);
+
+ sde_debugfs_setup_regset32(&psde->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ sde_debugfs_create_regset32("csc_blk", S_IRUGO,
+ psde->debugfs_root, &psde->debugfs_csc);
+
+ debugfs_create_u32("xin_id",
+ S_IRUGO,
+ psde->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ S_IRUGO,
+ psde->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ S_IRUGO | S_IWUSR,
+ psde->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ S_IRUGO | S_IWUSR,
+ psde->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ S_IRUGO | S_IWUSR,
+ psde->debugfs_root,
+ kms, &sde_plane_danger_enable);
+
+ break;
+ }
+}
+
+static int _sde_init_phy_plane(struct sde_kms *sde_kms,
+ struct sde_plane *psde, uint32_t pipe, uint32_t index,
+ struct sde_phy_plane *pp)
+{
+ int rc = 0;
+
+ pp->pipe_hw = sde_rm_get_hw_by_id(&sde_kms->rm,
+ SDE_HW_BLK_SSPP, pipe);
+ if (!pp->pipe_hw) {
+		SDE_ERROR("no SSPP resource found for id=%d\n", pipe);
+ rc = -EINVAL;
+ goto end;
+ } else if (!pp->pipe_hw->cap || !pp->pipe_hw->cap->sblk) {
+ SDE_ERROR("[%u]SSPP returned invalid cfg\n", pipe);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* cache features mask for later */
+ pp->features = pp->pipe_hw->cap->features;
+ pp->pipe_sblk = pp->pipe_hw->cap->sblk;
+ if (!pp->pipe_sblk) {
+ SDE_ERROR("invalid sblk on pipe %d\n", pipe);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (pp->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+ pp->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
+ GFP_KERNEL);
+ if (!pp->scaler3_cfg) {
+ SDE_ERROR("[%u]failed to allocate scale struct\n",
+ pipe);
+ rc = -ENOMEM;
+ goto end;
}
}
+
+ /* add plane to DRM framework */
+ pp->nformats = sde_populate_formats(
+ pp->pipe_sblk->format_list,
+ pp->formats,
+ NULL,
+ ARRAY_SIZE(pp->formats));
+
+ if (!pp->nformats) {
+ SDE_ERROR("[%u]no valid formats for plane\n", pipe);
+ if (pp->scaler3_cfg)
+ kzfree(pp->scaler3_cfg);
+
+ rc = -EINVAL;
+ goto end;
+ }
+
+ pp->sde_plane = psde;
+ pp->pipe = pipe;
+ pp->index = index;
+
+end:
+ return rc;
}
/* initialize plane */
struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
- unsigned long possible_crtcs)
+ unsigned long possible_crtcs, bool vp_enabled)
{
struct drm_plane *plane = NULL;
struct sde_plane *psde;
+ struct sde_phy_plane *pp, *n;
struct msm_drm_private *priv;
struct sde_kms *kms;
enum drm_plane_type type;
int ret = -EINVAL;
+ struct sde_vp_cfg *vp;
+ struct sde_vp_sub_blks *vp_sub;
+ uint32_t features = 0xFFFFFFFF, nformats = 64, formats[64];
+ uint32_t index = 0;
if (!dev) {
SDE_ERROR("[%u]device is NULL\n", pipe);
@@ -2383,60 +2631,77 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &psde->base;
- psde->pipe = pipe;
psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
- /* initialize underlying h/w driver */
- psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
- if (IS_ERR(psde->pipe_hw)) {
- SDE_ERROR("[%u]SSPP init failed\n", pipe);
- ret = PTR_ERR(psde->pipe_hw);
- goto clean_plane;
- } else if (!psde->pipe_hw->cap || !psde->pipe_hw->cap->sblk) {
- SDE_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
- goto clean_sspp;
- }
+ INIT_LIST_HEAD(&psde->phy_plane_head);
- /* cache features mask for later */
- psde->features = psde->pipe_hw->cap->features;
- psde->pipe_sblk = psde->pipe_hw->cap->sblk;
- if (!psde->pipe_sblk) {
- SDE_ERROR("[%u]invalid sblk\n", pipe);
- goto clean_sspp;
- }
+ /* initialize underlying h/w driver */
+ if (vp_enabled) {
+ vp = &(kms->catalog->vp[pipe]);
+ list_for_each_entry(vp_sub, &vp->sub_blks, pipeid_list) {
+ pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp) {
+ SDE_ERROR("out of memory\n");
+ ret = -ENOMEM;
+ goto clean_plane;
+ }
- if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
- psde->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
- GFP_KERNEL);
- if (!psde->scaler3_cfg) {
- SDE_ERROR("[%u]failed to allocate scale struct\n",
- pipe);
+ ret = _sde_init_phy_plane(kms, psde, vp_sub->sspp_id,
+ index, pp);
+ if (ret) {
+ SDE_ERROR("_sde_init_phy_plane error vp=%d\n",
+ pipe);
+ kfree(pp);
+ ret = -EINVAL;
+ goto clean_plane;
+ }
+ /* Get common features for all pipes */
+ features &= pp->features;
+ if (nformats > pp->nformats) {
+ nformats = pp->nformats;
+ memcpy(formats, pp->formats,
+ sizeof(formats));
+ }
+ list_add_tail(&pp->phy_plane_list,
+ &psde->phy_plane_head);
+ index++;
+ psde->num_of_phy_planes++;
+ }
+ } else {
+ pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp) {
+ SDE_ERROR("out of memory\n");
ret = -ENOMEM;
- goto clean_sspp;
+ goto clean_plane;
}
- }
-
- /* add plane to DRM framework */
- psde->nformats = sde_populate_formats(psde->pipe_sblk->format_list,
- psde->formats,
- 0,
- ARRAY_SIZE(psde->formats));
- if (!psde->nformats) {
- SDE_ERROR("[%u]no valid formats for plane\n", pipe);
- goto clean_sspp;
+ ret = _sde_init_phy_plane(kms, psde, pipe, index, pp);
+ if (ret) {
+ SDE_ERROR("_sde_init_phy_plane error id=%d\n",
+ pipe);
+ kfree(pp);
+ ret = -EINVAL;
+ goto clean_plane;
+ }
+ features = pp->features;
+ nformats = pp->nformats;
+ memcpy(formats, pp->formats,
+ sizeof(uint32_t) * 64);
+ list_add_tail(&pp->phy_plane_list,
+ &psde->phy_plane_head);
+ psde->num_of_phy_planes++;
}
- if (psde->features & BIT(SDE_SSPP_CURSOR))
+ if (features & BIT(SDE_SSPP_CURSOR))
type = DRM_PLANE_TYPE_CURSOR;
else if (primary_plane)
type = DRM_PLANE_TYPE_PRIMARY;
else
type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, possible_crtcs,
- &sde_plane_funcs, psde->formats, psde->nformats, type);
+ &sde_plane_funcs, formats, nformats, type);
if (ret)
- goto clean_sspp;
+ goto clean_plane;
/* success! finalize initialization */
drm_plane_helper_add(plane, &sde_plane_helper_funcs);
@@ -2458,14 +2723,20 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
DRM_INFO("%s created for pipe %u\n", psde->pipe_name, pipe);
return plane;
-clean_sspp:
- if (psde && psde->pipe_hw)
- sde_hw_sspp_destroy(psde->pipe_hw);
-
- if (psde && psde->scaler3_cfg)
- kfree(psde->scaler3_cfg);
clean_plane:
- kfree(psde);
+ if (psde) {
+ list_for_each_entry_safe(pp, n,
+ &psde->phy_plane_head, phy_plane_list) {
+ if (pp->pipe_hw)
+ sde_hw_sspp_destroy(pp->pipe_hw);
+
+ kfree(pp->scaler3_cfg);
+ list_del(&pp->phy_plane_list);
+ kfree(pp);
+ }
+ kfree(psde);
+ }
+
exit:
return ERR_PTR(ret);
}
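
The error path above must walk the list with the _safe iterator: plain list_for_each_entry would read pp->phy_plane_list.next after kfree(pp) has freed the node. A minimal sketch of the rule (list head name is illustrative):

	struct sde_phy_plane *pp, *n;

	/* 'n' caches the next node up front, so freeing 'pp' is safe */
	list_for_each_entry_safe(pp, n, &phy_plane_head, phy_plane_list) {
		list_del(&pp->phy_plane_list);
		kfree(pp);
	}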
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index 1514f633c61e..7b91822d4cde 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -59,9 +59,10 @@ struct sde_plane_state {
/**
* sde_plane_pipe - return sspp identifier for the given plane
* @plane: Pointer to DRM plane object
+ * @index: Plane index
* Returns: sspp identifier of the given plane
*/
-enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane, uint32_t index);
/**
* sde_plane_flush - final plane operations before commit flush
@@ -75,10 +76,11 @@ void sde_plane_flush(struct drm_plane *plane);
* @pipe: sde hardware pipe identifier
* @primary_plane: true if this pipe is primary plane for crtc
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @vp_enabled: Flag indicating if virtual planes enabled
*/
struct drm_plane *sde_plane_init(struct drm_device *dev,
uint32_t pipe, bool primary_plane,
- unsigned long possible_crtcs);
+ unsigned long possible_crtcs, bool vp_enabled);
/**
* sde_plane_wait_input_fence - wait for input fence object
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index 1d27b27d265c..fe4b73b4ffea 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,7 @@
#include "sde_hw_wb.h"
#include "sde_encoder.h"
#include "sde_connector.h"
+#include "sde_hw_sspp.h"
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
@@ -197,6 +198,33 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
return false;
}
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+ struct list_head *blk_list;
+ struct sde_rm_hw_blk *blk;
+ void *hw = NULL;
+
+ if (!rm || type >= SDE_HW_BLK_MAX) {
+		SDE_ERROR("invalid rm or hw blk type\n");
+ return hw;
+ }
+
+ blk_list = &rm->hw_blks[type];
+
+ list_for_each_entry(blk, blk_list, list) {
+ if (blk->id == id) {
+ hw = blk->hw;
+ SDE_DEBUG("found type %d %s id %d\n",
+ type, blk->type_name, blk->id);
+ return hw;
+ }
+ }
+
+ SDE_DEBUG("no match, type %d id=%d\n", type, id);
+
+ return hw;
+}
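
A usage sketch, mirroring how plane init consumes this helper (the SSPP id and the cast target are examples):

	struct sde_hw_pipe *hw;

	hw = sde_rm_get_hw_by_id(&sde_kms->rm, SDE_HW_BLK_SSPP, SSPP_VIG0);
	if (!hw)
		return -EINVAL;	/* block was never created in sde_rm_init() */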
+
static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
{
switch (type) {
@@ -222,7 +250,8 @@ static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
sde_hw_wb_destroy(hw);
break;
case SDE_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
+ sde_hw_sspp_destroy(hw);
+ break;
case SDE_HW_BLK_TOP:
/* Top is a singleton, not managed in hw_blks list */
case SDE_HW_BLK_MAX:
@@ -310,7 +339,9 @@ static int _sde_rm_hw_blk_create(
name = "wb";
break;
case SDE_HW_BLK_SSPP:
- /* SSPPs are not managed by the resource manager */
+ hw = sde_hw_sspp_init(id, (void __iomem *)mmio, cat);
+ name = "sspp";
+ break;
case SDE_HW_BLK_TOP:
/* Top is a singleton, not managed in hw_blks list */
case SDE_HW_BLK_MAX:
@@ -369,6 +400,13 @@ int sde_rm_init(struct sde_rm *rm,
goto fail;
}
+ for (i = 0; i < cat->sspp_count; i++) {
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_SSPP,
+ cat->sspp[i].id, &cat->sspp[i]);
+ if (rc)
+ goto fail;
+ }
+
/* Interrogate HW catalog and create tracking items for hw blocks */
for (i = 0; i < cat->mixer_count; i++) {
struct sde_lm_cfg *lm = &cat->mixer[i];
@@ -1074,12 +1112,6 @@ void _sde_rm_release_rsvp(
}
kfree(rsvp);
-
- (void) msm_property_set_property(
- sde_connector_get_propinfo(conn),
- sde_connector_get_property_values(conn->state),
- CONNECTOR_PROP_TOPOLOGY_NAME,
- SDE_RM_TOPOLOGY_UNKNOWN);
}
void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
@@ -1115,6 +1147,12 @@ void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
rsvp->enc_id);
_sde_rm_release_rsvp(rm, rsvp, conn);
+
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(conn),
+ sde_connector_get_property_values(conn->state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
}
}
@@ -1132,8 +1170,12 @@ static int _sde_rm_commit_rsvp(
sde_connector_get_property_values(conn_state),
CONNECTOR_PROP_TOPOLOGY_NAME,
rsvp->topology);
- if (ret)
+ if (ret) {
+ SDE_ERROR("failed to set topology name property, ret %d\n",
+ ret);
_sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+ return ret;
+ }
/* Swap next rsvp to be the active */
for (type = 0; type < SDE_HW_BLK_MAX; type++) {
@@ -1226,6 +1268,12 @@ int sde_rm_reserve(
_sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
rsvp_cur = NULL;
_sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(
+ conn_state->connector),
+ sde_connector_get_property_values(conn_state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
}
/* Check the proposed reservation, store it in hw's "next" field */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 855b12ce8150..1cc22c5fbbf4 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -185,6 +185,18 @@ void sde_rm_init_hw_iter(
bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
/**
+ * sde_rm_get_hw_by_id - retrieve hw object given hw type and hw id
+ * Does a single pass through the hardware list and returns the first
+ * hardware block matching the given type and id.
+ * @rm: SDE Resource Manager handle
+ * @type: hw type
+ * @id: hw id
+ * @Return: pointer to the matching hw block, or NULL if no match is found
+ */
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id);
+
+/**
* sde_rm_check_property_topctl - validate property bitmask before it is set
* @val: user's proposed topology control bitmask
* @Return: 0 on success or error
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
new file mode 100644
index 000000000000..69ab367307ea
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -0,0 +1,512 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_edid_parser.h"
+
+/* TODO: copied from drm_edid.c and mdss_hdmi_edid.c; remove once ELD is in use */
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+ RESERVED,
+ AUDIO_DATA_BLOCK,
+ VIDEO_DATA_BLOCK,
+ VENDOR_SPECIFIC_DATA_BLOCK,
+ SPEAKER_ALLOCATION_DATA_BLOCK,
+ VESA_DTC_DATA_BLOCK,
+ RESERVED2,
+ USE_EXTENDED_TAG
+};
+
+static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == ext_id)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+static u8 *sde_find_cea_extension(struct edid *edid)
+{
+ return sde_find_edid_extension(edid, SDE_CEA_EXT);
+}
+
+static int
+sde_cea_db_payload_len(const u8 *db)
+{
+ return db[0] & 0x1f;
+}
+
+static int
+sde_cea_db_tag(const u8 *db)
+{
+ return db[0] >> 5;
+}
+
+static int
+sde_cea_revision(const u8 *cea)
+{
+ return cea[1];
+}
+
+static int
+sde_cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+ /* Data block offset in CEA extension block */
+ *start = 4;
+ *end = cea[2];
+ if (*end == 0)
+ *end = 127;
+ if (*end < 4 || *end > 127)
+ return -ERANGE;
+ return 0;
+}
+
+#define sde_for_each_cea_db(cea, i, start, end) \
+for ((i) = (start); \
+(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
+(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+
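
The helpers above decode the CEA-861 data block header byte: the tag lives in bits 7:5 and the payload length in bits 4:0. A worked example:

	static void decode_cea_db_header(uint8_t hdr, int *tag, int *len)
	{
		*tag = hdr >> 5;	/* 0x43 >> 5 == 2, VIDEO_DATA_BLOCK */
		*len = hdr & 0x1f;	/* 0x43 & 0x1f == 3 payload bytes */
	}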
+static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+ u8 *db = NULL;
+ u8 *cea = NULL;
+
+ if (!edid) {
+ pr_err("%s: invalid input\n", __func__);
+ return NULL;
+ }
+
+ cea = sde_find_cea_extension(edid);
+
+ if (cea && sde_cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (sde_cea_db_offsets(cea, &start, &end))
+ return NULL;
+
+ sde_for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
+ (db[1] == blk_id))
+ return db;
+ }
+ }
+ return NULL;
+}
+
+static u8 *
+sde_edid_find_block(struct edid *edid, int blk_id)
+{
+ u8 *db = NULL;
+ u8 *cea = NULL;
+
+ if (!edid) {
+ pr_err("%s: invalid input\n", __func__);
+ return NULL;
+ }
+
+ cea = sde_find_cea_extension(edid);
+
+ if (cea && sde_cea_revision(cea) >= 3) {
+ int i, start, end;
+
+ if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+ sde_for_each_cea_db(cea, i, start, end) {
+ db = &cea[i];
+ if (sde_cea_db_tag(db) == blk_id)
+ return db;
+ }
+ }
+ return NULL;
+}
+
+
+static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
+ u8 type, u8 *len)
+{
+ /* the start of data block collection, start of Video Data Block */
+ u32 offset = start_offset;
+ u32 dbc_offset = in_buf[2];
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ /*
+	 * * EDID buffer 1, byte 2 being 4 means no non-DTD/data block
+	 *   collection is present.
+	 * * EDID buffer 1, byte 2 being 0 means no non-DTD/data block
+	 *   collection is present and no DTD data is present.
+ */
+ if ((dbc_offset == 0) || (dbc_offset == 4)) {
+ SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+ return NULL;
+ }
+
+ while (offset < dbc_offset) {
+ u8 block_len = in_buf[offset] & 0x1F;
+
+ if ((offset + block_len <= dbc_offset) &&
+ (in_buf[offset] >> 5) == type) {
+ *len = block_len;
+ SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
+ type, offset, block_len);
+
+ return in_buf + offset;
+ }
+ offset += 1 + block_len;
+ }
+
+ return NULL;
+}
+
+static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
+{
+ char *vendor_id;
+ u32 id_codes;
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ if (!edid_ctrl) {
+ SDE_ERROR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ vendor_id = edid_ctrl->vendor_id;
+ id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
+ edid_ctrl->edid->mfg_id[1];
+
+ vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+ vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+ vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+ vendor_id[3] = 0;
+ SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
+ SDE_EDID_DEBUG("%s -", __func__);
+}
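
A worked example of the decode above: the EDID manufacturer bytes pack three 5-bit letters, so bytes 0x4C and 0x2D give id_codes 0x4C2D, fields 19, 1 and 13, and the PNP vendor id "SAM":

	static void decode_pnp_id(uint32_t id_codes, char vendor_id[4])
	{
		vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);	/* 'S' */
		vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);	/* 'A' */
		vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);		/* 'M' */
		vendor_id[3] = 0;
	}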
+
+static void sde_edid_set_y420_support(struct drm_connector *connector,
+u32 video_format)
+{
+ u8 cea_mode = 0;
+ struct drm_display_mode *mode;
+
+ /* Need to add Y420 support flag to the modes */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ cea_mode = drm_match_cea_mode(mode);
+ if ((cea_mode != 0) && (cea_mode == video_format)) {
+ SDE_EDID_DEBUG("%s found match for %d ", __func__,
+ video_format);
+ mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
+ }
+ }
+}
+
+static void sde_edid_parse_Y420CMDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+ u32 offset = 0;
+ u8 len = 0;
+ u8 svd_len = 0;
+ const u8 *svd = NULL;
+ u32 i = 0, j = 0;
+ u32 video_format = 0;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: edid_ctrl is NULL\n", __func__);
+ return;
+ }
+
+ if (!db) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+ SDE_EDID_DEBUG("%s +\n", __func__);
+ len = db[0] & 0x1f;
+
+ if (len < 7)
+ return;
+ /* Byte 3 to L+1 contain SVDs */
+ offset += 2;
+
+ svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
+
+ if (svd) {
+ /*moving to the next byte as vic info begins there*/
+ ++svd;
+ svd_len = svd[0] & 0x1f;
+ }
+
+ for (i = 0; i < svd_len; i++, j++) {
+ video_format = *svd & 0x7F;
+ if (db[offset] & (1 << j))
+ sde_edid_set_y420_support(connector, video_format);
+
+ if (j & 0x80) {
+ j = j/8;
+ offset++;
+ if (offset >= len)
+ break;
+ }
+ }
+
+ SDE_EDID_DEBUG("%s -\n", __func__);
+
+}
+
+static void sde_edid_parse_Y420VDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+ u8 len = db[0] & 0x1f;
+ u32 i = 0;
+ u32 video_format = 0;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+
+ /* Offset to byte 3 */
+ db += 2;
+ for (i = 0; i < len - 1; i++) {
+ video_format = *(db + i) & 0x7F;
+ /*
+ * mode was already added in get_modes()
+ * only need to set the Y420 support flag
+ */
+ sde_edid_set_y420_support(connector, video_format);
+ }
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void sde_edid_set_mode_format(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+ const u8 *db = NULL;
+ struct drm_display_mode *mode;
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+ /* Set YUV mode support flags for YCbcr420VDB */
+ db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+ Y420_VIDEO_DATA_BLOCK);
+ if (db)
+ sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
+ else
+ SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
+
+ /* Set RGB supported on all modes where YUV is not set */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
+ mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+ }
+
+
+ db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+ Y420_CAPABILITY_MAP_DATA_BLOCK);
+ if (db)
+ sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
+ else
+ SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
+
+ SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+static void _sde_edid_extract_audio_data_blocks(
+ struct sde_edid_ctrl *edid_ctrl)
+{
+ u8 len = 0;
+ u8 adb_max = 0;
+ const u8 *adb = NULL;
+ u32 offset = DBC_START_OFFSET;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+ SDE_EDID_DEBUG("%s +", __func__);
+ cea = sde_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ edid_ctrl->adb_size = 0;
+
+ memset(edid_ctrl->audio_data_block, 0,
+ sizeof(edid_ctrl->audio_data_block));
+
+ do {
+ len = 0;
+ adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+ &len);
+
+ if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+ adb_max >= MAX_NUMBER_ADB)) {
+ if (!edid_ctrl->adb_size) {
+ SDE_DEBUG("No/Invalid Audio Data Block\n");
+ return;
+ }
+
+			/* don't loop forever on an oversized/extra block */
+			break;
+ }
+
+ memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+ adb + 1, len);
+ offset = (adb - cea) + 1 + len;
+
+ edid_ctrl->adb_size += len;
+ adb_max++;
+ } while (adb);
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+static void _sde_edid_extract_speaker_allocation_data(
+ struct sde_edid_ctrl *edid_ctrl)
+{
+ u8 len;
+ const u8 *sadb = NULL;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+ SDE_EDID_DEBUG("%s +", __func__);
+ cea = sde_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
+ SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+ if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+ SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+ return;
+ }
+
+ memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+ edid_ctrl->sadb_size = len;
+
+ SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+ sadb[1],
+ (sadb[1] & BIT(0)) ? "FL/FR," : "",
+ (sadb[1] & BIT(1)) ? "LFE," : "",
+ (sadb[1] & BIT(2)) ? "FC," : "",
+ (sadb[1] & BIT(3)) ? "RL/RR," : "",
+ (sadb[1] & BIT(4)) ? "RC," : "",
+ (sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+ (sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+struct sde_edid_ctrl *sde_edid_init(void)
+{
+ struct sde_edid_ctrl *edid_ctrl = NULL;
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+ edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+ if (!edid_ctrl) {
+ SDE_ERROR("edid_ctrl alloc failed\n");
+ return NULL;
+ }
+ SDE_EDID_DEBUG("%s -\n", __func__);
+ return edid_ctrl;
+}
+
+void sde_free_edid(void **input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ kfree(edid_ctrl->edid);
+ edid_ctrl->edid = NULL;
+}
+
+void sde_edid_deinit(void **input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ sde_free_edid((void *)&edid_ctrl);
+ kfree(edid_ctrl);
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+int _sde_edid_update_modes(struct drm_connector *connector,
+ void *input)
+{
+ int rc = 0;
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ if (edid_ctrl->edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid_ctrl->edid);
+
+ rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+ sde_edid_set_mode_format(connector, edid_ctrl);
+ SDE_EDID_DEBUG("%s -", __func__);
+ return rc;
+ }
+
+ drm_mode_connector_update_edid_property(connector, NULL);
+ SDE_EDID_DEBUG("%s null edid -", __func__);
+ return rc;
+}
+
+bool sde_detect_hdmi_monitor(void *input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+ return drm_detect_hdmi_monitor(edid_ctrl->edid);
+}
+
+void sde_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter, void **input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	edid_ctrl->edid = drm_get_edid(connector, adapter);
+
+ if (!edid_ctrl->edid)
+ SDE_ERROR("EDID read failed\n");
+
+ if (edid_ctrl->edid) {
+ sde_edid_extract_vendor_id(edid_ctrl);
+ _sde_edid_extract_audio_data_blocks(edid_ctrl);
+ _sde_edid_extract_speaker_allocation_data(edid_ctrl);
+ }
+ SDE_EDID_DEBUG("%s -\n", __func__);
+}
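Taken together, the exported functions form a small lifecycle: allocate a parser, read and parse the EDID over DDC, publish modes, then free everything. A hedged usage sketch, not part of the patch; hdmi_probe_sink and its parameters are hypothetical and would come from the surrounding connector driver:

	static int hdmi_probe_sink(struct drm_connector *connector,
				   struct i2c_adapter *adapter)
	{
		struct sde_edid_ctrl *ctrl = sde_edid_init();
		void *input = ctrl;
		int modes = 0;

		if (!ctrl)
			return -ENOMEM;

		sde_get_edid(connector, adapter, &input);	/* DDC read + parse */
		if (ctrl->edid && sde_detect_hdmi_monitor(ctrl))
			modes = _sde_edid_update_modes(connector, ctrl);

		sde_edid_deinit(&input);	/* frees both the EDID and ctrl */
		return modes;
	}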
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
new file mode 100644
index 000000000000..1143dc2c7bec
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_EDID_PARSER_H_
+#define _SDE_EDID_PARSER_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+#define EDID_VENDOR_ID_SIZE 4
+
+#define SDE_CEA_EXT 0x02
+#define SDE_EXTENDED_TAG 0x07
+
+enum extended_data_block_types {
+ VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+ VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+ HDMI_VIDEO_DATA_BLOCK = 0x04,
+ HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
+	VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+	Y420_VIDEO_DATA_BLOCK = 0x0E,
+ Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+ VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+ INFOFRAME_DATA_BLOCK = 0x20,
+};
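These extended blocks all share CEA tag code 7 (SDE_EXTENDED_TAG); the codes listed above live in the byte immediately after the block header. A minimal sketch of the lookup, assuming the same header layout as the parser above; this is illustrative, not the driver's sde_edid_find_extended_tag_block():

	static const u8 *find_extended_block(const u8 *cea, u8 dtd_offset,
					     u8 ext_tag)
	{
		u8 offset = 4;	/* data block collection starts at byte 4 */

		while (offset < dtd_offset) {
			u8 len = cea[offset] & 0x1F;

			if ((cea[offset] >> 5) == SDE_EXTENDED_TAG &&
			    len >= 1 && cea[offset + 1] == ext_tag)
				return cea + offset;
			offset += 1 + len;
		}
		return NULL;
	}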
+
+#ifdef SDE_EDID_DEBUG_ENABLE
+#define SDE_EDID_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
+#else
+#define SDE_EDID_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args)
+#endif
+
+/*
+ * struct sde_edid_hdr_data - HDR Static Metadata
+ * @eotf: Electro-Optical Transfer Function
+ * @metadata_type_one: Static Metadata Type 1 support
+ * @max_luminance: Desired Content Maximum Luminance
+ * @avg_luminance: Desired Content Frame-average Luminance
+ * @min_luminance: Desired Content Minimum Luminance
+ */
+struct sde_edid_hdr_data {
+ u32 eotf;
+ bool metadata_type_one;
+ u32 max_luminance;
+ u32 avg_luminance;
+ u32 min_luminance;
+};
+
+struct sde_edid_sink_caps {
+ u32 max_pclk_in_hz;
+ bool scdc_present;
+ bool scramble_support; /* scramble support for less than 340Mcsc */
+ bool read_req_support;
+ bool osd_disparity;
+ bool dual_view_support;
+ bool ind_view_support;
+};
+
+struct sde_edid_ctrl {
+ struct edid *edid;
+ u8 pt_scan_info;
+ u8 it_scan_info;
+ u8 ce_scan_info;
+ u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+ int adb_size;
+ u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+ int sadb_size;
+ bool hdr_supported;
+ char vendor_id[EDID_VENDOR_ID_SIZE];
+ struct sde_edid_sink_caps sink_caps;
+ struct sde_edid_hdr_data hdr_data;
+};
+
+/**
+ * sde_edid_init() - allocate and initialize an edid_ctrl structure.
+ *
+ * Return: handle to sde_edid_ctrl for the client, or NULL on failure.
+ */
+struct sde_edid_ctrl *sde_edid_init(void);
+
+/**
+ * sde_edid_deinit() - deinit edid structure.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_edid_deinit(void **edid_ctrl);
+
+/**
+ * sde_get_edid() - get edid info.
+ * @connector: Handle to the drm_connector.
+ * @adapter: handle to i2c adapter for DDC read
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_get_edid(struct drm_connector *connector,
+struct i2c_adapter *adapter,
+void **edid_ctrl);
+
+/**
+ * sde_free_edid() - free edid structure.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_free_edid(void **edid_ctrl);
+
+/**
+ * sde_detect_hdmi_monitor() - check whether the sink is an HDMI monitor.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: true if the EDID identifies an HDMI sink, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *edid_ctrl);
+
+/**
+ * _sde_edid_update_modes() - populate EDID modes.
+ * @connector: Handle to the drm_connector.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: number of modes added, or 0 when no EDID is present.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+ void *edid_ctrl);
+
+#endif /* _SDE_EDID_PARSER_H_ */
+
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index ece9f4102c0e..7f8acb3ebfcd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
+ .mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index d4d8942b1347..e55f8302d08a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv31_mpeg_mthd(mpeg, mthd, data))
+ if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index d433cfa4a8ab..36af0a8927fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+ if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 949dc6101a58..7c0b58613747 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -130,7 +130,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
poll = false;
}
- if (list_empty(&therm->alarm.head) && poll)
+ if (poll)
nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
spin_unlock_irqrestore(&therm->lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d79393a..e2feccec25f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
- if (list_empty(&fan->alarm.head) && target != duty) {
+ if (target != duty) {
u16 bump_period = fan->bios.bump_period;
u16 slow_down_period = fan->bios.slow_down_period;
u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7a6597..ff9fbe7950e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
- if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+ if (percent != (duty * 100)) {
u64 next_change = (percent * fan->period_us) / 100;
if (!duty)
next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c02d8ca..9a79e91fdfdc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
/* schedule the next poll in one second */
- if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+ if (therm->func->temp_get(therm) >= 0)
nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d4dae1f12d62..79fcdb43e174 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
unsigned long flags;
LIST_HEAD(exec);
- /* move any due alarms off the pending list */
+ /* Process pending alarms. */
spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
- if (alarm->timestamp <= nvkm_timer_read(tmr))
- list_move_tail(&alarm->head, &exec);
+ /* Have we hit the earliest alarm that hasn't gone off? */
+ if (alarm->timestamp > nvkm_timer_read(tmr)) {
+ /* Schedule it. If we didn't race, we're done. */
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ if (alarm->timestamp > nvkm_timer_read(tmr))
+ break;
+ }
+
+ /* Move to completed list. We'll drop the lock before
+ * executing the callback so it can reschedule itself.
+ */
+ list_move_tail(&alarm->head, &exec);
}
- /* reschedule interrupt for next alarm time */
- if (!list_empty(&tmr->alarms)) {
- alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
- tmr->func->alarm_init(tmr, alarm->timestamp);
- } else {
+ /* Shut down interrupt if no more pending alarms. */
+ if (list_empty(&tmr->alarms))
tmr->func->alarm_fini(tmr);
- }
spin_unlock_irqrestore(&tmr->lock, flags);
- /* execute any pending alarm handlers */
+ /* Execute completed callbacks. */
list_for_each_entry_safe(alarm, atemp, &exec, head) {
list_del_init(&alarm->head);
alarm->func(alarm);
@@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
struct nvkm_alarm *list;
unsigned long flags;
- alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
- /* append new alarm to list, in soonest-alarm-first order */
+ /* Remove alarm from pending list.
+ *
+ * This both protects against the corruption of the list,
+ * and implements alarm rescheduling/cancellation.
+ */
spin_lock_irqsave(&tmr->lock, flags);
- if (!nsec) {
- if (!list_empty(&alarm->head))
- list_del(&alarm->head);
- } else {
+ list_del_init(&alarm->head);
+
+ if (nsec) {
+ /* Insert into pending list, ordered earliest to latest. */
+ alarm->timestamp = nvkm_timer_read(tmr) + nsec;
list_for_each_entry(list, &tmr->alarms, head) {
if (list->timestamp > alarm->timestamp)
break;
}
+
list_add_tail(&alarm->head, &list->head);
+
+ /* Update HW if this is now the earliest alarm. */
+ list = list_first_entry(&tmr->alarms, typeof(*list), head);
+ if (list == alarm) {
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ /* This shouldn't happen if callers aren't stupid.
+ *
+ * Worst case scenario is that it'll take roughly
+ * 4 seconds for the next alarm to trigger.
+ */
+ WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+ }
}
spin_unlock_irqrestore(&tmr->lock, flags);
-
- /* process pending alarms */
- nvkm_timer_alarm_trigger(tmr);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87f0617..7f48249f41de 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
if (stat & 0x00000001) {
- nvkm_timer_alarm_trigger(tmr);
nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+ nvkm_timer_alarm_trigger(tmr);
stat &= ~0x00000001;
}
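The reworked nvkm_timer_alarm() above boils down to three steps: unlink the alarm first (which is what makes rescheduling and cancellation safe), insert it in soonest-first order, and reprogram the hardware only when it becomes the new head of the list. A userspace sketch of that ordering logic, with illustrative names and a function pointer standing in for the alarm_init() hook:

	#include <stdint.h>

	struct alarm {
		uint64_t timestamp;
		struct alarm *next;
	};

	static void alarm_schedule(struct alarm **head, struct alarm *a,
				   void (*hw_program)(uint64_t timestamp))
	{
		struct alarm **p;

		/* Unlink first: makes re-arming and cancelling idempotent. */
		for (p = head; *p; p = &(*p)->next) {
			if (*p == a) {
				*p = a->next;
				break;
			}
		}

		/* Insert ordered earliest-to-latest. */
		for (p = head; *p && (*p)->timestamp <= a->timestamp;
		     p = &(*p)->next)
			;
		a->next = *p;
		*p = a;

		/* Only the earliest pending alarm is programmed into HW. */
		if (*head == a)
			hw_program(a->timestamp);
	}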
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 35310336dd0a..d684e2b79d2b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
- if (rbo->placements[0].fpfn < fpfn)
- rbo->placements[0].fpfn = fpfn;
+ if (rbo->placements[i].fpfn < fpfn)
+ rbo->placements[i].fpfn = fpfn;
} else {
rbo->placement.busy_placement =
&rbo->placements[i];
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8fb7213277cc..b75391495778 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -66,8 +66,11 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait(bo, false, true, false);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
goto out_unlock;
}
@@ -114,8 +117,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
+ ttm_bo_unref(&bo);
}
return VM_FAULT_RETRY;
@@ -160,6 +165,13 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
if (unlikely(ret != 0)) {
retval = ret;
+
+ if (retval == VM_FAULT_RETRY &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* The BO has already been unreserved. */
+ return retval;
+ }
+
goto out_unlock;
}
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 4f5fa8d65fe9..144367c0c28f 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
}
rcu_read_unlock();
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -635,7 +639,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 8e689b439890..6c649f7b5929 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -539,7 +539,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -702,6 +702,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -727,13 +762,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -772,13 +803,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
@@ -1093,6 +1120,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1106,24 +1134,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
-
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1166,8 +1188,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e57667ca7557..dbca128a9aa6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -591,7 +591,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -775,7 +775,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7d620e82e000..c9c04ccccdd9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -715,11 +715,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
@@ -904,17 +907,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
@@ -942,17 +944,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 0715022be6e3..dcc6651710fe 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -59,7 +59,7 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
{ adreno_is_a530, a530_vbif },
{ adreno_is_a512, a540_vbif },
{ adreno_is_a510, a530_vbif },
- { adreno_is_a508, a530_vbif },
+ { adreno_is_a508, a540_vbif },
{ adreno_is_a505, a530_vbif },
{ adreno_is_a506, a530_vbif },
};
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index f084ca9a62a1..55f906c9cb90 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1041,6 +1041,13 @@ static void _set_ft_policy(struct adreno_device *adreno_dev,
*/
if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+ /*
+	 * Set the fault tolerance policy to FT_REPLAY: the context wants
+	 * to be invalidated after a replay attempt fails, so the default
+	 * FT policy does not need to run.
+ */
+ else if (drawctxt->base.flags & KGSL_CONTEXT_INVALIDATE_ON_FAULT)
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
else
cmdobj->fault_policy = adreno_dev->ft_policy;
}
@@ -2083,7 +2090,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Turn off all the timers */
del_timer_sync(&dispatcher->timer);
del_timer_sync(&dispatcher->fault_timer);
- del_timer_sync(&adreno_dev->preempt.timer);
+ /*
+	 * Deleting an uninitialized timer can block forever on builds with
+	 * kernel debugging disabled, so only delete the preemption timer
+	 * when preemption (and hence the timer) was initialized.
+ */
+ if (adreno_is_preemption_enabled(adreno_dev))
+ del_timer_sync(&adreno_dev->preempt.timer);
mutex_lock(&device->mutex);
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 21a6399ba38e..b8ae24bc3935 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -346,6 +346,7 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
KGSL_CONTEXT_PER_CONTEXT_TS |
KGSL_CONTEXT_USER_GENERATED_TS |
KGSL_CONTEXT_NO_FAULT_TOLERANCE |
+ KGSL_CONTEXT_INVALIDATE_ON_FAULT |
KGSL_CONTEXT_CTX_SWITCH |
KGSL_CONTEXT_PRIORITY_MASK |
KGSL_CONTEXT_TYPE_MASK |
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 1de8e212a703..b2def8dea954 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -255,14 +255,25 @@ static void _deferred_put(struct work_struct *work)
kgsl_mem_entry_put(entry);
}
+static inline void
+kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
+{
+ if (entry)
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
+}
+
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
{
struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (entry != NULL)
+ if (entry != NULL) {
kref_init(&entry->refcount);
+		/* the extra ref is put by the caller once init completes */
+ kref_get(&entry->refcount);
+ INIT_WORK(&entry->work, _deferred_put);
+ }
return entry;
}
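The pattern adopted here is a creator-held extra reference: the entry starts life with two refs so a concurrent lookup-and-free cannot destroy it while the creating ioctl is still initializing it, and the creator drops its ref once the entry is committed. A generic sketch of the same idiom; obj and its helpers are illustrative, not kgsl names:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref refcount;
	};

	static void obj_release(struct kref *kref)
	{
		kfree(container_of(kref, struct obj, refcount));
	}

	static struct obj *obj_create(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o) {
			kref_init(&o->refcount);	/* ref owned by the user/idr */
			kref_get(&o->refcount);		/* ref owned by the creator */
		}
		return o;
	}

	/* After the object is published, the creator drops its reference:
	 *	kref_put(&o->refcount, obj_release);
	 */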
#ifdef CONFIG_DMA_SHARED_BUFFER
@@ -1227,7 +1238,8 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr)
spin_lock(&private->mem_lock);
idr_for_each_entry(&private->mem_idr, entry, id) {
if (GPUADDR_IN_MEMDESC(gpuaddr, &entry->memdesc)) {
- ret = kgsl_mem_entry_get(entry);
+ if (!entry->pending_free)
+ ret = kgsl_mem_entry_get(entry);
break;
}
}
@@ -1764,9 +1776,9 @@ long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
/* Commit the pointer to the context in context_idr */
write_lock(&device->context_lock);
idr_replace(&device->context_idr, context, context->id);
+ param->drawctxt_id = context->id;
write_unlock(&device->context_lock);
- param->drawctxt_id = context->id;
done:
return result;
}
@@ -1857,7 +1869,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -1875,7 +1887,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv,
return -EINVAL;
ret = gpumem_free_entry(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -1912,8 +1924,7 @@ static void gpuobj_free_fence_func(void *priv)
{
struct kgsl_mem_entry *entry = priv;
- INIT_WORK(&entry->work, _deferred_put);
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
+ kgsl_mem_entry_put_deferred(entry);
}
static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -1977,7 +1988,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv,
else
ret = -EINVAL;
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return ret;
}
@@ -2399,6 +2410,9 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
trace_kgsl_mem_map(entry, fd);
kgsl_mem_entry_commit_process(entry);
+
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
return 0;
unmap:
@@ -2705,6 +2719,9 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
trace_kgsl_mem_map(entry, param->fd);
kgsl_mem_entry_commit_process(entry);
+
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
return result;
error_attach:
@@ -3143,6 +3160,9 @@ long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv,
param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc);
param->id = entry->id;
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3166,6 +3186,9 @@ long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
param->size = (size_t) entry->memdesc.size;
param->flags = (unsigned int) entry->memdesc.flags;
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3189,6 +3212,9 @@ long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3306,6 +3332,9 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv,
trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
kgsl_mem_entry_commit_process(entry);
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
err_invalid_pages:
@@ -3334,7 +3363,13 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -EINVAL;
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
if (entry->memdesc.cur_bindings != 0) {
+ kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3343,7 +3378,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return 0;
}
@@ -3385,6 +3420,9 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv,
trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
kgsl_mem_entry_commit_process(entry);
+ /* put the extra refcount for kgsl_mem_entry_create() */
+ kgsl_mem_entry_put(entry);
+
return 0;
}
@@ -3400,7 +3438,13 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -EINVAL;
+ if (!kgsl_mem_entry_set_pend(entry)) {
+ kgsl_mem_entry_put(entry);
+ return -EBUSY;
+ }
+
if (entry->bind_tree.rb_node != NULL) {
+ kgsl_mem_entry_unset_pend(entry);
kgsl_mem_entry_put(entry);
return -EINVAL;
}
@@ -3409,7 +3453,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
/* One put for find_id(), one put for the kgsl_mem_entry_create() */
kgsl_mem_entry_put(entry);
- kgsl_mem_entry_put(entry);
+ kgsl_mem_entry_put_deferred(entry);
return 0;
}
@@ -4882,7 +4926,7 @@ static int __init kgsl_core_init(void)
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM, 0);
kgsl_events_init();
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 177b283a2dda..d93fd9bfbcd0 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -77,6 +77,7 @@ enum kgsl_event_results {
{ KGSL_CONTEXT_PER_CONTEXT_TS, "PER_CONTEXT_TS" }, \
{ KGSL_CONTEXT_USER_GENERATED_TS, "USER_TS" }, \
{ KGSL_CONTEXT_NO_FAULT_TOLERANCE, "NO_FT" }, \
+ { KGSL_CONTEXT_INVALIDATE_ON_FAULT, "INVALIDATE_ON_FAULT" }, \
{ KGSL_CONTEXT_PWR_CONSTRAINT, "PWR" }, \
{ KGSL_CONTEXT_SAVE_GMEM, "SAVE_GMEM" }
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index af9fc1c15236..57d99c451952 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1547,6 +1547,8 @@ static int _setup_user_context(struct kgsl_mmu *mmu)
ret = PTR_ERR(mmu->defaultpagetable);
mmu->defaultpagetable = NULL;
return ret;
+ } else if (mmu->defaultpagetable == NULL) {
+ return -ENOMEM;
}
}
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 6e2a0e3f2645..7f4a5a3b251f 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -560,16 +560,73 @@ static inline unsigned int _fixup_cache_range_op(unsigned int op)
}
#endif
-int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
- uint64_t size, unsigned int op)
+static int kgsl_do_cache_op(struct page *page, void *addr,
+ uint64_t offset, uint64_t size, unsigned int op)
{
+ void (*cache_op)(const void *, const void *);
+
/*
- * If the buffer is mapped in the kernel operate on that address
- * otherwise use the user address
+	 * The dmac_xxx_range functions correctly handle addresses and
+	 * sizes that are not aligned to the cacheline size.
*/
+ switch (_fixup_cache_range_op(op)) {
+ case KGSL_CACHE_OP_FLUSH:
+ cache_op = dmac_flush_range;
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ cache_op = dmac_clean_range;
+ break;
+ case KGSL_CACHE_OP_INV:
+ cache_op = dmac_inv_range;
+ break;
+ default:
+ return -EINVAL;
+ }
- void *addr = (memdesc->hostptr) ?
- memdesc->hostptr : (void *) memdesc->useraddr;
+ if (page != NULL) {
+ unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+ /*
+		 * page_address() returns the kernel virtual address of a
+		 * page. For highmem pages that address exists only while
+		 * the page is mapped, so use kmap_atomic() rather than
+		 * page_address() for high memory.
+ */
+ if (PageHighMem(page)) {
+ offset &= ~PAGE_MASK;
+
+ do {
+ unsigned int len = size;
+
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ page = pfn_to_page(pfn++);
+ addr = kmap_atomic(page);
+ cache_op(addr + offset, addr + offset + len);
+ kunmap_atomic(addr);
+
+ size -= len;
+ offset = 0;
+ } while (size);
+
+ return 0;
+ }
+
+ addr = page_address(page);
+ }
+
+ cache_op(addr + offset, addr + offset + (size_t) size);
+ return 0;
+}
+
+int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
+ uint64_t size, unsigned int op)
+{
+ void *addr = NULL;
+ struct sg_table *sgt = NULL;
+ struct scatterlist *sg;
+ unsigned int i, pos = 0;
+ int ret = 0;
if (size == 0 || size > UINT_MAX)
return -EINVAL;
@@ -578,38 +635,57 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
if ((offset + size < offset) || (offset + size < size))
return -ERANGE;
- /* Make sure the offset + size do not overflow the address */
- if (addr + ((size_t) offset + (size_t) size) < addr)
- return -ERANGE;
-
/* Check that offset+length does not exceed memdesc->size */
if (offset + size > memdesc->size)
return -ERANGE;
- /* Return quietly if the buffer isn't mapped on the CPU */
- if (addr == NULL)
- return 0;
+ if (memdesc->hostptr) {
+ addr = memdesc->hostptr;
+ /* Make sure the offset + size do not overflow the address */
+ if (addr + ((size_t) offset + (size_t) size) < addr)
+ return -ERANGE;
- addr = addr + offset;
+ ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
+ return ret;
+ }
/*
- * The dmac_xxx_range functions handle addresses and sizes that
- * are not aligned to the cacheline size correctly.
+	 * If the buffer is not mapped in the kernel, map it page by page
+	 * and perform the cache operations on the kernel mapping.
*/
+ if (memdesc->sgt != NULL)
+ sgt = memdesc->sgt;
+ else {
+ if (memdesc->pages == NULL)
+ return ret;
+
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+ }
- switch (_fixup_cache_range_op(op)) {
- case KGSL_CACHE_OP_FLUSH:
- dmac_flush_range(addr, addr + (size_t) size);
- break;
- case KGSL_CACHE_OP_CLEAN:
- dmac_clean_range(addr, addr + (size_t) size);
- break;
- case KGSL_CACHE_OP_INV:
- dmac_inv_range(addr, addr + (size_t) size);
- break;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ uint64_t sg_offset, sg_left;
+
+ if (offset >= (pos + sg->length)) {
+ pos += sg->length;
+ continue;
+ }
+ sg_offset = offset > pos ? offset - pos : 0;
+ sg_left = (sg->length - sg_offset > size) ? size :
+ sg->length - sg_offset;
+ ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
+ sg_left, op);
+ size -= sg_left;
+ if (size == 0)
+ break;
+ pos += sg->length;
}
- return 0;
+ if (memdesc->sgt == NULL)
+ kgsl_free_sgt(sgt);
+
+ return ret;
}
EXPORT_SYMBOL(kgsl_cache_range_op);
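The for_each_sg() loop above reduces to straightforward interval arithmetic: for each segment, clip the requested [offset, offset + size) range to the bytes that actually fall within that segment. The same logic in a stand-alone form, with illustrative names and plain C in place of the scatterlist API:

	#include <stdint.h>
	#include <stdio.h>

	struct segment {
		uint64_t length;
	};

	static void walk_range(const struct segment *segs, unsigned int nsegs,
			       uint64_t offset, uint64_t size)
	{
		uint64_t pos = 0;
		unsigned int i;

		for (i = 0; i < nsegs && size; i++) {
			uint64_t seg_off, seg_len;

			if (offset >= pos + segs[i].length) {
				pos += segs[i].length;	/* range begins later */
				continue;
			}

			seg_off = offset > pos ? offset - pos : 0;
			seg_len = segs[i].length - seg_off;
			if (seg_len > size)
				seg_len = size;

			printf("segment %u: operate on [%llu, %llu)\n", i,
			       (unsigned long long)seg_off,
			       (unsigned long long)(seg_off + seg_len));

			size -= seg_len;
			pos += segs[i].length;
		}
	}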