summaryrefslogtreecommitdiff
path: root/drivers/gpu
diff options
context:
space:
mode:
authorLloyd Atkinson <latkinso@codeaurora.org>2016-05-30 14:37:06 -0400
committerDhaval Patel <pdhaval@codeaurora.org>2016-08-01 12:35:45 -0700
commit10ebad22a54ad9e183acacda194d7fd036ea0ace (patch)
tree4200c924e4eb179344d395603e5c77ac573d06eb /drivers/gpu
parent034f48db2c7b129d15fbd05edc24759a31425fa3 (diff)
drm/msm/sde: command mode panel encoder support
Add support for command mode panel physical encoders. Change-Id: I82868a1aa3d5adc8ce41670840c8a3ef8b1d1bd6 Signed-off-by: Lloyd Atkinson <latkinso@codeaurora.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.c20
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys.h53
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c513
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c6
4 files changed, 581 insertions, 11 deletions
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 41010d28a706..53a001615596 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -512,6 +512,7 @@ static int sde_encoder_virt_add_phys_encs(
struct sde_encoder_virt *sde_enc,
struct sde_kms *sde_kms,
enum sde_intf intf_idx,
+ enum sde_pingpong pp_idx,
enum sde_ctl ctl_idx,
enum sde_enc_split_role split_role)
{
@@ -548,6 +549,21 @@ static int sde_encoder_virt_add_phys_encs(
++sde_enc->num_phys_encs;
}
+ if (intf_mode & DISPLAY_INTF_MODE_CMD) {
+ enc = sde_encoder_phys_cmd_init(sde_kms, intf_idx, pp_idx,
+ ctl_idx, split_role, &sde_enc->base,
+ parent_ops);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DRM_ERROR("Failed to initialize phys cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ }
+
+ sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+ ++sde_enc->num_phys_encs;
+ }
+
return 0;
}
@@ -585,6 +601,7 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
*/
const struct sde_hw_res_map *hw_res_map = NULL;
enum sde_intf intf_idx = INTF_MAX;
+ enum sde_pingpong pp_idx = PINGPONG_MAX;
enum sde_ctl ctl_idx = CTL_MAX;
u32 controller_id = disp_info->h_tile_instance[i];
enum sde_enc_split_role split_role = ENC_ROLE_SOLO;
@@ -610,13 +627,14 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
if (IS_ERR_OR_NULL(hw_res_map)) {
ret = -EINVAL;
} else {
+ pp_idx = hw_res_map->pp;
ctl_idx = hw_res_map->ctl;
}
if (!ret) {
ret = sde_encoder_virt_add_phys_encs(
disp_info->intf_mode,
- sde_enc, sde_kms, intf_idx,
+ sde_enc, sde_kms, intf_idx, pp_idx,
ctl_idx, split_role);
if (ret)
DRM_ERROR("Failed to add phys encs\n");
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 64563817d162..cc9fd9f06095 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -17,6 +17,7 @@
#include "sde_kms.h"
#include "sde_hw_intf.h"
+#include "sde_hw_pingpong.h"
#include "sde_hw_ctl.h"
#include "sde_hw_top.h"
@@ -155,6 +156,35 @@ struct sde_encoder_phys_vid {
};
/**
+ * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @intf_idx: Intf Block index used by this phys encoder
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @pp_rd_ptr_irq_idx: IRQ signifying panel's frame read pointer
+ * For CMD encoders, VBLANK is driven by the PP RD Done IRQ
+ * @pp_tx_done_irq_idx: IRQ signifying frame transmission to panel complete
+ * @pp_tx_done_wq: Wait queue that tracks when a commit is flushed
+ * to hardware after the reception of pp_done
+ * Used to prevent back to back commits
+ * @pending_cnt: Atomic counter tracking the number of kickoffs vs.
+ * the number of pp_done irqs. Should hover between 0-2
+ * Incremented when a new kickoff is scheduled
+ * Decremented in pp_done irq
+ */
+struct sde_encoder_phys_cmd {
+ struct sde_encoder_phys base;
+ int intf_idx;
+ int stream_sel;
+ struct sde_hw_pingpong *hw_pp;
+ int pp_rd_ptr_irq_idx;
+ int pp_tx_done_irq_idx;
+ wait_queue_head_t pp_tx_done_wq;
+ atomic_t pending_cnt;
+};
+
+/**
* sde_encoder_phys_vid_init - Construct a new video mode physical encoder
* @sde_kms: Pointer to the sde_kms top level
* @intf_idx: Interface index this phys_enc will control
@@ -162,7 +192,6 @@ struct sde_encoder_phys_vid {
* @split_role: Role to play in a split-panel configuration
* @parent: Pointer to the containing virtual encoder
* @parent_ops: Callbacks exposed by the parent to the phys_enc
- *
* Return: Error code or newly allocated encoder
*/
struct sde_encoder_phys *sde_encoder_phys_vid_init(
@@ -173,4 +202,26 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
struct drm_encoder *parent,
struct sde_encoder_virt_ops parent_ops);
+/**
+ * sde_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @sde_kms: Pointer to the sde_kms top level
+ * @intf_idx: Interface index this phys_enc will control
+ * @pp_idx: PingPong index this phys_enc will control
+ * @ctl_idx: Control index this phys_enc requires
+ * @split_role: Role to play in a split-panel configuration
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+ struct sde_kms *sde_kms,
+ enum sde_intf intf_idx,
+ enum sde_pingpong pp_idx,
+ enum sde_ctl ctl_idx,
+ enum sde_enc_split_role split_role,
+ struct drm_encoder *parent,
+ struct sde_encoder_virt_ops parent_ops);
+
+
+
#endif /* __sde_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 631b33686a0c..aa75d7782b06 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,14 +12,509 @@
*
*/
-#include "msm_drv.h"
-#include "sde_kms.h"
-#include "drm_crtc.h"
-#include "drm_crtc_helper.h"
+#include <linux/jiffies.h>
-#include "sde_hwio.h"
-#include "sde_hw_catalog.h"
-#include "sde_hw_intf.h"
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
#include "sde_formats.h"
-#include "sde_encoder_phys.h"
+#define to_sde_encoder_phys_cmd(x) \
+ container_of(x, struct sde_encoder_phys_cmd, base)
+
+#define DEV(phy_enc) (phy_enc->parent->dev)
+
+#define WAIT_TIMEOUT_MSEC 100
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+static inline bool sde_encoder_phys_cmd_is_master(
+ struct sde_encoder_phys *phys_enc)
+{
+ return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
+}
+
+static bool sde_encoder_phys_cmd_mode_fixup(
+ struct sde_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static void sde_encoder_phys_cmd_mode_set(
+ struct sde_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+
+ phys_enc->cached_mode = *adj_mode;
+ DBG("intf %d, caching mode:", cmd_enc->intf_idx);
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct sde_encoder_phys_cmd *cmd_enc = arg;
+ struct sde_encoder_phys *phys_enc = &cmd_enc->base;
+ int new_pending_cnt;
+
+ new_pending_cnt = atomic_dec_return(&cmd_enc->pending_cnt);
+ MSM_EVT(DEV(phys_enc), cmd_enc->hw_pp->idx, new_pending_cnt);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&cmd_enc->pp_tx_done_wq);
+
+ /* Trigger a pending flush */
+ phys_enc->parent_ops.handle_ready_for_kickoff(phys_enc->parent,
+ phys_enc);
+}
+
+static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct sde_encoder_phys_cmd *cmd_enc = arg;
+ struct sde_encoder_phys *phys_enc = &cmd_enc->base;
+
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+}
+
+static int sde_encoder_phys_cmd_register_pp_irq(
+ struct sde_encoder_phys *phys_enc,
+ enum sde_intr_type intr_type,
+ int *irq_idx,
+ void (*irq_func)(void *, int),
+ const char *irq_name)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_irq_callback irq_cb;
+ int ret = 0;
+
+ *irq_idx = sde_irq_idx_lookup(phys_enc->sde_kms, intr_type,
+ cmd_enc->hw_pp->idx);
+ if (*irq_idx < 0) {
+ DRM_ERROR(
+ "Failed to lookup IRQ index for %s with pp=%d",
+ irq_name,
+ cmd_enc->hw_pp->idx);
+ return -EINVAL;
+ }
+
+ irq_cb.func = irq_func;
+ irq_cb.arg = cmd_enc;
+ ret = sde_register_irq_callback(phys_enc->sde_kms, *irq_idx, &irq_cb);
+ if (ret) {
+ DRM_ERROR("Failed to register IRQ callback %s", irq_name);
+ return ret;
+ }
+
+ ret = sde_enable_irq(phys_enc->sde_kms, irq_idx, 1);
+ if (ret) {
+ DRM_ERROR(
+ "Failed to enable IRQ for %s, pp %d, irq_idx=%d",
+ irq_name,
+ cmd_enc->hw_pp->idx,
+ *irq_idx);
+ *irq_idx = -EINVAL;
+
+ /* Unregister callback on IRQ enable failure */
+ sde_register_irq_callback(phys_enc->sde_kms, *irq_idx, NULL);
+ return ret;
+ }
+
+ DBG("registered IRQ %s for pp %d, irq_idx=%d",
+ irq_name,
+ cmd_enc->hw_pp->idx,
+ *irq_idx);
+
+ return ret;
+}
+
+static int sde_encoder_phys_cmd_unregister_pp_irq(
+ struct sde_encoder_phys *phys_enc,
+ int irq_idx)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+
+ sde_disable_irq(phys_enc->sde_kms, &irq_idx, 1);
+ sde_register_irq_callback(phys_enc->sde_kms, irq_idx, NULL);
+
+ DBG("unregister IRQ for pp %d, irq_idx=%d\n",
+ cmd_enc->hw_pp->idx,
+ irq_idx);
+
+ return 0;
+}
+
+static void sde_encoder_phys_cmd_tearcheck_config(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode = &phys_enc->cached_mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+
+ DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+
+ if (!cmd_enc->hw_pp->ops.setup_tearcheck ||
+ !cmd_enc->hw_pp->ops.enable_tearcheck) {
+ DBG("tearcheck unsupported");
+ return;
+ }
+
+ /*
+ * TE default: dsi byte clock calculated based on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line based on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel
+ * frequency divided by the no. of rows (lines) in the LCD panel.
+ */
+ vsync_hz = clk_get_rate(phys_enc->sde_kms->vsync_clk);
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+ tc_cfg.hw_vsync_mode = 1;
+
+ /*
+ * By setting sync_cfg_height to near max register value, we essentially
+ * disable sde hw generated TE signal, since hw TE will arrive first.
+ * Only caveat is if due to error, we hit wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DBG("tc %d vsync_clk_speed_hz %u mode->vtotal %u mode->vrefresh %u",
+ cmd_enc->hw_pp->idx, vsync_hz, mode->vtotal, mode->vrefresh);
+ DBG("tc %d enable %u start_pos %u rd_ptr_irq %u",
+ tc_enable, cmd_enc->hw_pp->idx, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DBG("tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u",
+ cmd_enc->hw_pp->idx, tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
+ tc_cfg.vsync_init_val);
+ DBG("tc %d sync_cfgheight %u sync_thresh_start %u sync_thresh_cont %u",
+ cmd_enc->hw_pp->idx, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ cmd_enc->hw_pp->ops.setup_tearcheck(cmd_enc->hw_pp, &tc_cfg);
+ cmd_enc->hw_pp->ops.enable_tearcheck(cmd_enc->hw_pp, tc_enable);
+}
+
+static void sde_encoder_phys_cmd_pingpong_config(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc->hw_ctl->ops.setup_intf_cfg)
+ return;
+
+ DBG("intf %d pp %d, enabling mode:", cmd_enc->intf_idx,
+ cmd_enc->hw_pp->idx);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ intf_cfg.intf = cmd_enc->intf_idx;
+ intf_cfg.mode_3d = phys_enc->mode_3d;
+ intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ sde_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static void sde_encoder_phys_cmd_split_config(
+ struct sde_encoder_phys *phys_enc, bool enable)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_mdp *hw_mdptop = phys_enc->hw_mdptop;
+ struct split_pipe_cfg cfg = { 0 };
+
+ DBG("enable %d", enable);
+
+ cfg.en = enable;
+ cfg.mode = INTF_MODE_CMD;
+ cfg.intf = cmd_enc->intf_idx;
+ cfg.split_flush_en = enable;
+
+ if (hw_mdptop && hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+}
+
+static int sde_encoder_phys_cmd_control_vblank_irq(
+ struct sde_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ int ret = 0;
+
+ DBG("enable %d", enable);
+
+ /* Slave encoders don't report vblank */
+ if (sde_encoder_phys_cmd_is_master(phys_enc)) {
+ if (enable)
+ ret = sde_encoder_phys_cmd_register_pp_irq(phys_enc,
+ SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+ &cmd_enc->pp_rd_ptr_irq_idx,
+ sde_encoder_phys_cmd_pp_rd_ptr_irq,
+ "pp_rd_ptr");
+ else
+ ret = sde_encoder_phys_cmd_unregister_pp_irq(phys_enc,
+ cmd_enc->pp_rd_ptr_irq_idx);
+ }
+
+ if (ret)
+ DRM_ERROR("control vblank irq error %d, enable %d\n", ret,
+ enable);
+
+ return ret;
+}
+
+static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+ u32 flush_mask;
+ int ret = 0;
+
+ DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+
+ if (WARN_ON(phys_enc->enable_state == SDE_ENC_ENABLED))
+ return;
+
+ /*
+ * Only master configures master/slave configuration, so no slave check
+ * In solo configuration, solo encoder needs to program no-split
+ */
+ if (phys_enc->split_role == ENC_ROLE_MASTER)
+ sde_encoder_phys_cmd_split_config(phys_enc, true);
+ else if (phys_enc->split_role == ENC_ROLE_SOLO)
+ sde_encoder_phys_cmd_split_config(phys_enc, false);
+
+ sde_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ /* Both master and slave need to register for pp_tx_done */
+ ret = sde_encoder_phys_cmd_register_pp_irq(phys_enc,
+ SDE_IRQ_TYPE_PING_PONG_COMP,
+ &cmd_enc->pp_tx_done_irq_idx,
+ sde_encoder_phys_cmd_pp_tx_done_irq,
+ "pp_tx_done");
+
+ if (ret)
+ return;
+
+ ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+ if (ret) {
+ sde_encoder_phys_cmd_unregister_pp_irq(phys_enc,
+ cmd_enc->pp_tx_done_irq_idx);
+ return;
+ }
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+ phys_enc->enable_state = SDE_ENC_ENABLED;
+
+ DBG("Update pending flush CTL_ID %d flush_mask %x, INTF %d",
+ ctl->idx, flush_mask, cmd_enc->intf_idx);
+}
+
+static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+
+ DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+
+ if (WARN_ON(phys_enc->enable_state == SDE_ENC_DISABLED))
+ return;
+
+ sde_encoder_phys_cmd_unregister_pp_irq(phys_enc,
+ cmd_enc->pp_tx_done_irq_idx);
+ sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+
+ atomic_set(&cmd_enc->pending_cnt, 0);
+ wake_up_all(&cmd_enc->pp_tx_done_wq);
+ phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->hw_ctl)
+ sde_rm_release_ctl_path(phys_enc->sde_kms,
+ phys_enc->hw_ctl->idx);
+ sde_hw_mdp_destroy(phys_enc->hw_mdptop);
+ sde_hw_pingpong_destroy(cmd_enc->hw_pp);
+ kfree(cmd_enc);
+}
+
+static void sde_encoder_phys_cmd_get_hw_resources(
+ struct sde_encoder_phys *phys_enc,
+ struct sde_encoder_hw_resources *hw_res)
+{
+ const struct sde_hw_res_map *hw_res_map;
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+
+ DBG("intf %d pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+
+ hw_res->intfs[cmd_enc->intf_idx] = INTF_MODE_CMD;
+ hw_res->pingpongs[cmd_enc->hw_pp->idx] = true;
+ hw_res_map = sde_rm_get_res_map(phys_enc->sde_kms,
+ cmd_enc->intf_idx);
+ if (IS_ERR_OR_NULL(hw_res_map)) {
+ DRM_ERROR("Failed to get hw_res_map: %ld", PTR_ERR(hw_res_map));
+ return;
+ }
+ hw_res->ctls[hw_res_map->ctl] = true;
+}
+
+static int sde_encoder_phys_cmd_wait_for_commit_done(
+ struct sde_encoder_phys *phys_enc)
+{
+ /*
+ * Since ctl_start "commits" the transaction to hardware, and the
+ * tearcheck block takes it from there, there is no need to have a
+ * separate wait for committed, a la wait-for-vsync in video mode
+ */
+
+ return 0;
+}
+
+static void sde_encoder_phys_cmd_prepare_for_kickoff(
+ struct sde_encoder_phys *phys_enc,
+ bool *need_to_wait)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ int new_pending_cnt;
+
+ DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+
+ /*
+ * Mark kickoff request as outstanding. If there are more than one,
+ * outstanding, then we have to wait for the previous one to complete
+ */
+ new_pending_cnt = atomic_inc_return(&cmd_enc->pending_cnt);
+ *need_to_wait = new_pending_cnt != 1;
+
+ if (*need_to_wait)
+ SDE_DEBUG("intf %d pp %d needs to wait, new_pending_cnt %d",
+ cmd_enc->intf_idx, cmd_enc->hw_pp->idx,
+ new_pending_cnt);
+ MSM_EVT(DEV(phys_enc), cmd_enc->hw_pp->idx, new_pending_cnt);
+}
+
+static void sde_encoder_phys_cmd_init_ops(
+ struct sde_encoder_phys_ops *ops)
+{
+ ops->is_master = sde_encoder_phys_cmd_is_master;
+ ops->mode_set = sde_encoder_phys_cmd_mode_set;
+ ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
+ ops->enable = sde_encoder_phys_cmd_enable;
+ ops->disable = sde_encoder_phys_cmd_disable;
+ ops->destroy = sde_encoder_phys_cmd_destroy;
+ ops->get_hw_resources = sde_encoder_phys_cmd_get_hw_resources;
+ ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
+}
+
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+ struct sde_kms *sde_kms,
+ enum sde_intf intf_idx,
+ enum sde_pingpong pp_idx,
+ enum sde_ctl ctl_idx,
+ enum sde_enc_split_role split_role,
+ struct drm_encoder *parent,
+ struct sde_encoder_virt_ops parent_ops)
+{
+ struct sde_encoder_phys *phys_enc = NULL;
+ struct sde_encoder_phys_cmd *cmd_enc = NULL;
+ int ret = 0;
+
+ DBG("intf %d, pp %d", intf_idx, pp_idx);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ phys_enc = &cmd_enc->base;
+
+ phys_enc->hw_mdptop = sde_hw_mdptop_init(MDP_TOP, sde_kms->mmio,
+ sde_kms->catalog);
+ if (IS_ERR_OR_NULL(phys_enc->hw_mdptop)) {
+ ret = PTR_ERR(phys_enc->hw_mdptop);
+ phys_enc->hw_mdptop = NULL;
+ DRM_ERROR("Failed init hw_top: %d\n", ret);
+ goto fail_mdptop;
+ }
+
+ cmd_enc->intf_idx = intf_idx;
+
+ phys_enc->hw_ctl = sde_rm_acquire_ctl_path(sde_kms, ctl_idx);
+ if (phys_enc->hw_ctl == ERR_PTR(-ENODEV))
+ phys_enc->hw_ctl = sde_rm_get_ctl_path(sde_kms, ctl_idx);
+
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ ret = PTR_ERR(phys_enc->hw_ctl);
+ phys_enc->hw_ctl = NULL;
+ DRM_ERROR("Failed init hw_ctl: %d\n", ret);
+ goto fail_ctl;
+ }
+
+ cmd_enc->hw_pp = sde_hw_pingpong_init(pp_idx, sde_kms->mmio,
+ sde_kms->catalog);
+ if (IS_ERR_OR_NULL(cmd_enc->hw_pp)) {
+ ret = PTR_ERR(cmd_enc->hw_pp);
+ cmd_enc->hw_pp = NULL;
+ DRM_ERROR("Failed init hw_pingpong: %d\n", ret);
+ goto fail_pingpong;
+ }
+
+ sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = parent;
+ phys_enc->parent_ops = parent_ops;
+ phys_enc->sde_kms = sde_kms;
+ phys_enc->split_role = split_role;
+ spin_lock_init(&phys_enc->spin_lock);
+ phys_enc->mode_3d = BLEND_3D_NONE;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = SDE_ENC_DISABLED;
+ atomic_set(&cmd_enc->pending_cnt, 0);
+
+ init_waitqueue_head(&cmd_enc->pp_tx_done_wq);
+
+ DBG("Created sde_encoder_phys_cmd for intf %d pp %d",
+ cmd_enc->intf_idx, cmd_enc->hw_pp->idx);
+
+ return phys_enc;
+
+fail_pingpong:
+ sde_rm_release_ctl_path(phys_enc->sde_kms, phys_enc->hw_ctl->idx);
+fail_ctl:
+ sde_hw_mdp_destroy(phys_enc->hw_mdptop);
+fail_mdptop:
+ kfree(cmd_enc);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index a26e707db63e..22d87f35b6a5 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -174,10 +174,13 @@ static void sde_prepare_commit(struct msm_kms *kms,
static void sde_commit(struct msm_kms *kms, struct drm_atomic_state *old_state)
{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
+ MSM_EVT(sde_kms->dev, 0, 0);
+
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
if (crtc->state->active)
sde_crtc_commit_kickoff(crtc);
@@ -187,7 +190,10 @@ static void sde_complete_commit(struct msm_kms *kms,
struct drm_atomic_state *state)
{
struct sde_kms *sde_kms = to_sde_kms(kms);
+
sde_disable(sde_kms);
+
+ MSM_EVT(sde_kms->dev, 0, 0);
}
static void sde_wait_for_crtc_commit_done(struct msm_kms *kms,