-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c             13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder.c           2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys.h      2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c 43
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c  9
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_ctl.c           30
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_ctl.h           16
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.c          128
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_splash.h           23
9 files changed, 155 insertions, 111 deletions
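
Taken together, the changes below move the splash pipe bookkeeping from
per-call recomputation to precomputed state: the DT parser accumulates
flush_bits, mixer_mask and mixer_ext_mask once per reserved pipe, and
sde_splash_get_mixer_mask() now simply hands out the cached values under
a lock. A minimal sketch of the pattern, using simplified stand-in names
rather than the driver's real symbols:

	/* Sketch only; assumes <linux/mutex.h> and <linux/types.h>. */
	struct splash_masks {
		bool handoff;		/* bootloader still owns splash pipes */
		u32 mixer_mask;		/* CTL_LAYER bits of reserved pipes */
		u32 mixer_ext_mask;	/* CTL_LAYER_EXT bits of reserved pipes */
	};

	static DEFINE_MUTEX(mask_lock);
	static struct splash_masks cached;	/* filled once at DT parse time */

	/* All callers read one consistent snapshot under the lock. */
	static void get_splash_masks(bool *on, u32 *mask, u32 *ext_mask)
	{
		mutex_lock(&mask_lock);
		*on = cached.handoff;
		*mask = cached.mixer_mask;
		*ext_mask = cached.mixer_ext_mask;
		mutex_unlock(&mask_lock);
	}

The old signature forced every caller to pass reserved_pipe_info and rescan
it; the new one makes the masks cheap to read on every blend setup and keeps
the handoff flag and the masks mutually consistent.
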
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 9805c8e8acb4..3ad884b2dbf7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -318,6 +318,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
struct sde_hw_mixer *lm;
struct sde_splash_info *sinfo;
struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
+ bool splash_enabled = false;
+ u32 mixer_mask = 0, mixer_ext_mask = 0;
int i;
@@ -339,6 +341,9 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
return;
}
+ sde_splash_get_mixer_mask(sinfo, &splash_enabled,
+ &mixer_mask, &mixer_ext_mask);
+
for (i = 0; i < sde_crtc->num_mixers; i++) {
if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
SDE_ERROR("invalid lm or ctl assigned to mixer\n");
@@ -348,10 +353,8 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask = 0;
if (mixer[i].hw_ctl->ops.clear_all_blendstages)
mixer[i].hw_ctl->ops.clear_all_blendstages(
- mixer[i].hw_ctl,
- sinfo->handoff,
- sinfo->reserved_pipe_info,
- MAX_BLOCKS);
+ mixer[i].hw_ctl, splash_enabled,
+ mixer_mask, mixer_ext_mask);
}
/* initialize stage cfg */
@@ -379,7 +382,7 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&sde_crtc->stage_cfg, i,
- sinfo->handoff, sinfo->reserved_pipe_info, MAX_BLOCKS);
+ splash_enabled, mixer_mask, mixer_ext_mask);
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index b91fd7f42af3..1a18f785a497 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -796,6 +796,8 @@ static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+ phys->splash_flush_bits = phys->sde_kms->splash_info.flush_bits;
+
ctl->ops.trigger_flush(ctl);
SDE_EVT32(DRMID(drm_enc), ctl->idx);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 4653a7fe650b..0e323f716d2c 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -186,6 +186,7 @@ enum sde_intr_idx {
* between 0-2 Incremented when a new kickoff is
* scheduled. Decremented in irq handler
* @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @splash_flush_bits: Flush bits of splash reserved hardware pipes
*/
struct sde_encoder_phys {
struct drm_encoder *parent;
@@ -211,6 +212,7 @@ struct sde_encoder_phys {
atomic_t underrun_cnt;
atomic_t pending_kickoff_cnt;
wait_queue_head_t pending_kickoff_wq;
+ uint32_t splash_flush_bits;
};
static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c
index 46f5cadff3e8..116a057a3c87 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c
@@ -132,7 +132,14 @@ static void sde_encoder_phys_shd_vblank_irq(void *arg, int irq_idx)
shd_ctl = container_of(hw_ctl, struct sde_shd_hw_ctl, base);
- if ((flush_register & shd_ctl->flush_mask) == 0)
+ /*
+ * While the bootloader's splash is still presented, the bootloader is
+ * concurrently flushing its own hardware pipes. When checking
+ * flush_register, mask out the bootloader's splash pipe flush bits so
+ * that only the kernel-owned flush bits are considered.
+ */
+ if ((flush_register & shd_ctl->flush_mask &
+ ~phys_enc->splash_flush_bits) == 0)
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
@@ -198,14 +205,12 @@ static int _sde_encoder_phys_shd_unregister_irq(
static void _sde_shd_hw_ctl_clear_blendstages_in_range(
struct sde_shd_hw_ctl *hw_ctl, enum sde_lm lm,
- bool handoff, const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length)
+ bool handoff, u32 splash_mask, u32 splash_ext_mask)
{
struct sde_hw_blk_reg_map *c = &hw_ctl->base.hw;
u32 mixercfg, mixercfg_ext;
u32 mixercfg_ext2;
u32 mask = 0, ext_mask = 0, ext2_mask = 0;
- u32 splash_mask = 0, splash_ext_mask = 0;
u32 start = hw_ctl->range.start + SDE_STAGE_0;
u32 end = start + hw_ctl->range.size;
u32 i;
@@ -218,8 +223,6 @@ static void _sde_shd_hw_ctl_clear_blendstages_in_range(
goto end;
if (handoff) {
- sde_splash_get_mixer_mask(resv_pipes,
- resv_pipes_length, &splash_mask, &splash_ext_mask);
mask |= splash_mask;
ext_mask |= splash_ext_mask;
}
@@ -321,8 +324,7 @@ end:
}
static void _sde_shd_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
- bool handoff, const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length)
+ bool handoff, u32 splash_mask, u32 splash_ext_mask)
{
struct sde_shd_hw_ctl *hw_ctl;
int i;
@@ -336,8 +338,7 @@ static void _sde_shd_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
int mixer_id = ctx->mixer_hw_caps[i].id;
_sde_shd_hw_ctl_clear_blendstages_in_range(hw_ctl, mixer_id,
- handoff, resv_pipes,
- resv_pipes_length);
+ handoff, splash_mask, splash_ext_mask);
}
}
@@ -358,12 +359,10 @@ static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
static void _sde_shd_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
- bool handoff, const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length)
+ bool handoff, u32 splash_mask, u32 splash_ext_mask)
{
struct sde_shd_hw_ctl *hw_ctl;
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext, full, mixercfg_ext2;
- u32 splash_mask = 0, splash_ext_mask = 0;
u32 mask = 0, ext_mask = 0, ext2_mask = 0;
int i, j;
int stages;
@@ -383,7 +382,7 @@ static void _sde_shd_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
pipes_per_stage = 1;
_sde_shd_hw_ctl_clear_blendstages_in_range(hw_ctl, lm, handoff,
- resv_pipes, resv_pipes_length);
+ splash_mask, splash_ext_mask);
if (!stage_cfg)
goto exit;
@@ -396,8 +395,6 @@ static void _sde_shd_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
if (handoff) {
mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
- sde_splash_get_mixer_mask(resv_pipes,
- resv_pipes_length, &splash_mask, &splash_ext_mask);
mixercfg &= splash_mask;
mixercfg_ext &= splash_ext_mask;
@@ -610,6 +607,7 @@ static void _sde_shd_trigger_flush(struct sde_hw_ctl *ctx)
{
struct sde_shd_hw_ctl *hw_ctl;
struct sde_encoder_phys_shd *shd_enc;
+ struct sde_encoder_phys *phys;
struct sde_hw_blk_reg_map *c;
unsigned long lock_flags;
int i;
@@ -621,6 +619,9 @@ static void _sde_shd_trigger_flush(struct sde_hw_ctl *ctx)
spin_lock_irqsave(&hw_ctl_lock, lock_flags);
+ phys = &shd_enc->base;
+ phys->splash_flush_bits = phys->sde_kms->splash_info.flush_bits;
+
_sde_shd_hw_ctl_trigger_flush(ctx);
for (i = 0; i < shd_enc->num_mixers; i++)
@@ -903,6 +904,8 @@ static void sde_encoder_phys_shd_disable(struct sde_encoder_phys *phys_enc)
{
struct sde_connector *sde_conn;
struct shd_display *display;
+ bool splash_enabled = false;
+ u32 mixer_mask = 0, mixer_ext_mask = 0;
SHD_DEBUG("%d\n", phys_enc->parent->base.id);
@@ -917,10 +920,11 @@ static void sde_encoder_phys_shd_disable(struct sde_encoder_phys *phys_enc)
return;
}
+ sde_splash_get_mixer_mask(&phys_enc->sde_kms->splash_info,
+ &splash_enabled, &mixer_mask, &mixer_ext_mask);
+
_sde_shd_hw_ctl_clear_all_blendstages(phys_enc->hw_ctl,
- phys_enc->sde_kms->splash_info.handoff,
- phys_enc->sde_kms->splash_info.reserved_pipe_info,
- MAX_BLOCKS);
+ splash_enabled, mixer_mask, mixer_ext_mask);
_sde_shd_trigger_flush(phys_enc->hw_ctl);
@@ -1019,6 +1023,7 @@ struct sde_encoder_phys *sde_encoder_phys_shd_init(
INIT_LIST_HEAD(&shd_enc->irq_cb[i].list);
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ phys_enc->splash_flush_bits = 0;
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = SDE_ENC_DISABLED;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 0d5cc47294ab..b1e09d336c63 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -308,7 +308,13 @@ static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
+ /*
+ * While the bootloader's splash is still presented, the bootloader is
+ * concurrently flushing its own hardware pipes. When checking
+ * flush_register, mask out the bootloader's splash pipe flush bits so
+ * that only the kernel-owned flush bits are considered.
+ */
+ if ((flush_register & ~phys_enc->splash_flush_bits) == 0)
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -983,6 +989,7 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init(
INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
atomic_set(&phys_enc->vblank_refcount, 0);
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ phys_enc->splash_flush_bits = 0;
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = SDE_ENC_DISABLED;
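
Both vblank IRQ handlers above (shared-display and video) now apply the same
rule: a kickoff only counts as complete once every kernel-owned flush bit has
cleared, so the bootloader's splash flush bits must be masked out first. A
condensed sketch of the check, using the driver's own names (locking and
event logging omitted):

	u32 flush_register = 0;

	if (hw_ctl && hw_ctl->ops.get_flush_register)
		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);

	/*
	 * Bits still set in flush_register are flushes the hardware has
	 * not latched yet. During handoff the bootloader keeps flushing
	 * its own pipes, so ignore splash_flush_bits: only kernel-owned
	 * bits decide whether this kickoff has completed.
	 */
	if ((flush_register & ~phys_enc->splash_flush_bits) == 0)
		atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
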
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
index 7cd7b5f234b8..964e02f7293c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -313,36 +313,27 @@ static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
}
static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
- bool handoff,
- const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length)
+ bool handoff, u32 splash_mask, u32 splash_ext_mask)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
int i;
u32 mixercfg = 0;
u32 mixercfg_ext = 0;
- u32 mixer_mask, mixerext_mask;
int mixer_id;
for (i = 0; i < ctx->mixer_count; i++) {
mixer_id = ctx->mixer_hw_caps[i].id;
/*
- * if bootloaer still has early RVC running, mixer status
- * can't be direcly cleared.
+ * if bootloader still has early splash or RVC running,
+ * mixer status can't be directly cleared.
*/
if (handoff) {
- /*
- * if bootloaer still has early display or early RVC
- * running,mixer status can't be direcly cleared.
- */
mixercfg = SDE_REG_READ(c, CTL_LAYER(mixer_id));
mixercfg_ext = SDE_REG_READ(c,
CTL_LAYER_EXT(mixer_id));
- sde_splash_get_mixer_mask(resv_pipes,
- resv_pipes_length, &mixer_mask, &mixerext_mask);
- mixercfg &= mixer_mask;
- mixercfg_ext &= mixerext_mask;
+ mixercfg &= splash_mask;
+ mixercfg_ext &= splash_ext_mask;
}
SDE_REG_WRITE(c, CTL_LAYER(mixer_id), mixercfg);
SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), mixercfg_ext);
@@ -353,13 +344,10 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
- bool handoff,
- const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length)
+ bool handoff, u32 splash_mask, u32 splash_ext_mask)
{
struct sde_hw_blk_reg_map *c = &ctx->hw;
u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
- u32 mixer_mask, mixerext_mask;
int i, j;
u8 stages;
int pipes_per_stage;
@@ -388,10 +376,8 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
if (handoff) {
mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
- sde_splash_get_mixer_mask(resv_pipes,
- resv_pipes_length, &mixer_mask, &mixerext_mask);
- mixercfg &= mixer_mask;
- mixercfg_ext &= mixerext_mask;
+ mixercfg &= splash_mask;
+ mixercfg_ext &= splash_ext_mask;
mixercfg |= BIT(24);
}
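
The clear and setup paths in sde_hw_ctl.c share one handoff rule: while the
bootloader still drives splash pipes, never zero a CTL_LAYER register
outright; read it back and keep only the splash-owned stage bits. A condensed
sketch of the rule, using the driver's register helpers (the EXT2 register
and the setup path's BIT(24) border-output enable are omitted):

	u32 mixercfg = 0, mixercfg_ext = 0;

	if (handoff) {
		/* Preserve only the stages owned by the bootloader splash. */
		mixercfg = SDE_REG_READ(c, CTL_LAYER(lm)) & splash_mask;
		mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm)) &
				splash_ext_mask;
	}

	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
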
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
index 6711bf227fe9..dab0b686cb74 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -154,13 +154,11 @@ struct sde_hw_ctl_ops {
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
* @handoff : indicate if lk is prepared for handoff
- * @resv_pipes : reserved pipes in DT
- * @resv_pipes_length: array size of array reserved_pipes
+ * @splash_mask : layer mixer mask of splash layers
+ * @splash_ext_mask: layer mixer extension mask of splash layers
*/
void (*clear_all_blendstages)(struct sde_hw_ctl *ctx,
- bool handoff,
- const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length);
+ bool handoff, u32 splash_mask, u32 splash_ext_mask);
/**
* Configure layer mixer to pipe configuration
@@ -168,14 +166,12 @@ struct sde_hw_ctl_ops {
* @lm : layer mixer enumeration
* @cfg : blend stage configuration
* @handoff : indicate if lk is prepared for handoff
- * @resv_pipes : reserved pipes in DT
- * @resv_pipes_length: array size of array reserved_pipes
+ * @splash_mask : layer mixer mask of splash layers
+ * @splash_ext_mask: layer mixer extension mask of splash layers
*/
void (*setup_blendstage)(struct sde_hw_ctl *ctx,
enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index,
- bool handoff,
- const struct splash_reserved_pipe_info *resv_pipes,
- u32 resv_pipes_length);
+ bool handoff, u32 splash_mask, u32 splash_ext_mask);
/**
* read CTL_TOP register value for splash case
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
index 8cc1125fea2e..765e3634a936 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.c
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -52,6 +52,35 @@
static DEFINE_MUTEX(sde_splash_lock);
+static struct splash_pipe_caps splash_pipe_cap[MAX_BLOCKS] = {
+ {SSPP_VIG0, BIT(0), 0x7 << 0, BIT(0)},
+ {SSPP_VIG1, BIT(1), 0x7 << 3, BIT(2)},
+ {SSPP_VIG2, BIT(2), 0x7 << 6, BIT(4)},
+ {SSPP_VIG3, BIT(18), 0x7 << 26, BIT(6)},
+ {SSPP_RGB0, BIT(3), 0x7 << 9, BIT(8)},
+ {SSPP_RGB1, BIT(4), 0x7 << 12, BIT(10)},
+ {SSPP_RGB2, BIT(5), 0x7 << 15, BIT(12)},
+ {SSPP_RGB3, BIT(19), 0x7 << 29, BIT(14)},
+ {SSPP_DMA0, BIT(11), 0x7 << 18, BIT(16)},
+ {SSPP_DMA1, BIT(12), 0x7 << 21, BIT(18)},
+ {SSPP_CURSOR0, 0, 0, 0},
+ {SSPP_CURSOR1, 0, 0, 0},
+};
+
+static inline uint32_t _sde_splash_get_pipe_array_index(enum sde_sspp pipe)
+{
+ uint32_t i = 0, index = MAX_BLOCKS;
+
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (pipe == splash_pipe_cap[i].pipe) {
+ index = i;
+ break;
+ }
+ }
+
+ return index;
+}
+
/*
* In order to free reserved memory from bootup, and we are not
* able to call the __init free functions, so we need to free
@@ -407,9 +436,25 @@ static void
_sde_splash_release_early_splash_layer(struct sde_splash_info *splash_info)
{
int i = 0;
+ uint32_t index;
for (i = 0; i < MAX_BLOCKS; i++) {
if (splash_info->reserved_pipe_info[i].early_release) {
+ index = _sde_splash_get_pipe_array_index(
+ splash_info->reserved_pipe_info[i].pipe_id);
+ if (index < MAX_BLOCKS) {
+ /*
+ * Clear flush bits, mixer mask and extension
+ * mask of released pipes.
+ */
+ splash_info->flush_bits &=
+ ~splash_pipe_cap[index].flush_bit;
+ splash_info->mixer_mask &=
+ ~splash_pipe_cap[index].mixer_mask;
+ splash_info->mixer_ext_mask &=
+ ~splash_pipe_cap[index].mixer_ext_mask;
+ }
+
splash_info->reserved_pipe_info[i].pipe_id =
0xFFFFFFFF;
splash_info->reserved_pipe_info[i].early_release =
@@ -566,6 +611,7 @@ int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
struct property *prop;
const char *cname;
int ret = 0, i = 0;
+ uint32_t index;
if (!splash_info || !cfg)
return -EINVAL;
@@ -580,6 +626,11 @@ int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
splash_info->reserved_pipe_info[i].early_release = false;
}
+ /* Reset flush bits and mixer mask of reserved planes */
+ splash_info->flush_bits = 0;
+ splash_info->mixer_mask = 0;
+ splash_info->mixer_ext_mask = 0;
+
i = 0;
for_each_child_of_node(parent, node) {
if (i >= MAX_BLOCKS) {
@@ -596,6 +647,19 @@ int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
splash_info->reserved_pipe_info[i].early_release =
of_property_read_bool(node, "qcom,pipe-early-release");
+
+ index = _sde_splash_get_pipe_array_index(
+ splash_info->reserved_pipe_info[i].pipe_id);
+
+ if (index < MAX_BLOCKS) {
+ splash_info->flush_bits |=
+ splash_pipe_cap[index].flush_bit;
+ splash_info->mixer_mask |=
+ splash_pipe_cap[index].mixer_mask;
+ splash_info->mixer_ext_mask |=
+ splash_pipe_cap[index].mixer_ext_mask;
+ }
+
i++;
}
@@ -878,55 +942,14 @@ void sde_splash_decrease_connector_cnt(struct drm_device *dev,
}
}
-void sde_splash_get_mixer_mask(
- const struct splash_reserved_pipe_info *resv_pipes,
- u32 length, u32 *mixercfg, u32 *mixercfg_ext)
+void sde_splash_get_mixer_mask(struct sde_splash_info *sinfo,
+ bool *splash_on, u32 *mixercfg, u32 *mixercfg_ext)
{
- int i = 0;
- u32 mixer_mask = 0;
- u32 mixer_ext_mask = 0;
-
- for (i = 0; i < length; i++) {
- switch (resv_pipes[i].pipe_id) {
- case SSPP_VIG0:
- mixer_mask |= 0x7 << 0;
- mixer_ext_mask |= BIT(0);
- break;
- case SSPP_VIG1:
- mixer_mask |= 0x7 << 3;
- mixer_ext_mask |= BIT(2);
- break;
- case SSPP_VIG2:
- mixer_mask |= 0x7 << 6;
- mixer_ext_mask |= BIT(4);
- break;
- case SSPP_VIG3:
- mixer_mask |= 0x7 << 26;
- mixer_ext_mask |= BIT(6);
- break;
- case SSPP_RGB0:
- mixer_mask |= 0x7 << 9;
- mixer_ext_mask |= BIT(8);
- break;
- case SSPP_RGB1:
- mixer_mask |= 0x7 << 12;
- mixer_ext_mask |= BIT(10);
- break;
- case SSPP_RGB2:
- mixer_mask |= 0x7 << 15;
- mixer_ext_mask |= BIT(12);
- break;
- case SSPP_RGB3:
- mixer_mask |= 0x7 << 29;
- mixer_ext_mask |= BIT(14);
- break;
- default:
- break;
- }
- }
-
- *mixercfg = mixer_mask;
- *mixercfg_ext = mixer_ext_mask;
+ mutex_lock(&sde_splash_lock);
+ *splash_on = sinfo->handoff;
+ *mixercfg = sinfo->mixer_mask;
+ *mixercfg_ext = sinfo->mixer_ext_mask;
+ mutex_unlock(&sde_splash_lock);
}
bool sde_splash_get_lk_complete_status(struct msm_kms *kms)
@@ -1029,6 +1052,11 @@ int sde_splash_free_resource(struct msm_kms *kms,
/* set display's splash status to false after handoff is done */
_sde_splash_update_display_splash_status(sde_kms);
+ /* Reset flush_bits and mixer mask */
+ sinfo->flush_bits = 0;
+ sinfo->mixer_mask = 0;
+ sinfo->mixer_ext_mask = 0;
+
/* Finally mark handoff flag to false to say
* handoff is complete.
*/
@@ -1129,8 +1157,8 @@ static int _sde_splash_clear_mixer_blendstage(struct msm_kms *kms,
mixer[i].hw_ctl->ops.clear_all_blendstages(
mixer[i].hw_ctl,
sinfo->handoff,
- sinfo->reserved_pipe_info,
- MAX_BLOCKS);
+ sinfo->mixer_mask,
+ sinfo->mixer_ext_mask);
}
}
return 0;
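
The splash_pipe_cap table turns the old per-pipe switch into data that both
the DT parser and the early-release path index through one helper. As a
worked example (hypothetical DT content), reserving SSPP_VIG0 and SSPP_DMA0
would accumulate:

	flush_bits     = BIT(0)   | BIT(11)   = 0x00000801
	mixer_mask     = (0x7<<0) | (0x7<<18) = 0x001C0007
	mixer_ext_mask = BIT(0)   | BIT(16)   = 0x00010001

An early release of DMA0 later clears exactly its three contributions again
through the same table entries.
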
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
index fc2cc54a00b7..345bf819ee1b 100644
--- a/drivers/gpu/drm/msm/sde/sde_splash.h
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -48,6 +48,13 @@ struct splash_reserved_pipe_info {
bool early_release;
};
+struct splash_pipe_caps {
+ enum sde_sspp pipe;
+ u32 flush_bit;
+ u32 mixer_mask;
+ u32 mixer_ext_mask;
+};
+
struct sde_splash_info {
/* handoff flag */
bool handoff;
@@ -93,6 +100,15 @@ struct sde_splash_info {
/* reserved pipe info both for early RVC and early splash */
struct splash_reserved_pipe_info reserved_pipe_info[MAX_BLOCKS];
+
+ /* flush bits of reserved pipes */
+ uint32_t flush_bits;
+
+ /* layer mixer mask of reserved pipes */
+ uint32_t mixer_mask;
+
+ /* layer mixer extension mask of reserved pipes */
+ uint32_t mixer_ext_mask;
};
/* APIs for early splash handoff functions */
@@ -210,9 +226,8 @@ void sde_splash_decrease_connector_cnt(struct drm_device *dev,
/**
* sde_splash_get_mixer_mask
*
- * Get mask configuration of splash layer mixer.
+ * Retrieve the handoff state plus the cached mixer mask and mixer
+ * extension mask from the sde_splash_info structure.
*/
-void sde_splash_get_mixer_mask(
- const struct splash_reserved_pipe_info *resv_pipes,
- u32 length, u32 *mixercfg, u32 *mixercfg_ext);
+void sde_splash_get_mixer_mask(struct sde_splash_info *sinfo,
+ bool *splash_on, u32 *mixercfg, u32 *mixercfg_ext);
#endif