diff options
| author | Lloyd Atkinson <latkinso@codeaurora.org> | 2016-08-11 11:19:52 -0400 |
|---|---|---|
| committer | Lloyd Atkinson <latkinso@codeaurora.org> | 2016-08-25 11:01:37 -0400 |
| commit | 14c3c9cf0432f787fd5b7445ce07d4c65f4db82e (patch) | |
| tree | ca646728f8ac422c5b3c2bbb5659ed5f5302d73b /drivers/gpu | |
| parent | 6d9046ba7fb6237afa8d0a9d14e94ec85d5009dc (diff) | |
drm/msm/sde: enable dynamic hardware resource assignment
Replace the hard-coded use-case table with the previously added
dynamic hardware resource manager. The resource manager tracks the
hardware blocks in the system, and users request assignment of hw
blocks to use cases via per-connector properties.
Change-Id: If5564cf755dd2358aca756feddddb6f58969eb5c
Signed-off-by: Lloyd Atkinson <latkinso@codeaurora.org>
Diffstat (limited to 'drivers/gpu')
| -rw-r--r-- | drivers/gpu/drm/msm/Makefile | 1 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_connector.c | 6 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_crtc.c | 199 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_crtc.h | 1 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder.c | 145 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 11 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 109 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 122 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c | 159 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_kms.c | 44 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_kms.h | 144 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_kms_utils.c | 217 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_rm.c | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_rm.h | 7 |
14 files changed, 374 insertions, 793 deletions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 89d74cec0ad9..ddefd7f41bef 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -36,6 +36,7 @@ msm-y := \ sde/sde_encoder_phys_vid.o \ sde/sde_encoder_phys_cmd.o \ sde/sde_irq.o \ + sde/sde_rm.o \ sde/sde_kms_utils.o \ sde/sde_kms.o \ sde/sde_plane.o \ diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index a39b994d6fbf..f123778ad4aa 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -253,6 +253,12 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, } } + if (idx == CONNECTOR_PROP_TOPOLOGY_CONTROL) { + rc = sde_rm_check_property_topctl(val); + if (rc) + SDE_ERROR("invalid topology_control: 0x%llX\n", val); + } + /* check for custom property handling */ if (!rc && c_conn->ops.set_property) { rc = c_conn->ops.set_property(connector, diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 638cbe7bd89c..4a3ebbc3bf48 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -41,116 +41,6 @@ static struct sde_kms *get_kms(struct drm_crtc *crtc) return to_sde_kms(priv->kms); } -static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc) -{ - struct sde_crtc *sde_crtc = to_sde_crtc(crtc); - struct sde_kms *sde_kms = get_kms(crtc); - struct sde_encoder_hw_resources enc_hw_res; - const struct sde_hw_res_map *plat_hw_res_map; - enum sde_lm unused_lm_id[CRTC_DUAL_MIXERS] = {0}; - enum sde_lm lm_idx; - int i, unused_lm_count = 0; - - if (!sde_kms) { - DBG("[%s] invalid kms", __func__); - return -EINVAL; - } - - if (!sde_kms->mmio) - return -EINVAL; - - /* Get unused LMs */ - for (i = sde_kms->catalog->mixer_count - 1; i >= 0; --i) { - if (!sde_rm_get_mixer(sde_kms, LM(i))) { - unused_lm_id[unused_lm_count++] = LM(i); - if (unused_lm_count == CRTC_DUAL_MIXERS) - break; - } - } - - 
/* query encoder resources */ - sde_encoder_get_hw_resources(sde_crtc->mixers[0].encoder, &enc_hw_res); - - /* parse encoder hw resources, find CTL paths */ - for (i = CTL_0; i <= sde_kms->catalog->ctl_count; i++) { - WARN_ON(sde_crtc->num_ctls > CRTC_DUAL_MIXERS); - if (enc_hw_res.ctls[i]) { - struct sde_crtc_mixer *mixer = - &sde_crtc->mixers[sde_crtc->num_ctls]; - mixer->hw_ctl = sde_rm_get_ctl_path(sde_kms, i); - if (IS_ERR_OR_NULL(mixer->hw_ctl)) { - DRM_ERROR("Invalid ctl_path\n"); - return PTR_ERR(mixer->hw_ctl); - } - sde_crtc->num_ctls++; - } - } - - /* shortcut this process if encoder has no ctl paths */ - if (!sde_crtc->num_ctls) - return 0; - - /* - * Get default LMs if specified in platform config - * other wise acquire the free LMs - */ - for (i = INTF_0; i <= sde_kms->catalog->intf_count; i++) { - if (enc_hw_res.intfs[i]) { - struct sde_crtc_mixer *mixer = - &sde_crtc->mixers[sde_crtc->num_mixers]; - plat_hw_res_map = sde_rm_get_res_map(sde_kms, - i, SDE_NONE); - - lm_idx = plat_hw_res_map->lm; - if (!lm_idx && unused_lm_count) - lm_idx = unused_lm_id[--unused_lm_count]; - - DBG("intf %d acquiring lm %d", i, lm_idx); - mixer->hw_lm = sde_rm_acquire_mixer(sde_kms, lm_idx); - if (IS_ERR_OR_NULL(mixer->hw_lm)) { - DRM_ERROR("Invalid mixer\n"); - return -EACCES; - } - sde_crtc->num_mixers++; - } - } - - /* - * Get default LMs if specified in platform config, - * otherwise acquire the free LMs. 
- */ - for (i = WB_0; i < WB_MAX; i++) { - if (enc_hw_res.wbs[i]) { - struct sde_crtc_mixer *mixer = - &sde_crtc->mixers[sde_crtc->num_mixers]; - plat_hw_res_map = sde_rm_get_res_map(sde_kms, - SDE_NONE, i); - - lm_idx = plat_hw_res_map->lm; - if (!lm_idx && unused_lm_count) - lm_idx = unused_lm_id[--unused_lm_count]; - - DBG("wb %d acquiring lm %d", i, lm_idx); - mixer->hw_lm = sde_rm_acquire_mixer(sde_kms, lm_idx); - if (IS_ERR_OR_NULL(mixer->hw_lm)) { - DRM_ERROR("Invalid mixer\n"); - return -EACCES; - } - sde_crtc->num_mixers++; - } - } - - DBG("control paths %d, num_mixers %d, lm[0] %d, ctl[0] %d ", - sde_crtc->num_ctls, sde_crtc->num_mixers, - sde_crtc->mixers[0].hw_lm->idx, - sde_crtc->mixers[0].hw_ctl->idx); - if (sde_crtc->num_mixers > 1) - DBG("lm[1] %d, ctl[1], %d", - sde_crtc->mixers[1].hw_lm->idx, - sde_crtc->mixers[1].hw_ctl->idx); - return 0; -} - static void sde_crtc_destroy(struct drm_crtc *crtc) { struct sde_crtc *sde_crtc = to_sde_crtc(crtc); @@ -560,6 +450,66 @@ static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc) } } +static void _sde_crtc_setup_mixer_for_encoder( + struct drm_crtc *crtc, + struct drm_encoder *enc) +{ + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct sde_kms *sde_kms = get_kms(crtc); + struct sde_rm *rm = &sde_kms->rm; + struct sde_crtc_mixer *mixer; + int i; + struct sde_rm_hw_iter lm_iter, ctl_iter; + + sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM); + sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL); + + /* Set up the mixer for this encoder, checking all channels */ + for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) { + mixer = &sde_crtc->mixers[i]; + + /* Add mixer if reservation exists on (encoder, chan) */ + if (!sde_rm_get_hw(rm, &lm_iter)) + break; + mixer->hw_lm = (struct sde_hw_mixer *)lm_iter.hw; + + /* CTL may be null, not necessarily 1:1 with LM */ + (void) sde_rm_get_hw(rm, &ctl_iter); + mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw; + + 
mixer->flush_mask = 0; + mixer->encoder = enc; + + sde_crtc->num_mixers++; + SDE_DEBUG("setup mixer %d: lm %d\n", + i, mixer->hw_lm->idx); + + if (mixer->hw_ctl) { + sde_crtc->num_ctls++; + SDE_DEBUG("setup mixer %d: ctl %d\n", + i, mixer->hw_ctl->idx); + } + } +} + +static void _sde_crtc_setup_mixers(struct drm_crtc *crtc) +{ + struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + struct drm_encoder *enc; + + sde_crtc->num_ctls = 0; + sde_crtc->num_mixers = 0; + memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers)); + + /* Check for mixers on all encoders attached to this crtc */ + list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) { + if (enc->crtc != crtc) + continue; + + _sde_crtc_setup_mixer_for_encoder(crtc, enc); + } +} + static void sde_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -578,6 +528,8 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc, sde_crtc = to_sde_crtc(crtc); dev = crtc->dev; + _sde_crtc_setup_mixers(crtc); + if (sde_crtc->event) { WARN_ON(sde_crtc->event); } else { @@ -831,7 +783,6 @@ static void sde_crtc_enable(struct drm_crtc *crtc) struct sde_hw_mixer_cfg cfg; u32 mixer_width; int i; - int rc; if (!crtc) { DRM_ERROR("invalid crtc\n"); @@ -850,20 +801,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc) drm_mode_debug_printmodeline(mode); - /* - * reserve mixer(s) if not already avaialable - * if dual mode, mixer_width = half mode width - * program mode configuration on mixer(s) - */ - if ((sde_crtc->num_ctls == 0) || - (sde_crtc->num_mixers == 0)) { - rc = sde_crtc_reserve_hw_resources(crtc); - if (rc) { - DRM_ERROR("error reserving HW resource for CRTC\n"); - return; - } - } - + /* Update LMs for dual mode: mixer_width = half mode width */ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) mixer_width = mode->hdisplay >> 1; else @@ -1199,7 +1137,6 @@ static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc, /* initialize crtc */ struct drm_crtc *sde_crtc_init(struct 
drm_device *dev, - struct drm_encoder *encoder, struct drm_plane *plane, int drm_crtc_id) { @@ -1207,7 +1144,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct sde_crtc *sde_crtc = NULL; struct msm_drm_private *priv = NULL; struct sde_kms *kms = NULL; - int i, rc; priv = dev->dev_private; kms = to_sde_kms(priv->kms); @@ -1228,15 +1164,6 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs); plane->crtc = crtc; - for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) - sde_crtc->mixers[i].encoder = encoder; - - rc = sde_crtc_reserve_hw_resources(crtc); - if (rc) { - DRM_ERROR(" error reserving HW resource for this CRTC\n"); - return ERR_PTR(-EINVAL); - } - /* save user friendly CRTC name for later */ snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index e0c74f98e40c..f85088b1bee2 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -62,6 +62,7 @@ struct sde_crtc_mixer { struct sde_crtc { struct drm_crtc base; char name[SDE_CRTC_NAME_SIZE]; + int drm_crtc_id; spinlock_t lm_lock; /* protect registers */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 644cca31eec1..67190a3448d0 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -70,6 +70,8 @@ struct sde_encoder_virt { spinlock_t spin_lock; uint32_t bus_scaling_client; + uint32_t display_num_of_h_tiles; + unsigned int num_phys_encs; struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL]; struct sde_encoder_phys *cur_master; @@ -155,14 +157,15 @@ static void bs_set(struct sde_encoder_virt *sde_enc, int idx) #endif void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc, - struct sde_encoder_hw_resources *hw_res) + struct sde_encoder_hw_resources *hw_res, + struct drm_connector_state *conn_state) { 
struct sde_encoder_virt *sde_enc = NULL; int i = 0; DBG(""); - if (!hw_res || !drm_enc) { + if (!hw_res || !drm_enc || !conn_state) { DRM_ERROR("Invalid pointer"); return; } @@ -171,11 +174,13 @@ void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc, /* Query resources used by phys encs, expected to be without overlap */ memset(hw_res, 0, sizeof(*hw_res)); + hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles; + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; if (phys && phys->ops.get_hw_resources) - phys->ops.get_hw_resources(phys, hw_res); + phys->ops.get_hw_resources(phys, hw_res, conn_state); } } @@ -231,54 +236,14 @@ static void sde_encoder_destroy(struct drm_encoder *drm_enc) kfree(sde_enc); } -static bool sde_encoder_virt_mode_fixup(struct drm_encoder *drm_enc, - const struct drm_display_mode *mode, - struct drm_display_mode *adj_mode) -{ - struct sde_encoder_virt *sde_enc = NULL; - int i = 0; - bool ret = true; - - DBG(""); - - if (!drm_enc) { - DRM_ERROR("Invalid pointer"); - return false; - } - - sde_enc = to_sde_encoder_virt(drm_enc); - MSM_EVT(drm_enc->dev, 0, 0); - - for (i = 0; i < sde_enc->num_phys_encs; i++) { - struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - - if (phys && phys->ops.mode_fixup) { - ret = phys->ops.mode_fixup(phys, mode, adj_mode); - if (!ret) { - DRM_ERROR("Mode unsupported, phys_enc %d\n", i); - break; - } - - if (sde_enc->num_phys_encs > 1) { - DBG("ModeFix only checking 1 phys_enc"); - break; - } - } - } - - /* Call to populate mode->crtc* information required by framework */ - drm_mode_set_crtcinfo(adj_mode, 0); - - MSM_EVT(drm_enc->dev, adj_mode->flags, adj_mode->private_flags); - - return ret; -} - -static int sde_encoder_virt_atomic_check(struct drm_encoder *drm_enc, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) +static int sde_encoder_virt_atomic_check( + struct drm_encoder *drm_enc, + struct drm_crtc_state 
*crtc_state, + struct drm_connector_state *conn_state) { - struct sde_encoder_virt *sde_enc = NULL; + struct sde_encoder_virt *sde_enc; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; const struct drm_display_mode *mode; struct drm_display_mode *adj_mode; int i = 0; @@ -292,6 +257,8 @@ static int sde_encoder_virt_atomic_check(struct drm_encoder *drm_enc, } sde_enc = to_sde_encoder_virt(drm_enc); + priv = drm_enc->dev->dev_private; + sde_kms = to_sde_kms(priv->kms); mode = &crtc_state->mode; adj_mode = &crtc_state->adjusted_mode; MSM_EVT(drm_enc->dev, 0, 0); @@ -300,23 +267,25 @@ static int sde_encoder_virt_atomic_check(struct drm_encoder *drm_enc, for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; - if (phys && phys->ops.atomic_check) { + if (phys && phys->ops.atomic_check) ret = phys->ops.atomic_check(phys, crtc_state, conn_state); - if (ret) - DRM_ERROR("Mode unsupported, phys_enc %d.%d\n", - drm_enc->base.id, i); - break; - } else if (phys && phys->ops.mode_fixup) { - if (!phys->ops.mode_fixup(phys, mode, adj_mode)) { - DRM_ERROR("Mode unsupported, phys_enc %d.%d\n", - drm_enc->base.id, i); + else if (phys && phys->ops.mode_fixup) + if (!phys->ops.mode_fixup(phys, mode, adj_mode)) ret = -EINVAL; - } + + if (ret) { + SDE_ERROR("enc %d mode unsupported, phys %d\n", + drm_enc->base.id, i); break; } } + /* Reserve dynamic resources now. 
Indicating AtomicTest phase */ + if (!ret) + ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state, + conn_state, true); + /* Call to populate mode->crtc* information required by framework */ drm_mode_set_crtcinfo(adj_mode, 0); @@ -329,8 +298,12 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { - struct sde_encoder_virt *sde_enc = NULL; - int i = 0; + struct sde_encoder_virt *sde_enc; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; + struct list_head *connector_list; + struct drm_connector *conn = NULL, *conn_iter; + int i = 0, ret; DBG(""); @@ -340,8 +313,31 @@ static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc, } sde_enc = to_sde_encoder_virt(drm_enc); + priv = drm_enc->dev->dev_private; + sde_kms = to_sde_kms(priv->kms); + connector_list = &sde_kms->dev->mode_config.connector_list; + MSM_EVT(drm_enc->dev, 0, 0); + list_for_each_entry(conn_iter, connector_list, head) + if (conn_iter->encoder == drm_enc) + conn = conn_iter; + + if (!conn) { + SDE_ERROR("enc %d failed to find attached connector\n", + drm_enc->base.id); + return; + } + + /* Reserve dynamic resources now. 
Indicating non-AtomicTest phase */ + ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state, + conn->state, false); + if (ret) { + SDE_ERROR("enc %d failed to reserve hw resources, ret %d\n", + drm_enc->base.id, ret); + return; + } + for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; @@ -390,6 +386,8 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc = NULL; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; int i = 0; DBG(""); @@ -400,6 +398,9 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) } sde_enc = to_sde_encoder_virt(drm_enc); + priv = drm_enc->dev->dev_private; + sde_kms = to_sde_kms(priv->kms); + MSM_EVT(drm_enc->dev, 0, 0); for (i = 0; i < sde_enc->num_phys_encs; i++) { @@ -413,10 +414,11 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) DBG("clear phys enc master"); bs_set(sde_enc, 0); + + sde_rm_release(&sde_kms->rm, drm_enc); } static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = { - .mode_fixup = sde_encoder_virt_mode_fixup, .mode_set = sde_encoder_virt_mode_set, .disable = sde_encoder_virt_disable, .enable = sde_encoder_virt_enable, @@ -699,6 +701,8 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, WARN_ON(disp_info->num_of_h_tiles < 1); + sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles; + DBG("dsi_info->num_of_h_tiles %d", disp_info->num_of_h_tiles); for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) { @@ -707,7 +711,6 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right */ - const struct sde_hw_res_map *hw_res_map = NULL; u32 controller_id = disp_info->h_tile_instance[i]; if (disp_info->num_of_h_tiles > 1) { @@ -723,6 +726,7 @@ 
static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, i, controller_id, phys_params.split_role); if (intf_type == INTF_WB) { + phys_params.intf_idx = INTF_MAX; phys_params.wb_idx = sde_encoder_get_wb( sde_kms->catalog, intf_type, controller_id); @@ -733,6 +737,7 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, ret = -EINVAL; } } else { + phys_params.wb_idx = WB_MAX; phys_params.intf_idx = sde_encoder_get_intf( sde_kms->catalog, intf_type, controller_id); @@ -744,18 +749,6 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc, } } - hw_res_map = sde_rm_get_res_map(sde_kms, phys_params.intf_idx, - phys_params.wb_idx); - if (IS_ERR_OR_NULL(hw_res_map)) { - DRM_ERROR("failed to get hw_res_map: %ld\n", - PTR_ERR(hw_res_map)); - ret = -EINVAL; - } else { - phys_params.pp_idx = hw_res_map->pp; - phys_params.ctl_idx = hw_res_map->ctl; - phys_params.cdm_idx = hw_res_map->cdm; - } - if (!ret) { if (intf_type == INTF_WB) ret = sde_encoder_virt_add_phys_enc_wb(sde_enc, diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index f67ae38660c2..e63c2b31f425 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -97,7 +97,8 @@ struct sde_encoder_phys_ops { struct drm_connector_state *conn_state); void (*destroy)(struct sde_encoder_phys *encoder); void (*get_hw_resources)(struct sde_encoder_phys *encoder, - struct sde_encoder_hw_resources *hw_res); + struct sde_encoder_hw_resources *hw_res, + struct drm_connector_state *conn_state); int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable); int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc); void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc, @@ -130,6 +131,7 @@ enum sde_enc_enable_state { * @hw_ctl: Hardware interface to the ctl registers * @hw_cdm: Hardware interface to the cdm registers * @cdm_cfg: Chroma-down hardware configuration + * 
@needs_cdm: Whether the mode / fmt require a CDM to function * @sde_kms: Pointer to the sde_kms top level * @cached_mode: DRM mode cached at mode_set time, acted on in enable * @enabled: Whether the encoder has enabled and running a mode @@ -144,6 +146,7 @@ struct sde_encoder_phys { struct sde_encoder_virt_ops parent_ops; struct sde_hw_mdp *hw_mdptop; struct sde_hw_ctl *hw_ctl; + bool needs_cdm; struct sde_hw_cdm *hw_cdm; struct sde_hw_cdm_cfg cdm_cfg; struct sde_kms *sde_kms; @@ -251,9 +254,6 @@ struct sde_encoder_phys_wb { * @split_role: Role to play in a split-panel configuration * @intf_idx: Interface index this phys_enc will control * @wb_idx: Writeback index this phys_enc will control - * @pp_idx: Pingpong index this phys_enc will control - * @ctl_idx: Control path index this phys_enc will use - * @cdm_idx: Chromadown index this phys_enc will use */ struct sde_enc_phys_init_params { struct sde_kms *sde_kms; @@ -262,9 +262,6 @@ struct sde_enc_phys_init_params { enum sde_enc_split_role split_role; enum sde_intf intf_idx; enum sde_wb wb_idx; - enum sde_pingpong pp_idx; - enum sde_ctl ctl_idx; - enum sde_cdm cdm_idx; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 700f780a4389..64c69dabd862 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -54,10 +54,46 @@ static void sde_encoder_phys_cmd_mode_set( { struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); + struct sde_rm *rm = &phys_enc->sde_kms->rm; + struct sde_rm_hw_iter iter; + int i, instance; phys_enc->cached_mode = *adj_mode; - DBG("intf %d, caching mode:", cmd_enc->intf_idx); + SDE_DEBUG("intf %d, caching mode:\n", cmd_enc->intf_idx); drm_mode_debug_printmodeline(adj_mode); + + instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0; + + /* Retrieve previously allocated HW Resources. 
Shouldn't fail */ + sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw; + } + + if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { + SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl)); + phys_enc->hw_ctl = NULL; + return; + } + + sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, + SDE_HW_BLK_PINGPONG); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + cmd_enc->hw_pp = (struct sde_hw_pingpong *) iter.hw; + } + + if (IS_ERR_OR_NULL(cmd_enc->hw_pp)) { + SDE_ERROR("failed init pingpong: %ld\n", + PTR_ERR(cmd_enc->hw_pp)); + cmd_enc->hw_pp = NULL; + phys_enc->hw_ctl = NULL; + return; + } + } static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx) @@ -293,11 +329,11 @@ static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc) u32 flush_mask; int ret = 0; - DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx); - if (WARN_ON(phys_enc->enable_state == SDE_ENC_ENABLED)) return; + DBG("intf %d, pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx); + /* * Only master configures master/slave configuration, so no slave check * In solo configuration, solo encoder needs to program no-split @@ -358,33 +394,19 @@ static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc) struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); - if (phys_enc->hw_ctl) - sde_rm_release_ctl_path(phys_enc->sde_kms, - phys_enc->hw_ctl->idx); - sde_hw_mdp_destroy(phys_enc->hw_mdptop); - sde_hw_pingpong_destroy(cmd_enc->hw_pp); kfree(cmd_enc); } static void sde_encoder_phys_cmd_get_hw_resources( struct sde_encoder_phys *phys_enc, - struct sde_encoder_hw_resources *hw_res) + struct sde_encoder_hw_resources *hw_res, + struct drm_connector_state *conn_state) { - const struct sde_hw_res_map *hw_res_map; struct sde_encoder_phys_cmd *cmd_enc = 
to_sde_encoder_phys_cmd(phys_enc); - DBG("intf %d pp %d", cmd_enc->intf_idx, cmd_enc->hw_pp->idx); - - hw_res->intfs[cmd_enc->intf_idx] = INTF_MODE_CMD; - hw_res->pingpongs[cmd_enc->hw_pp->idx] = true; - hw_res_map = sde_rm_get_res_map(phys_enc->sde_kms, - cmd_enc->intf_idx, SDE_NONE); - if (IS_ERR_OR_NULL(hw_res_map)) { - DRM_ERROR("Failed to get hw_res_map: %ld", PTR_ERR(hw_res_map)); - return; - } - hw_res->ctls[hw_res_map->ctl] = true; + DBG("intf %d", cmd_enc->intf_idx); + hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD; } static int sde_encoder_phys_cmd_wait_for_commit_done( @@ -450,9 +472,10 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init( { struct sde_encoder_phys *phys_enc = NULL; struct sde_encoder_phys_cmd *cmd_enc = NULL; + struct sde_hw_mdp *hw_mdp; int ret = 0; - DBG("intf %d, pp %d", p->intf_idx, p->pp_idx); + DBG("intf %d", p->intf_idx); cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL); if (!cmd_enc) { @@ -461,37 +484,16 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init( } phys_enc = &cmd_enc->base; - phys_enc->hw_mdptop = sde_hw_mdptop_init(MDP_TOP, p->sde_kms->mmio, - p->sde_kms->catalog); - if (IS_ERR_OR_NULL(phys_enc->hw_mdptop)) { - ret = PTR_ERR(phys_enc->hw_mdptop); - phys_enc->hw_mdptop = NULL; - DRM_ERROR("Failed init hw_top: %d\n", ret); - goto fail_mdptop; + hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm); + if (IS_ERR_OR_NULL(hw_mdp)) { + ret = PTR_ERR(hw_mdp); + SDE_ERROR("failed to get mdptop\n"); + goto fail_mdp_init; } + phys_enc->hw_mdptop = hw_mdp; cmd_enc->intf_idx = p->intf_idx; - phys_enc->hw_ctl = sde_rm_acquire_ctl_path(p->sde_kms, p->ctl_idx); - if (phys_enc->hw_ctl == ERR_PTR(-ENODEV)) - phys_enc->hw_ctl = sde_rm_get_ctl_path(p->sde_kms, p->ctl_idx); - - if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { - ret = PTR_ERR(phys_enc->hw_ctl); - phys_enc->hw_ctl = NULL; - DRM_ERROR("Failed init hw_ctl: %d\n", ret); - goto fail_ctl; - } - - cmd_enc->hw_pp = sde_hw_pingpong_init(p->pp_idx, p->sde_kms->mmio, - p->sde_kms->catalog); 
- if (IS_ERR_OR_NULL(cmd_enc->hw_pp)) { - ret = PTR_ERR(cmd_enc->hw_pp); - cmd_enc->hw_pp = NULL; - DRM_ERROR("Failed init hw_pingpong: %d\n", ret); - goto fail_pingpong; - } - sde_encoder_phys_cmd_init_ops(&phys_enc->ops); phys_enc->parent = p->parent; phys_enc->parent_ops = p->parent_ops; @@ -505,16 +507,11 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init( init_waitqueue_head(&cmd_enc->pp_tx_done_wq); - DBG("Created sde_encoder_phys_cmd for intf %d pp %d", - cmd_enc->intf_idx, cmd_enc->hw_pp->idx); + DBG("Created sde_encoder_phys_cmd for intf %d", cmd_enc->intf_idx); return phys_enc; -fail_pingpong: - sde_rm_release_ctl_path(phys_enc->sde_kms, phys_enc->hw_ctl->idx); -fail_ctl: - sde_hw_mdp_destroy(phys_enc->hw_mdptop); -fail_mdptop: +fail_mdp_init: kfree(cmd_enc); fail: return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 3ebefc9fb057..cb9f2c11c689 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -379,11 +379,30 @@ static void sde_encoder_phys_vid_mode_set( struct drm_display_mode *adj_mode) { struct sde_encoder_phys_vid *vid_enc = - to_sde_encoder_phys_vid(phys_enc); + to_sde_encoder_phys_vid(phys_enc); + struct sde_rm *rm = &phys_enc->sde_kms->rm; + struct sde_rm_hw_iter iter; + int i, instance; phys_enc->cached_mode = *adj_mode; - DBG("intf %d, caching mode:", vid_enc->hw_intf->idx); + SDE_DEBUG("intf %d, caching mode:\n", vid_enc->hw_intf->idx); drm_mode_debug_printmodeline(adj_mode); + + instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0; + + /* Retrieve previously allocated HW Resources. 
Shouldn't fail */ + sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw; + } + + if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { + SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl)); + phys_enc->hw_ctl = NULL; + return; + } } static int sde_encoder_phys_vid_control_vblank_irq( @@ -417,6 +436,12 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) struct sde_hw_ctl *ctl = phys_enc->hw_ctl; u32 flush_mask = 0; + if (!vid_enc->hw_intf || !phys_enc->hw_ctl) { + SDE_ERROR("invalid hw: intf %pK ctl %pK\n", vid_enc->hw_intf, + phys_enc->hw_ctl); + return; + } + DBG("intf %d", vid_enc->hw_intf->idx); if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing)) @@ -428,14 +453,14 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) sde_encoder_phys_vid_split_config(phys_enc, false); sde_encoder_phys_vid_setup_timing_engine(phys_enc); - sde_encoder_phys_vid_control_vblank_irq(phys_enc, true); ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx); - DBG("Update pending flush CTL_ID %d flush_mask %x, INTF %d", - ctl->idx, flush_mask, intf->idx); ctl->ops.update_pending_flush(ctl, flush_mask); + DBG("Update pending flush CTL_ID %d flush_mask %x, INTF %d", + ctl->idx, flush_mask, intf->idx); + /* ctl_flush & timing engine enable will be triggered by framework */ if (phys_enc->enable_state == SDE_ENC_DISABLED) phys_enc->enable_state = SDE_ENC_ENABLING; @@ -447,6 +472,12 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) struct sde_encoder_phys_vid *vid_enc = to_sde_encoder_phys_vid(phys_enc); + if (!vid_enc->hw_intf || !phys_enc->hw_ctl) { + SDE_ERROR("invalid hw: intf %pK ctl %pK\n", vid_enc->hw_intf, + phys_enc->hw_ctl); + return; + } + DBG("intf %d", vid_enc->hw_intf->idx); if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing)) @@ -481,44 +512,19 @@ static 
void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc) to_sde_encoder_phys_vid(phys_enc); DBG("intf %d", vid_enc->hw_intf->idx); - sde_rm_release_ctl_path(phys_enc->sde_kms, phys_enc->hw_ctl->idx); - sde_hw_intf_destroy(vid_enc->hw_intf); - sde_hw_mdp_destroy(phys_enc->hw_mdptop); kfree(vid_enc); } static void sde_encoder_phys_vid_get_hw_resources( struct sde_encoder_phys *phys_enc, - struct sde_encoder_hw_resources *hw_res) + struct sde_encoder_hw_resources *hw_res, + struct drm_connector_state *conn_state) { - const struct sde_hw_res_map *hw_res_map; struct sde_encoder_phys_vid *vid_enc = to_sde_encoder_phys_vid(phys_enc); - DBG("Intf %d", vid_enc->hw_intf->idx); - - hw_res->intfs[vid_enc->hw_intf->idx] = INTF_MODE_VIDEO; - - /* - * defaults should not be in use, - * otherwise signal/return failure - */ - hw_res_map = sde_rm_get_res_map(phys_enc->sde_kms, - vid_enc->hw_intf->idx, SDE_NONE); - if (IS_ERR_OR_NULL(hw_res_map)) { - DRM_ERROR("Failed to get hw_res_map: %ld", PTR_ERR(hw_res_map)); - return; - } - - /* - * This is video mode panel so PINGPONG will be in by-pass mode - * only assign ctl path.For cmd panel check if pp_split is - * enabled, override default map - */ - /* - * phys_enc->hw_ctl->idx - */ - hw_res->ctls[hw_res_map->ctl] = true; + DBG("intf %d", vid_enc->hw_intf->idx); + hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO; } static int sde_encoder_phys_vid_wait_for_commit_done( @@ -613,6 +619,8 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init( { struct sde_encoder_phys *phys_enc = NULL; struct sde_encoder_phys_vid *vid_enc = NULL; + struct sde_rm_hw_iter iter; + struct sde_hw_mdp *hw_mdp; int ret = 0; DBG("intf %d", p->intf_idx); @@ -627,37 +635,31 @@ struct sde_encoder_phys *sde_encoder_phys_vid_init( phys_enc = &vid_enc->base; - phys_enc->hw_mdptop = sde_hw_mdptop_init(MDP_TOP, p->sde_kms->mmio, - p->sde_kms->catalog); - if (IS_ERR_OR_NULL(phys_enc->hw_mdptop)) { - ret = PTR_ERR(phys_enc->hw_mdptop); - 
phys_enc->hw_mdptop = NULL; - DRM_ERROR("Failed init hw_top: %d\n", ret); - goto fail; - } - - vid_enc->hw_intf = sde_hw_intf_init(p->intf_idx, p->sde_kms->mmio, - p->sde_kms->catalog); - if (IS_ERR_OR_NULL(vid_enc->hw_intf)) { - ret = PTR_ERR(vid_enc->hw_intf); - vid_enc->hw_intf = NULL; - DRM_ERROR("Failed init hw_intf: %d\n", ret); + hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm); + if (IS_ERR_OR_NULL(hw_mdp)) { + ret = PTR_ERR(hw_mdp); + SDE_ERROR("failed to get mdptop\n"); goto fail; } + phys_enc->hw_mdptop = hw_mdp; - /* - * For cmd/vid switching, same ctl is used. VID enc and CMD enc - * will both try to get it, depending on which is first need to call - * acquire (initial), and other one needs to call get (add ref count) + /** + * hw_intf resource permanently assigned to this encoder + * Other resources allocated at atomic commit time by use case */ - phys_enc->hw_ctl = sde_rm_acquire_ctl_path(p->sde_kms, p->ctl_idx); - if (phys_enc->hw_ctl == ERR_PTR(-ENODEV)) - phys_enc->hw_ctl = sde_rm_get_ctl_path(p->sde_kms, p->ctl_idx); + sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_INTF); + while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) { + struct sde_hw_intf *hw_intf = (struct sde_hw_intf *)iter.hw; + + if (hw_intf->idx == p->intf_idx) { + vid_enc->hw_intf = hw_intf; + break; + } + } - if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { - ret = PTR_ERR(phys_enc->hw_ctl); - phys_enc->hw_ctl = NULL; - DRM_ERROR("Failed init hw_ctl: %d\n", ret); + if (!vid_enc->hw_intf) { + ret = -EINVAL; + DRM_ERROR("failed to get hw_intf\n"); goto fail; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index df2a7e59a7c9..f3a6e5941294 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -225,7 +225,7 @@ static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc) intf_cfg->intf = SDE_NONE; intf_cfg->wb = hw_wb->idx; - if (phys_enc->hw_ctl->ops.setup_intf_cfg) + if 
(phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, intf_cfg); } @@ -297,6 +297,8 @@ static int sde_encoder_phys_wb_atomic_check( return -EINVAL; } + phys_enc->needs_cdm = SDE_FORMAT_IS_YUV(fmt); + if (wb_roi.w && wb_roi.h) { if (wb_roi.w != mode->hdisplay) { SDE_ERROR("invalid roi w=%d, mode w=%d\n", wb_roi.w, @@ -356,6 +358,11 @@ static void sde_encoder_phys_wb_flush(struct sde_encoder_phys *phys_enc) SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); + if (!hw_ctl) { + SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0); + return; + } + if (hw_ctl->ops.get_bitmask_wb) hw_ctl->ops.get_bitmask_wb(hw_ctl, &flush_mask, hw_wb->idx); @@ -537,13 +544,48 @@ static void sde_encoder_phys_wb_mode_set( struct drm_display_mode *adj_mode) { struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); + struct sde_rm *rm = &phys_enc->sde_kms->rm; struct sde_hw_wb *hw_wb = wb_enc->hw_wb; + struct sde_rm_hw_iter iter; + int i, instance; phys_enc->cached_mode = *adj_mode; + instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0; SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n", hw_wb->idx - WB_0, mode->base.id, mode->name, mode->hdisplay, mode->vdisplay); + + /* Retrieve previously allocated HW Resources. 
CTL shouldn't fail */ + sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw; + } + + if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) { + SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl)); + phys_enc->hw_ctl = NULL; + return; + } + + /* CDM is optional */ + sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw; + } + + if (IS_ERR_OR_NULL(phys_enc->hw_cdm)) { + if (phys_enc->needs_cdm) { + SDE_ERROR("CDM required but not allocated: %ld\n", + PTR_ERR(phys_enc->hw_cdm)); + phys_enc->hw_ctl = NULL; + } + phys_enc->hw_cdm = NULL; + } } /** @@ -740,6 +782,11 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc) sde_encoder_phys_wb_wait_for_commit_done(phys_enc); } + if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) { + SDE_DEBUG_DRIVER("[cdm_disable]\n"); + phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm); + } + phys_enc->enable_state = SDE_ENC_DISABLED; } @@ -750,35 +797,17 @@ static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc) */ static void sde_encoder_phys_wb_get_hw_resources( struct sde_encoder_phys *phys_enc, - struct sde_encoder_hw_resources *hw_res) + struct sde_encoder_hw_resources *hw_res, + struct drm_connector_state *conn_state) { struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc); struct sde_hw_wb *hw_wb = wb_enc->hw_wb; - const struct sde_hw_res_map *hw_res_map; SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0); - - hw_res->wbs[hw_wb->idx] = INTF_MODE_WB_LINE; - - /* - * Validate if we want to use the default map - * defaults should not be in use, - * otherwise signal/return failure - */ - hw_res_map = sde_rm_get_res_map(phys_enc->sde_kms, - SDE_NONE, hw_wb->idx); - if 
(IS_ERR_OR_NULL(hw_res_map)) { - SDE_ERROR("failed to get hw_res_map: %ld\n", - PTR_ERR(hw_res_map)); - return; - } - - /* - * cached ctl_idx at init time, shouldn't we use that? - */ - hw_res->ctls[hw_res_map->ctl] = true; - + hw_res->wbs[hw_wb->idx - WB_0] = INTF_MODE_WB_LINE; + hw_res->needs_cdm = phys_enc->needs_cdm; } + /** * sde_encoder_phys_wb_needs_ctl_start - Whether encoder needs ctl_start * @phys_enc: Pointer to physical encoder @@ -871,17 +900,6 @@ static void sde_encoder_phys_wb_destroy(struct sde_encoder_phys *phys_enc) sde_encoder_phys_wb_destroy_debugfs(phys_enc); - if (phys_enc->hw_ctl) - sde_rm_release_ctl_path(phys_enc->sde_kms, - phys_enc->hw_ctl->idx); - if (phys_enc->hw_cdm) - sde_rm_release_cdm_path(phys_enc->sde_kms, - phys_enc->hw_cdm->idx); - if (hw_wb) - sde_hw_wb_destroy(hw_wb); - if (phys_enc->hw_mdptop) - sde_hw_mdp_destroy(phys_enc->hw_mdptop); - kfree(wb_enc); } @@ -942,8 +960,7 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE]; } - hw_mdp = sde_hw_mdptop_init(MDP_TOP, p->sde_kms->mmio, - p->sde_kms->catalog); + hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm); if (IS_ERR_OR_NULL(hw_mdp)) { ret = PTR_ERR(hw_mdp); SDE_ERROR("failed to init hw_top: %d\n", ret); @@ -951,55 +968,32 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( } phys_enc->hw_mdptop = hw_mdp; + /** + * hw_wb resource permanently assigned to this encoder + * Other resources allocated at atomic commit time by use case + */ if (p->wb_idx != SDE_NONE) { - struct sde_hw_wb *hw_wb; - - hw_wb = sde_hw_wb_init(p->wb_idx, p->sde_kms->mmio, - p->sde_kms->catalog, phys_enc->hw_mdptop); - if (IS_ERR_OR_NULL(hw_wb)) { - ret = PTR_ERR(hw_wb); - SDE_ERROR("failed to init hw_wb%d: %d\n", - p->wb_idx - WB_0, ret); - goto fail_wb_init; - } - wb_enc->hw_wb = hw_wb; - } else { - ret = -EINVAL; - SDE_ERROR("invalid wb_idx\n"); - goto fail_wb_check; - } + struct sde_rm_hw_iter iter; - if (p->cdm_idx != SDE_NONE) { - struct sde_hw_cdm *hw_cdm; - - 
SDE_DEBUG("Acquiring CDM %d\n", p->cdm_idx - CDM_0); - hw_cdm = sde_rm_acquire_cdm_path(p->sde_kms, p->cdm_idx, - phys_enc->hw_mdptop); - if (IS_ERR_OR_NULL(hw_cdm)) { - ret = PTR_ERR(hw_cdm); - SDE_ERROR("failed to init hw_cdm%d: %d\n", - p->cdm_idx - CDM_0, ret); - goto fail_cdm_init; - } - phys_enc->hw_cdm = hw_cdm; - } + sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_WB); + while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) { + struct sde_hw_wb *hw_wb = (struct sde_hw_wb *)iter.hw; - if (p->ctl_idx != SDE_NONE) { - struct sde_hw_ctl *hw_ctl; + if (hw_wb->idx == p->wb_idx) { + wb_enc->hw_wb = hw_wb; + break; + } + } - SDE_DEBUG("Acquiring CTL %d\n", p->ctl_idx - CTL_0); - hw_ctl = sde_rm_acquire_ctl_path(p->sde_kms, p->ctl_idx); - if (IS_ERR_OR_NULL(hw_ctl)) { - ret = PTR_ERR(hw_ctl); - SDE_ERROR("failed to init hw_ctl%d: %d\n", - p->ctl_idx - CTL_0, ret); - goto fail_ctl_init; + if (!wb_enc->hw_wb) { + ret = -EINVAL; + SDE_ERROR("failed to init hw_wb%d\n", p->wb_idx - WB_0); + goto fail_wb_init; } - phys_enc->hw_ctl = hw_ctl; } else { ret = -EINVAL; - SDE_ERROR("invalid ctl_idx\n"); - goto fail_ctl_check; + SDE_ERROR("invalid wb_idx\n"); + goto fail_wb_check; } sde_encoder_phys_wb_init_ops(&phys_enc->ops); @@ -1021,15 +1015,8 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( return phys_enc; fail_debugfs_init: - sde_rm_release_ctl_path(p->sde_kms, p->ctl_idx); -fail_ctl_init: -fail_ctl_check: - sde_rm_release_cdm_path(p->sde_kms, p->cdm_idx); -fail_cdm_init: - sde_hw_wb_destroy(wb_enc->hw_wb); fail_wb_init: fail_wb_check: - sde_hw_mdp_destroy(phys_enc->hw_mdptop); fail_mdp_init: kfree(wb_enc); fail_alloc: diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index f2a54f762b0b..800a5cb2e723 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -25,18 +25,6 @@ static const char * const iommu_ports[] = { "mdp_0", }; -static const struct sde_hw_res_map res_table[INTF_MAX + WB_MAX] = { - {SDE_NONE, SDE_NONE, 
SDE_NONE, SDE_NONE, SDE_NONE, SDE_NONE}, - {INTF_0, SDE_NONE, SDE_NONE, SDE_NONE, SDE_NONE, SDE_NONE}, - {INTF_1, SDE_NONE, LM_0, PINGPONG_0, CTL_0, SDE_NONE}, - {INTF_2, SDE_NONE, LM_1, PINGPONG_1, CTL_1, SDE_NONE }, - {INTF_3, SDE_NONE, SDE_NONE, SDE_NONE, CTL_2, SDE_NONE}, - {SDE_NONE, WB_0, LM_3, SDE_NONE, CTL_3, SDE_NONE}, - {SDE_NONE, WB_1, LM_4, SDE_NONE, CTL_4, SDE_NONE}, - {SDE_NONE, WB_2, LM_2, SDE_NONE, CTL_2, CDM_0}, -}; - - #define DEFAULT_MDP_SRC_CLK 300000000 /** @@ -299,21 +287,15 @@ static int modeset_init(struct sde_kms *sde_kms) goto fail; } - /* - * Enumerate displays supported - */ - sde_encoders_init(dev); - - /* Create one CRTC per display */ - for (i = 0; i < priv->num_encoders; i++) { + /* Create one CRTC per layer mixer */ + for (i = 0; i < catalog->mixer_count; i++) { /* * Each CRTC receives a private plane. We start * with first RGB, and then DMA and then VIG. */ struct drm_crtc *crtc; - crtc = sde_crtc_init(dev, priv->encoders[i], - primary_planes[i], i); + crtc = sde_crtc_init(dev, primary_planes[i], i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); goto fail; @@ -321,10 +303,10 @@ static int modeset_init(struct sde_kms *sde_kms) priv->crtcs[priv->num_crtcs++] = crtc; } - /* - * Iterate through the list of encoders and - * set the possible CRTCs - */ + /* Enumerate displays supported */ + sde_encoders_init(dev); + + /* All CRTCs are compatible with all encoders */ for (i = 0; i < priv->num_encoders; i++) priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1; @@ -363,6 +345,7 @@ static void sde_destroy(struct msm_kms *kms) sde_debugfs_destroy(sde_kms); sde_irq_domain_fini(sde_kms); sde_hw_intr_destroy(sde_kms->hw_intr); + sde_rm_destroy(&sde_kms->rm); kfree(sde_kms); } @@ -691,7 +674,13 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) */ clk_set_rate(sde_kms->src_clk, DEFAULT_MDP_SRC_CLK); - sde_kms->hw_res.res_table = res_table; + + sde_enable(sde_kms); + ret = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio, + 
sde_kms->dev); + sde_disable(sde_kms); + if (ret) + goto fail; /* * Now we need to read the HW catalog and initialize resources such as @@ -730,8 +719,7 @@ struct msm_kms *sde_kms_init(struct drm_device *dev) */ dev->mode_config.allow_fb_modifiers = true; - sde_kms->hw_intr = sde_rm_acquire_intr(sde_kms); - + sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog); if (IS_ERR_OR_NULL(sde_kms->hw_intr)) goto fail; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index e0bb7a8cfbcf..f115d4345723 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -24,6 +24,7 @@ #include "sde_hw_wb.h" #include "sde_hw_top.h" #include "sde_connector.h" +#include "sde_rm.h" /** * SDE_DEBUG - macro for kms/plane/crtc/encoder/connector logs @@ -51,47 +52,6 @@ #define SDE_ERROR(fmt, ...) pr_err(fmt, ##__VA_ARGS__) -/** - * enum sde_rm_topology_name - HW resource use case in use by connector - * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently - * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB - * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB - * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB - * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB - */ -enum sde_rm_topology_name { - SDE_RM_TOPOLOGY_UNKNOWN = 0, - SDE_RM_TOPOLOGY_SINGLEPIPE, - SDE_RM_TOPOLOGY_DUALPIPE, - SDE_RM_TOPOLOGY_PPSPLIT, - SDE_RM_TOPOLOGY_DUALPIPEMERGE, -}; - -/** - * enum sde_rm_topology_control - HW resource use case in use by connector - * @SDE_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful - * test, reserve the resources for this display. - * Normal behavior would not impact the reservation - * list during the AtomicTest phase. - * @SDE_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing, - * release any reservation held by this display. - * Normal behavior would not impact the - * reservation list during the AtomicTest phase. 
- * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities - * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer - * mixers, despite width fitting within capability - * of a single layer mixer. - * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe - * configuration instead of dual pipe. - */ -enum sde_rm_topology_control { - SDE_RM_TOPCTL_RESERVE_LOCK, - SDE_RM_TOPCTL_RESERVE_CLEAR, - SDE_RM_TOPCTL_DSPP, - SDE_RM_TOPCTL_FORCE_TILING, - SDE_RM_TOPCTL_PPSPLIT, -}; - /* * struct sde_irq_callback - IRQ callback handlers * @func: intr handler @@ -115,39 +75,17 @@ struct sde_irq { }; /** - * struct sde_hw_res_map : Default resource table identifying default - * hw resource map. Primarily used for forcing DSI to use CTL_0/1 - * and PingPong 0/1, if the field is set to SDE_NONE means any HW - * instance for that type is allowed as long as it is unused. + * Encoder functions and data types + * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused + * @wbs: Writebacks this encoder is using, INTF_MODE_NONE if unused + * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs + * @display_num_of_h_tiles: */ -struct sde_hw_res_map { - enum sde_intf intf; - enum sde_wb wb; - enum sde_lm lm; - enum sde_pingpong pp; - enum sde_ctl ctl; - enum sde_cdm cdm; -}; - -/* struct sde_hw_resource_manager : Resource manager maintains the current - * default platform config and manages shared - * hw resources ex:ctl_path hw driver context - * is needed by CRTCs/PLANEs/ENCODERs - * @ctl : table of control path hw driver contexts allocated - * @cdm : table of chroma down path hw driver contexts allocated - * @mixer : list of mixer hw drivers contexts allocated - * @intr : pointer to hw interrupt context - * @res_table : pointer to default hw_res table for this platform - * @feature_map :BIT map for default enabled features ex:specifies if PP_SPLIT - * is enabled/disabled by default for this platform - */ -struct 
sde_hw_resource_manager { - struct sde_hw_ctl *ctl[CTL_MAX]; - struct sde_hw_cdm *cdm[CDM_MAX]; - struct sde_hw_mixer *mixer[LM_MAX]; - struct sde_hw_intr *intr; - const struct sde_hw_res_map *res_table; - bool feature_map; +struct sde_encoder_hw_resources { + enum sde_intf_mode intfs[INTF_MAX]; + enum sde_intf_mode wbs[WB_MAX]; + bool needs_cdm; + u32 display_num_of_h_tiles; }; struct sde_kms { @@ -186,7 +124,8 @@ struct sde_kms { struct sde_hw_intr *hw_intr; struct sde_irq irq_obj; - struct sde_hw_resource_manager hw_res; + + struct sde_rm rm; }; struct vsync_info { @@ -439,46 +378,6 @@ void sde_kms_info_append_format(struct sde_kms_info *info, void sde_kms_info_stop(struct sde_kms_info *info); /** - * HW resource manager functions - * @sde_rm_acquire_ctl_path : Allocates control path - * @sde_rm_get_ctl_path : returns control path driver context for already - * acquired ctl path - * @sde_rm_release_ctl_path : Frees control path driver context - * @sde_rm_acquire_mixer : Allocates mixer hw driver context - * @sde_rm_get_mixer : returns mixer context for already - * acquired mixer - * @sde_rm_release_mixer : Frees mixer hw driver context - * @sde_rm_acquire_intr : Allocate hw intr context - * @sde_rm_get_intr : Returns already acquired intr context - * @sde_rm_get_hw_res_map : Returns map for the passed INTF - */ -struct sde_hw_ctl *sde_rm_acquire_ctl_path(struct sde_kms *sde_kms, - enum sde_ctl idx); -struct sde_hw_ctl *sde_rm_get_ctl_path(struct sde_kms *sde_kms, - enum sde_ctl idx); -void sde_rm_release_ctl_path(struct sde_kms *sde_kms, - enum sde_ctl idx); - -struct sde_hw_cdm *sde_rm_acquire_cdm_path(struct sde_kms *sde_kms, - enum sde_cdm idx, struct sde_hw_mdp *hw_mdp); -struct sde_hw_cdm *sde_rm_get_cdm_path(struct sde_kms *sde_kms, - enum sde_cdm idx); -void sde_rm_release_cdm_path(struct sde_kms *sde_kms, - enum sde_cdm idx); - -struct sde_hw_mixer *sde_rm_acquire_mixer(struct sde_kms *sde_kms, - enum sde_lm idx); -struct sde_hw_mixer 
*sde_rm_get_mixer(struct sde_kms *sde_kms, - enum sde_lm idx); -void sde_rm_release_mixer(struct sde_kms *sde_kms, - enum sde_lm idx); -struct sde_hw_intr *sde_rm_acquire_intr(struct sde_kms *sde_kms); -struct sde_hw_intr *sde_rm_get_intr(struct sde_kms *sde_kms); - -const struct sde_hw_res_map *sde_rm_get_res_map(struct sde_kms *sde_kms, - enum sde_intf intf, enum sde_wb wb); - -/** * IRQ functions */ int sde_irq_domain_init(struct sde_kms *sde_kms); @@ -625,13 +524,11 @@ void sde_crtc_prepare_fence(struct drm_crtc *crtc); /** * sde_crtc_init - create a new crtc object * @dev: sde device - * @encoder: encoder attached to this crtc * @plane: base plane * @vblank_id: Id for reporting vblank. Id in range from 0..dev->num_crtcs. * @Return: new crtc object or error */ struct drm_crtc *sde_crtc_init(struct drm_device *dev, - struct drm_encoder *encoder, struct drm_plane *plane, int vblank_id); @@ -642,23 +539,14 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, void sde_crtc_complete_commit(struct drm_crtc *crtc); /** - * Encoder functions and data types - */ -struct sde_encoder_hw_resources { - enum sde_intf_mode intfs[INTF_MAX]; - enum sde_intf_mode wbs[WB_MAX]; - bool pingpongs[PINGPONG_MAX]; - bool ctls[CTL_MAX]; - bool pingpongsplit; -}; - -/** * sde_encoder_get_hw_resources - Populate table of required hardware resources * @encoder: encoder pointer * @hw_res: resource table to populate with encoder required resources + * @conn_state: report hw reqs based on this proposed connector state */ void sde_encoder_get_hw_resources(struct drm_encoder *encoder, - struct sde_encoder_hw_resources *hw_res); + struct sde_encoder_hw_resources *hw_res, + struct drm_connector_state *conn_state); /** * sde_encoder_needs_ctl_start - Get whether encoder type requires ctl_start diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c index c4c9f7601053..6e29c09deb40 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms_utils.c +++ 
b/drivers/gpu/drm/msm/sde/sde_kms_utils.c @@ -10,221 +10,9 @@ * GNU General Public License for more details. */ -#include "sde_kms.h" -#include "sde_hw_lm.h" -#include "sde_hw_ctl.h" -#include "sde_hw_cdm.h" - -struct sde_hw_intr *sde_rm_acquire_intr(struct sde_kms *sde_kms) -{ - struct sde_hw_intr *hw_intr; - - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } - - if (sde_kms->hw_res.intr) { - DRM_ERROR("intr already in use "); - return ERR_PTR(-ENODEV); - } - - sde_enable(sde_kms); - hw_intr = sde_hw_intr_init(sde_kms->mmio, - sde_kms->catalog); - sde_disable(sde_kms); - - if (!IS_ERR_OR_NULL(hw_intr)) - sde_kms->hw_res.intr = hw_intr; - - return hw_intr; -} - -struct sde_hw_intr *sde_rm_get_intr(struct sde_kms *sde_kms) -{ - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } - - return sde_kms->hw_res.intr; -} - -struct sde_hw_ctl *sde_rm_acquire_ctl_path(struct sde_kms *sde_kms, - enum sde_ctl idx) -{ - struct sde_hw_ctl *hw_ctl; - - if (!sde_kms) { - DRM_ERROR("Invalid KMS driver"); - return ERR_PTR(-EINVAL); - } - - if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) { - DRM_ERROR("Invalid Ctl Path Idx %d", idx); - return ERR_PTR(-EINVAL); - } +#define pr_fmt(fmt) "sde-kms_utils:[%s] " fmt, __func__ - if (sde_kms->hw_res.ctl[idx]) { - DRM_ERROR("CTL path %d already in use ", idx); - return ERR_PTR(-ENODEV); - } - - sde_enable(sde_kms); - hw_ctl = sde_hw_ctl_init(idx, sde_kms->mmio, sde_kms->catalog); - sde_disable(sde_kms); - - if (!IS_ERR_OR_NULL(hw_ctl)) - sde_kms->hw_res.ctl[idx] = hw_ctl; - - return hw_ctl; -} - -struct sde_hw_ctl *sde_rm_get_ctl_path(struct sde_kms *sde_kms, - enum sde_ctl idx) -{ - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } - if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) { - DRM_ERROR("Invalid Ctl path Idx %d", idx); - return ERR_PTR(-EINVAL); - } - - return sde_kms->hw_res.ctl[idx]; -} - -void 
sde_rm_release_ctl_path(struct sde_kms *sde_kms, enum sde_ctl idx) -{ - if (!sde_kms) { - DRM_ERROR("Invalid pointer\n"); - return; - } - if ((idx == SDE_NONE) || (idx > sde_kms->catalog->ctl_count)) { - DRM_ERROR("Invalid Ctl path Idx %d", idx); - return; - } -} - -struct sde_hw_cdm *sde_rm_acquire_cdm_path(struct sde_kms *sde_kms, - enum sde_cdm idx, struct sde_hw_mdp *hw_mdp) -{ - struct sde_hw_cdm *hw_cdm; - - if (!sde_kms) { - DRM_ERROR("Invalid KMS driver"); - return ERR_PTR(-EINVAL); - } else if ((idx == SDE_NONE) || (idx > sde_kms->catalog->cdm_count)) { - DRM_ERROR("Invalid Cdm Path Idx %d", idx); - return ERR_PTR(-EINVAL); - } else if (sde_kms->hw_res.cdm[idx]) { - DRM_ERROR("Cdm path %d already in use ", idx); - return ERR_PTR(-ENODEV); - } - - sde_enable(sde_kms); - hw_cdm = sde_hw_cdm_init(idx, sde_kms->mmio, sde_kms->catalog, hw_mdp); - sde_disable(sde_kms); - - if (!IS_ERR_OR_NULL(hw_cdm)) - sde_kms->hw_res.cdm[idx] = hw_cdm; - - return hw_cdm; -} - -struct sde_hw_cdm *sde_rm_get_cdm_path(struct sde_kms *sde_kms, - enum sde_cdm idx) -{ - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } else if ((idx == SDE_NONE) || (idx > sde_kms->catalog->cdm_count)) { - DRM_ERROR("Invalid Cdm path Idx %d", idx); - return ERR_PTR(-EINVAL); - } - - return sde_kms->hw_res.cdm[idx]; -} - -void sde_rm_release_cdm_path(struct sde_kms *sde_kms, enum sde_cdm idx) -{ -} - -struct sde_hw_mixer *sde_rm_acquire_mixer(struct sde_kms *sde_kms, - enum sde_lm idx) -{ - struct sde_hw_mixer *mixer; - - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } - - if ((idx == SDE_NONE) || (idx > sde_kms->catalog->mixer_count)) { - DBG("Invalid mixer id %d", idx); - return ERR_PTR(-EINVAL); - } - - if (sde_kms->hw_res.mixer[idx]) { - DRM_ERROR("mixer %d already in use ", idx); - return ERR_PTR(-ENODEV); - } - - sde_enable(sde_kms); - mixer = sde_hw_lm_init(idx, sde_kms->mmio, sde_kms->catalog); - sde_disable(sde_kms); - - if 
(!IS_ERR_OR_NULL(mixer)) - sde_kms->hw_res.mixer[idx] = mixer; - - return mixer; -} - -struct sde_hw_mixer *sde_rm_get_mixer(struct sde_kms *sde_kms, - enum sde_lm idx) -{ - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } - - if ((idx == SDE_NONE) || (idx > sde_kms->catalog->mixer_count)) { - DRM_ERROR("Invalid mixer id %d", idx); - return ERR_PTR(-EINVAL); - } - - return sde_kms->hw_res.mixer[idx]; -} - -const struct sde_hw_res_map *sde_rm_get_res_map(struct sde_kms *sde_kms, - enum sde_intf intf, enum sde_wb wb) -{ - int i; - - if (!sde_kms) { - DRM_ERROR("Invalid KMS Driver"); - return ERR_PTR(-EINVAL); - } - - for (i = 0; i < (INTF_MAX + WB_MAX); i++) { - if ((sde_kms->hw_res.res_table[i].intf == intf) && - (sde_kms->hw_res.res_table[i].wb == wb)) { - DBG( - " Platform Resource map for INTF %d, WB %d -> lm %d, pp %d ctl %d cdm %d", - sde_kms->hw_res.res_table[i].intf, - sde_kms->hw_res.res_table[i].wb, - sde_kms->hw_res.res_table[i].lm, - sde_kms->hw_res.res_table[i].pp, - sde_kms->hw_res.res_table[i].ctl, - sde_kms->hw_res.res_table[i].cdm); - - return &(sde_kms->hw_res.res_table[i]); - } - } - - return ERR_PTR(-EINVAL); -} +#include "sde_kms.h" void sde_kms_info_reset(struct sde_kms_info *info) { @@ -363,4 +151,3 @@ void sde_kms_info_stop(struct sde_kms_info *info) info->len = info->staged_len + len; } } - diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c index 3e9f85c8cd9b..56ade70774ec 100644 --- a/drivers/gpu/drm/msm/sde/sde_rm.c +++ b/drivers/gpu/drm/msm/sde/sde_rm.c @@ -117,7 +117,7 @@ struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm) return rm->hw_mdp; } -void sde_rm_init_iter( +void sde_rm_init_hw_iter( struct sde_rm_hw_iter *iter, uint32_t enc_id, enum sde_hw_blk_type type) diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h index ebc8683c4731..5f1726932a84 100644 --- a/drivers/gpu/drm/msm/sde/sde_rm.h +++ b/drivers/gpu/drm/msm/sde/sde_rm.h @@ -190,4 
+190,11 @@ bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter); */ int sde_rm_check_property_topctl(uint64_t val); +/** + * sde_rm_check_property_topctl - validate property bitmask before it is set + * @val: user's proposed topology control bitmask + * @Return: 0 on success or error + */ +int sde_rm_check_property_topctl(uint64_t val); + #endif /* __sde_kms_rm_H__ */ |
