diff options
| author | Guchun Chen <guchunc@codeaurora.org> | 2017-04-20 19:32:09 +0800 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2017-04-27 20:15:29 -0700 |
| commit | 45380e224102c5caec2f31e545015261a6b75808 (patch) | |
| tree | 3d261074fd08d4c257ba830099d0e278a11b5d1d /drivers/gpu | |
| parent | 74966209e53007e80a1c25ef3af9c87213cbb571 (diff) | |
msm: sde: add early display handoff feature.
When enabling animation/state splash in LK, the drm/kms driver needs
to invoke handoff code to support a smooth transition. In display
probe it will do the following items:
1. Check the status in LK for early splash.
2. Handle SMMU mapping issue to avoid SMMU fault problem.
3. Reserve memory, and bypass hardware reset to avoid glitches.
And after user space is up, when the first commit comes, it will call
sde_splash_clean_up to:
1. tell LK to stop splash and to exit.
2. set early_domain_map_attr to 1 to enable stage 1 translation in
iommu driver.
3. free the memory back to the system.
Change-Id: If425f044e2c40301eed57375a33a26ec1970abd5
Signed-off-by: Guchun Chen <guchunc@codeaurora.org>
Diffstat (limited to 'drivers/gpu')
| -rw-r--r-- | drivers/gpu/drm/msm/Makefile | 1 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 27 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 8 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 5 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/msm_mmu.h | 2 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/msm_smmu.c | 23 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_kms.c | 74 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_kms.h | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_splash.c | 418 | ||||
| -rw-r--r-- | drivers/gpu/drm/msm/sde/sde_splash.h | 89 |
10 files changed, 647 insertions, 4 deletions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index d7e56f57c78e..dabd46e7103a 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -47,6 +47,7 @@ msm_drm-y := \ sde/sde_backlight.o \ sde/sde_color_processing.o \ sde/sde_vbif.o \ + sde/sde_splash.o \ sde_dbg_evtlog.o \ sde_io_util.o \ dba_bridge.o \ diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 347b78886b24..93bda58dc18f 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -288,6 +288,13 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) uint32_t hpd_ctrl; int i, ret; unsigned long flags; + struct drm_connector *connector; + struct msm_drm_private *priv; + struct sde_kms *sde_kms; + + connector = hdmi->connector; + priv = connector->dev->dev_private; + sde_kms = to_sde_kms(priv->kms); for (i = 0; i < config->hpd_reg_cnt; i++) { ret = regulator_enable(hdmi->hpd_regs[i]); @@ -327,9 +334,11 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) } } - sde_hdmi_set_mode(hdmi, false); - _sde_hdmi_phy_reset(hdmi); - sde_hdmi_set_mode(hdmi, true); + if (!sde_kms->splash_info.handoff) { + sde_hdmi_set_mode(hdmi, false); + _sde_hdmi_phy_reset(hdmi); + sde_hdmi_set_mode(hdmi, true); + } hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b); @@ -1258,6 +1267,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) struct msm_drm_private *priv = NULL; struct hdmi *hdmi; struct platform_device *pdev; + struct sde_kms *sde_kms; DBG(""); if (!display || !display->drm_dev || !enc) { @@ -1309,6 +1319,17 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) enc->bridge = hdmi->bridge; priv->bridges[priv->num_bridges++] = hdmi->bridge; + /* + * After initialising HDMI bridge, we need to check + * whether the early display is enabled for HDMI. 
+ * If yes, we need to increase refcount of hdmi power + * clocks. This can skip the clock disabling operation in + * clock_late_init when finding clk.count == 1. + */ + sde_kms = to_sde_kms(priv->kms); + if (sde_kms->splash_info.handoff) + sde_hdmi_bridge_power_on(hdmi->bridge); + mutex_unlock(&display->display_lock); return 0; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index 869d1bebf9db..054e8d7bc5d8 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -242,6 +242,14 @@ int sde_hdmi_get_info(struct msm_display_info *info, void *display); /** + * sde_hdmi_bridge_power_on -- A wrapper of _sde_hdmi_bridge_power_on. + * @bridge: Handle to the drm bridge. + * + * Return: void. + */ +void sde_hdmi_bridge_power_on(struct drm_bridge *bridge); + +/** * sde_hdmi_bridge_init() - init sde hdmi bridge * @hdmi: Handle to the hdmi. * diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c index 681dca501f9b..17c1ea3271fe 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c @@ -375,6 +375,11 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge, } } +void sde_hdmi_bridge_power_on(struct drm_bridge *bridge) +{ + _sde_hdmi_bridge_power_on(bridge); +} + static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = { .pre_enable = _sde_hdmi_bridge_pre_enable, .enable = _sde_hdmi_bridge_enable, diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 501f12bef00d..c8703136ecce 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -45,6 +45,8 @@ struct msm_mmu_funcs { void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt, struct dma_buf *dma_buf, int dir); void (*destroy)(struct msm_mmu *mmu); + int (*set_property)(struct msm_mmu *mmu, + enum iommu_attr attr, void 
*data); }; struct msm_mmu { diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index 172aba387982..ccb43dd94f53 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -255,6 +255,28 @@ static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt, msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf); } +/* user can call this API to set the attribute of smmu*/ +int msm_smmu_set_property(struct msm_mmu *mmu, enum iommu_attr attr, void *data) +{ + struct msm_smmu *smmu = to_msm_smmu(mmu); + struct msm_smmu_client *client = msm_smmu_to_client(smmu); + struct iommu_domain *domain; + int ret = 0; + + if (!client) + return -EINVAL; + + domain = client->mmu_mapping->domain; + if (!domain) + return -EINVAL; + + ret = iommu_domain_set_attr(domain, attr, data); + if (ret) + DRM_ERROR("set domain attribute failed\n"); + + return ret; +} + static const struct msm_mmu_funcs funcs = { .attach = msm_smmu_attach, .detach = msm_smmu_detach, @@ -265,6 +287,7 @@ static const struct msm_mmu_funcs funcs = { .map_dma_buf = msm_smmu_map_dma_buf, .unmap_dma_buf = msm_smmu_unmap_dma_buf, .destroy = msm_smmu_destroy, + .set_property = msm_smmu_set_property, }; static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 195eadc2e5fd..503d07a03a42 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -359,6 +359,9 @@ static void sde_kms_prepare_commit(struct msm_kms *kms, struct drm_device *dev = sde_kms->dev; struct msm_drm_private *priv = dev->dev_private; + if (sde_kms->splash_info.handoff) + sde_splash_clean_up(kms); + sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true); } @@ -974,6 +977,8 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms, if (sde_kms->mmio) msm_iounmap(pdev, sde_kms->mmio); sde_kms->mmio = NULL; + + 
sde_splash_destroy(&sde_kms->splash_info); } static void sde_kms_destroy(struct msm_kms *kms) @@ -1072,6 +1077,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) continue; } + /* Attaching smmu means IOMMU HW starts to work immediately. + * However, display HW in LK is still accessing memory + * while the memory map is not done yet. + * So first set DOMAIN_ATTR_EARLY_MAP attribute 1 to bypass + * stage 1 translation in IOMMU HW. + */ + if ((i == MSM_SMMU_DOMAIN_UNSECURE) && + sde_kms->splash_info.handoff) { + ret = mmu->funcs->set_property(mmu, + DOMAIN_ATTR_EARLY_MAP, + &sde_kms->splash_info.handoff); + if (ret) { + SDE_ERROR("failed to set map att: %d\n", ret); + mmu->funcs->destroy(mmu); + goto fail; + } + } + aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev, mmu, "sde"); if (IS_ERR(aspace)) { @@ -1090,6 +1113,19 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) goto fail; } + /* + * It's safe now to map the physical memory blcok LK accesses. + */ + if ((i == MSM_SMMU_DOMAIN_UNSECURE) && + sde_kms->splash_info.handoff) { + ret = sde_splash_smmu_map(sde_kms->dev, mmu, + &sde_kms->splash_info); + if (ret) { + SDE_ERROR("map rsv mem failed: %d\n", ret); + msm_gem_address_space_put(aspace); + goto fail; + } + } } return 0; @@ -1104,6 +1140,7 @@ static int sde_kms_hw_init(struct msm_kms *kms) struct sde_kms *sde_kms; struct drm_device *dev; struct msm_drm_private *priv; + struct sde_splash_info *sinfo; int i, rc = -EINVAL; if (!kms) { @@ -1193,6 +1230,38 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } + rc = sde_splash_parse_dt(dev); + if (rc) { + SDE_ERROR("parse dt for splash info failed: %d\n", rc); + goto power_error; + } + + /* + * Read the DISP_INTF_SEL register to check + * whether early display is enabled in LK. 
+ */ + rc = sde_splash_get_handoff_status(kms); + if (rc) { + SDE_ERROR("get early splash status failed: %d\n", rc); + goto power_error; + } + + /* + * when LK has enabled early display, the buffer LK used should be + * reserved first, not to be accessed again by other allocators. + */ + sinfo = &sde_kms->splash_info; + if (sinfo->handoff) { + for (i = 0; i < sinfo->splash_mem_num; i++) { + if (sde_splash_reserve_memory( + sinfo->splash_mem_paddr[i], + sinfo->splash_mem_size[i])) { + dev_err(dev->dev, "memblock reserve failed\n"); + goto power_error; + } + } + } + for (i = 0; i < sde_kms->catalog->vbif_count; i++) { u32 vbif_idx = sde_kms->catalog->vbif[i].id; @@ -1267,7 +1336,10 @@ static int sde_kms_hw_init(struct msm_kms *kms) */ dev->mode_config.allow_fb_modifiers = true; - sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false); + if (!sde_kms->splash_info.handoff) + sde_power_resource_enable(&priv->phandle, + sde_kms->core_client, false); + return 0; drm_obj_init_err: diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 44f6be959ac9..d929e48a3fe8 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -34,6 +34,7 @@ #include "sde_power_handle.h" #include "sde_irq.h" #include "sde_core_perf.h" +#include "sde_splash.h" #define DRMID(x) ((x) ? (x)->base.id : -1) @@ -157,6 +158,9 @@ struct sde_kms { bool has_danger_ctrl; void **hdmi_displays; int hdmi_display_count; + + /* splash handoff structure */ + struct sde_splash_info splash_info; }; struct vsync_info { diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c new file mode 100644 index 000000000000..a48225e2a7cf --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_splash.c @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/of_address.h> +#include <linux/debugfs.h> +#include <linux/memblock.h> + +#include "msm_drv.h" +#include "msm_mmu.h" +#include "sde_kms.h" +#include "sde_hw_mdss.h" +#include "sde_hw_util.h" +#include "sde_hw_intf.h" +#include "sde_hw_catalog.h" + +#define MDP_SSPP_TOP0_OFF 0x1000 +#define DISP_INTF_SEL 0x004 +#define SPLIT_DISPLAY_EN 0x2F4 + +/* scratch registers */ +#define SCRATCH_REGISTER_0 0x014 +#define SCRATCH_REGISTER_1 0x018 +#define SCRATCH_REGISTER_2 0x01C + +#define SDE_LK_RUNNING_VALUE 0xC001CAFE +#define SDE_LK_SHUT_DOWN_VALUE 0xDEADDEAD +#define SDE_LK_EXIT_VALUE 0xDEADBEEF + +/* + * In order to free reseved memory from bootup, and we are not + * able to call the __init free functions, so we need to free + * this memory by ourselves using the free_reserved_page() function. 
+ */ +static int sde_splash_release_bootup_memory(phys_addr_t phys, size_t size) +{ + unsigned long pfn_start, pfn_end, pfn_idx; + + pfn_start = phys >> PAGE_SHIFT; + pfn_end = (phys + size) >> PAGE_SHIFT; + + for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++) + free_reserved_page(pfn_to_page(pfn_idx)); + + return 0; +} + +int sde_splash_reserve_memory(phys_addr_t phys, size_t size) +{ + return memblock_reserve(phys, size); +} + +int sde_splash_parse_dt(struct drm_device *dev) +{ + struct msm_drm_private *priv = dev->dev_private; + unsigned long size = 0; + dma_addr_t start; + struct device_node *node; + struct sde_kms *sde_kms = to_sde_kms(priv->kms); + struct sde_splash_info *sinfo; + int ret = 0, i = 0, len = 0; + + sinfo = &sde_kms->splash_info; + if (!sinfo) + return -EINVAL; + + if (of_get_property(dev->dev->of_node, "contiguous-region", &len)) + sinfo->splash_mem_num = len/sizeof(u32); + else + sinfo->splash_mem_num = 0; + + sinfo->splash_mem_paddr = + kmalloc(sizeof(phys_addr_t) * sinfo->splash_mem_num, + GFP_KERNEL); + + sinfo->splash_mem_size = + kmalloc(sizeof(size_t) * sinfo->splash_mem_num, + GFP_KERNEL); + if (!sinfo->splash_mem_paddr || !sinfo->splash_mem_size) + return -ENOMEM; + + + sinfo->obj = kmalloc(sizeof(struct drm_gem_object *) * + sinfo->splash_mem_num, GFP_KERNEL); + if (!sinfo->obj) + return -ENOMEM; + + for (i = 0; i < sinfo->splash_mem_num; i++) { + node = of_parse_phandle(dev->dev->of_node, + "contiguous-region", i); + + if (node) { + struct resource r; + + ret = of_address_to_resource(node, 0, &r); + if (ret) + return ret; + + size = r.end - r.start; + start = (dma_addr_t)r.start; + + sinfo->splash_mem_paddr[i] = start; + sinfo->splash_mem_size[i] = size; + + DRM_INFO("blk: %d, addr:%pK, size:%pK\n", + i, (void *)sinfo->splash_mem_paddr[i], + (void *)sinfo->splash_mem_size[i]); + } + + of_node_put(node); + } + + return ret; +} + +int sde_splash_get_handoff_status(struct msm_kms *kms) +{ + uint32_t intf_sel = 0; + uint32_t 
split_display = 0; + uint32_t num_of_display_on = 0; + uint32_t i = 0; + struct sde_kms *sde_kms = to_sde_kms(kms); + struct sde_rm *rm; + struct sde_hw_blk_reg_map *c; + struct sde_splash_info *sinfo; + struct sde_mdss_cfg *catalog; + + sinfo = &sde_kms->splash_info; + if (!sinfo) { + SDE_ERROR("%s(%d): invalid splash info\n", + __func__, __LINE__); + return -EINVAL; + } + + rm = &sde_kms->rm; + + if (!rm || !rm->hw_mdp) { + SDE_ERROR("invalid rm.\n"); + return -EINVAL; + } + + c = &rm->hw_mdp->hw; + if (c) { + intf_sel = SDE_REG_READ(c, DISP_INTF_SEL); + split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN); + } + + catalog = sde_kms->catalog; + + if (intf_sel != 0) { + for (i = 0; i < catalog->intf_count; i++) + if ((intf_sel >> i*8) & 0x000000FF) + num_of_display_on++; + + /* + * For split display enabled - DSI0, DSI1 interfaces are + * considered as single display. So decrement + * 'num_of_display_on' by 1 + */ + if (split_display) + num_of_display_on--; + } + + if (num_of_display_on) { + sinfo->handoff = true; + sinfo->program_scratch_regs = true; + } else { + sinfo->handoff = false; + sinfo->program_scratch_regs = false; + } + + return 0; +} + +static bool sde_splash_lk_check(struct sde_hw_intr *intr) +{ + return (SDE_LK_RUNNING_VALUE == SDE_REG_READ(&intr->hw, + SCRATCH_REGISTER_1)) ? 
true : false; +} + +static int sde_splash_notify_lk_exit(struct sde_hw_intr *intr) +{ + int i = 0; + + /* first is to write exit signal to scratch register*/ + SDE_REG_WRITE(&intr->hw, SCRATCH_REGISTER_1, SDE_LK_SHUT_DOWN_VALUE); + + while ((SDE_LK_EXIT_VALUE != + SDE_REG_READ(&intr->hw, SCRATCH_REGISTER_1)) && (i++ < 20)) { + DRM_INFO("wait for LK's exit"); + msleep(20); + } + + return 0; +} + +static int sde_splash_gem_new(struct drm_device *dev, + struct sde_splash_info *sinfo) +{ + int i, ret; + + for (i = 0; i < sinfo->splash_mem_num; i++) { + mutex_lock(&dev->struct_mutex); + sinfo->obj[i] = msm_gem_new(dev, + sinfo->splash_mem_size[i], MSM_BO_UNCACHED); + mutex_unlock(&dev->struct_mutex); + + if (IS_ERR(sinfo->obj[i])) { + ret = PTR_ERR(sinfo->obj[i]); + SDE_ERROR("failed to allocate gem, ret=%d\n", ret); + goto error; + } + } + + return 0; + +error: + for (i = 0; i < sinfo->splash_mem_num; i++) { + if (sinfo->obj[i]) + msm_gem_free_object(sinfo->obj[i]); + sinfo->obj[i] = NULL; + } + + return ret; +} + +static int sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct page **p; + dma_addr_t paddr; + int npages = obj->size >> PAGE_SHIFT; + int i; + + p = drm_malloc_ab(npages, sizeof(struct page *)); + if (!p) + return -ENOMEM; + + paddr = phys; + + for (i = 0; i < npages; i++) { + p[i] = phys_to_page(paddr); + paddr += PAGE_SIZE; + } + + msm_obj->sgt = drm_prime_pages_to_sg(p, npages); + if (IS_ERR(msm_obj->sgt)) { + SDE_ERROR("failed to allocate sgt\n"); + return -ENOMEM; + } + + msm_obj->pages = p; + + return 0; +} + +int sde_splash_destroy(struct sde_splash_info *sinfo) +{ + int i = 0; + struct msm_gem_object *msm_obj; + + kfree(sinfo->splash_mem_paddr); + sinfo->splash_mem_paddr = NULL; + + kfree(sinfo->splash_mem_size); + sinfo->splash_mem_size = NULL; + + for (i = 0; i < sinfo->splash_mem_num; i++) { + msm_obj = to_msm_bo(sinfo->obj[i]); + if (msm_obj->pages) { + 
sg_free_table(msm_obj->sgt); + kfree(msm_obj->sgt); + drm_free_large(msm_obj->pages); + msm_obj->pages = NULL; + } + } + + return 0; +} + +int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, + struct sde_splash_info *sinfo) +{ + struct msm_gem_object *msm_obj; + int i = 0, ret = 0; + + if (!mmu || !sinfo) + return -EINVAL; + + /* first is to construct drm_gem_objects for splash memory */ + if (sde_splash_gem_new(dev, sinfo)) + return -ENOMEM; + + /* second is to contruct sgt table for calling smmu map */ + for (i = 0; i < sinfo->splash_mem_num; i++) { + if (sde_splash_get_pages(sinfo->obj[i], + sinfo->splash_mem_paddr[i])) + return -ENOMEM; + } + + for (i = 0; i < sinfo->splash_mem_num; i++) { + msm_obj = to_msm_bo(sinfo->obj[i]); + + if (mmu->funcs && mmu->funcs->map) { + ret = mmu->funcs->map(mmu, sinfo->splash_mem_paddr[i], + msm_obj->sgt, IOMMU_READ | IOMMU_NOEXEC); + + if (ret) { + SDE_ERROR("Map blk %d @%pK failed.\n", + i, (void *)sinfo->splash_mem_paddr[i]); + return ret; + } + } + } + + return ret; +} + +int sde_splash_smmu_unmap(struct msm_mmu *mmu, struct sde_splash_info *sinfo) +{ + struct msm_gem_object *msm_obj; + int i = 0, ret = 0; + + for (i = 0; i < sinfo->splash_mem_num; i++) { + msm_obj = to_msm_bo(sinfo->obj[i]); + + if (mmu->funcs && mmu->funcs->unmap) { + ret = mmu->funcs->unmap(mmu, + sinfo->splash_mem_paddr[i], msm_obj->sgt); + + /* We need to try the best efforts to unmap + * the memory, so even if unmap fails, we need + * to continue the reset unmap loop. + */ + if (ret) + SDE_ERROR("Unmap blk %d @%pK failed.\n", + i, (void *)sinfo->splash_mem_paddr[i]); + } + } + + return ret; +} + +/* + * In below cleanup, the steps are: + * 1. Notify LK to exit and wait for exiting is done. + * 2. Ummap the memory. + * 3. Set DOMAIN_ATTR_EARLY_MAP to 1 to enable stage 1 translation in iommu. + * 4. Free the reserved memory used by LK. 
+ */ +int sde_splash_clean_up(struct msm_kms *kms) +{ + struct sde_splash_info *sinfo; + struct msm_mmu *mmu; + struct sde_kms *sde_kms = to_sde_kms(kms); + int ret; + int i = 0; + + sinfo = &sde_kms->splash_info; + + if (!sinfo) { + SDE_ERROR("%s(%d): invalid splash info\n", __func__, __LINE__); + return -EINVAL; + } + + /* Monitor LK's status and tell it to exit. */ + if (sinfo->program_scratch_regs) { + if (sde_splash_lk_check(sde_kms->hw_intr)) + sde_splash_notify_lk_exit(sde_kms->hw_intr); + + sinfo->handoff = false; + sinfo->program_scratch_regs = false; + } + + if (!sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) { + /* We do not return fault value here, to ensure + * memory can be freed to system later. + */ + SDE_ERROR("invalid mmu\n"); + WARN_ON(1); + } else { + mmu = sde_kms->aspace[0]->mmu; + + /* After LK has exited, set early domain map attribute + * to 1 to enable stage 1 translation in iommu driver. + */ + if (mmu->funcs && mmu->funcs->set_property) { + ret = mmu->funcs->set_property(mmu, + DOMAIN_ATTR_EARLY_MAP, &sinfo->handoff); + + if (ret) + SDE_ERROR("set_property failed\n"); + } + } + + /* release reserved memory to syetem for other allocations */ + for (i = 0; i < sinfo->splash_mem_num; i++) { + memblock_free(sinfo->splash_mem_paddr[i], + sinfo->splash_mem_size[i]); + + sde_splash_release_bootup_memory(sinfo->splash_mem_paddr[i], + sinfo->splash_mem_size[i]); + } + + /* free splash obejcts */ + sde_splash_destroy(sinfo); + + return 0; +} diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h new file mode 100644 index 000000000000..2603a4bba49a --- /dev/null +++ b/drivers/gpu/drm/msm/sde/sde_splash.h @@ -0,0 +1,89 @@ +/** + * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef SDE_SPLASH_H_ +#define SDE_SPLASH_H_ + +#include "msm_kms.h" +#include "msm_mmu.h" + +struct sde_splash_info { + bool handoff; + bool program_scratch_regs; + uint32_t splash_mem_num; + phys_addr_t *splash_mem_paddr; + size_t *splash_mem_size; + struct drm_gem_object **obj; +}; + +/* APIs for early splash handoff functions */ + +/** + * sde_splash_get_handoff_status. + * + * This function will read DISP_INTF_SEL regsiter to get + * the status of early splash. + */ +int sde_splash_get_handoff_status(struct msm_kms *kms); + +/** + * sde_splash_clean_up. + * + * Tell LK to exit, and clean up the resource. + */ +int sde_splash_clean_up(struct msm_kms *kms); + +/** + * sde_splash_reserve_memory. + * + * To reserve the memory block LK accesses. + */ +int sde_splash_reserve_memory(phys_addr_t phys, size_t size); + +/** + * sde_splash_notify_lk_to_exit. + * + * Function to monitor LK's status and tell it to exit. + */ +int sde_splash_notify_lk_to_exit(struct msm_kms *kms); + +/** + * sde_splash_parse_dt. + * + * Parse reserved memory block from DT for early splash. + */ +int sde_splash_parse_dt(struct drm_device *dev); + +/** + * sde_splash_smmu_map. + * + * Map the physical memory LK visited into iommu driver. + */ +int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, + struct sde_splash_info *sinfo); + +/** + * sde_splash_smmu_unmap. + * + * Unmap the memory. + */ +int sde_splash_smmu_unmap(struct msm_mmu *mmu, + struct sde_splash_info *sinfo); + +/** + * sde_splash_destroy. + * + * Destroy the splash objects. + */ +int sde_splash_destroy(struct sde_splash_info *sinfo); + +#endif |
