Diffstat (limited to 'drivers')
-rw-r--r--  drivers/video/fbdev/msm/Makefile               |    1
-rw-r--r--  drivers/video/fbdev/msm/mdss.h                 |   64
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_cmd.c         |    7
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_host.c        |   17
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c              |   41
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c             |  161
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h             |   16
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c     |   60
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_rotator.c     |   26
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_splash_logo.c |   59
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_util.c        |   99
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_wb.c          |   16
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.c            |  932
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.h            |  251
14 files changed, 1392 insertions, 358 deletions
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index 82d32615d654..dbbe4f7798f5 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -16,6 +16,7 @@ mdss-mdp-objs += mdss_mdp_overlay.o
mdss-mdp-objs += mdss_mdp_splash_logo.o
mdss-mdp-objs += mdss_mdp_wb.o
mdss-mdp-objs += mdss_mdp_cdm.o
+mdss-mdp-objs += mdss_smmu.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 57cb83450ebd..9f63c0de9cae 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -42,8 +42,10 @@ enum mdss_mdp_clk_type {
};
enum mdss_iommu_domain_type {
- MDSS_IOMMU_DOMAIN_SECURE,
MDSS_IOMMU_DOMAIN_UNSECURE,
+ MDSS_IOMMU_DOMAIN_ROT_UNSECURE,
+ MDSS_IOMMU_DOMAIN_SECURE,
+ MDSS_IOMMU_DOMAIN_ROT_SECURE,
MDSS_IOMMU_MAX_DOMAIN
};
@@ -136,6 +138,46 @@ enum mdss_hw_quirk {
MDSS_QUIRK_MAX,
};
+struct mdss_smmu_client {
+ struct device *dev;
+ struct dma_iommu_mapping *mmu_mapping;
+ struct dss_module_power mp;
+ bool domain_attached;
+};
+
+struct mdss_data_type;
+
+struct mdss_smmu_ops {
+ int (*smmu_attach)(struct mdss_data_type *mdata);
+ int (*smmu_detach)(struct mdss_data_type *mdata);
+ int (*smmu_get_domain_id)(u32 type);
+ struct dma_buf_attachment * (*smmu_dma_buf_attach)(
+ struct dma_buf *dma_buf, struct device *dev,
+ int domain);
+ int (*smmu_map_dma_buf)(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain,
+ dma_addr_t *iova, unsigned long *size, int dir);
+ void (*smmu_unmap_dma_buf)(struct sg_table *table, int domain,
+ int dir);
+ int (*smmu_dma_alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void *cpu_addr,
+ gfp_t gfp, int domain);
+ void (*smmu_dma_free_coherent)(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova,
+ int domain);
+ int (*smmu_map)(int domain, phys_addr_t iova, phys_addr_t phys, int
+ gfp_order, int prot);
+ void (*smmu_unmap)(int domain, unsigned long iova, int gfp_order);
+ char * (*smmu_dsi_alloc_buf)(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp);
+ int (*smmu_dsi_map_buffer)(phys_addr_t phys, unsigned int domain,
+ unsigned long size, dma_addr_t *dma_addr,
+ void *cpu_addr, int dir);
+ void (*smmu_dsi_unmap_buffer)(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir);
+ void (*smmu_deinit)(struct mdss_data_type *mdata);
+};
+
struct mdss_data_type {
u32 mdp_rev;
struct clk *mdp_clk[MDSS_MAX_CLK];
@@ -152,6 +194,8 @@ struct mdss_data_type {
struct dss_io_data vbif_nrt_io;
char __iomem *mdp_base;
+ struct mdss_smmu_client mdss_smmu[MDSS_IOMMU_MAX_DOMAIN];
+ struct mdss_smmu_ops smmu_ops;
struct mutex reg_lock;
/* bitmap to track pipes that have BWC enabled */
@@ -335,24 +379,6 @@ struct mdss_util_intf {
struct mdss_util_intf *mdss_get_util_intf(void);
-static inline struct ion_client *mdss_get_ionclient(void)
-{
- if (!mdss_res)
- return NULL;
- return mdss_res->iclient;
-}
-
-static inline int mdss_get_iommu_domain(u32 type)
-{
- if (type >= MDSS_IOMMU_MAX_DOMAIN)
- return -EINVAL;
-
- if (!mdss_res)
- return -ENODEV;
-
- return mdss_res->iommu_map[type].domain_idx;
-}
-
static inline int mdss_get_sd_client_cnt(void)
{
if (!mdss_res)
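
The thin wrappers that callers use throughout this series (mdss_smmu_attach(),
mdss_smmu_get_domain_id() and friends) live in the new mdss_smmu.h, which this
diff shows only as a diffstat entry. A minimal sketch of how such a wrapper
presumably dispatches through the smmu_ops vtable added above; the null checks
and error codes are illustrative assumptions, not the patch itself:

    static inline int mdss_smmu_attach(struct mdss_data_type *mdata)
    {
            /* dispatch through the per-generation ops table */
            if (mdata->smmu_ops.smmu_attach)
                    return mdata->smmu_ops.smmu_attach(mdata);
            return -ENOSYS;
    }

    static inline int mdss_smmu_get_domain_id(u32 type)
    {
            struct mdss_data_type *mdata = mdss_mdp_get_mdata();

            if (!mdata || !mdata->smmu_ops.smmu_get_domain_id)
                    return -ENODEV;
            return mdata->smmu_ops.smmu_get_domain_id(type);
    }
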
diff --git a/drivers/video/fbdev/msm/mdss_dsi_cmd.c b/drivers/video/fbdev/msm/mdss_dsi_cmd.c
index f233589d5de6..0e06842ea705 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_cmd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,7 @@
#include "mdss_dsi_cmd.h"
#include "mdss_dsi.h"
+#include "mdss_smmu.h"
/*
* mipi dsi buf mechanism
@@ -72,8 +73,8 @@ char *mdss_dsi_buf_init(struct dsi_buf *dp)
int mdss_dsi_buf_alloc(struct device *ctrl_dev, struct dsi_buf *dp, int size)
{
- dp->start = dma_alloc_writecombine(ctrl_dev, size, &dp->dmap,
- GFP_KERNEL);
+ dp->start = mdss_smmu_dsi_alloc_buf(ctrl_dev, size, &dp->dmap,
+ GFP_KERNEL);
if (dp->start == NULL) {
pr_err("%s:%u\n", __func__, __LINE__);
return -ENOMEM;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index c70b21695c44..b947cadb1364 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -27,6 +27,7 @@
#include "mdss_dsi.h"
#include "mdss_panel.h"
#include "mdss_debug.h"
+#include "mdss_smmu.h"
#define VSYNC_PERIOD 17
@@ -1519,11 +1520,9 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
len = ALIGN(tp->len, 4);
ctrl->dma_size = ALIGN(tp->len, SZ_4K);
-
if (ctrl->mdss_util->iommu_attached()) {
- int ret = msm_iommu_map_contig_buffer(tp->dmap,
- ctrl->mdss_util->get_iommu_domain(domain), 0,
- ctrl->dma_size, SZ_4K, 0, &(ctrl->dma_addr));
+ ret = mdss_smmu_dsi_map_buffer(tp->dmap, domain, ctrl->dma_size,
+ &(ctrl->dma_addr), tp->start, DMA_TO_DEVICE);
if (IS_ERR_VALUE(ret)) {
pr_err("unable to map dma memory to iommu(%d)\n", ret);
return -ENOMEM;
@@ -1570,9 +1569,8 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
if (mctrl && mctrl->dma_addr) {
if (mctrl->dmap_iommu_map) {
- msm_iommu_unmap_contig_buffer(mctrl->dma_addr,
- mctrl->mdss_util->get_iommu_domain(domain),
- 0, mctrl->dma_size);
+ mdss_smmu_dsi_unmap_buffer(mctrl->dma_addr, domain,
+ mctrl->dma_size, DMA_TO_DEVICE);
mctrl->dmap_iommu_map = false;
}
mctrl->dma_addr = 0;
@@ -1580,9 +1578,8 @@ static int mdss_dsi_cmd_dma_tx(struct mdss_dsi_ctrl_pdata *ctrl,
}
if (ctrl->dmap_iommu_map) {
- msm_iommu_unmap_contig_buffer(ctrl->dma_addr,
- ctrl->mdss_util->get_iommu_domain(domain),
- 0, ctrl->dma_size);
+ mdss_smmu_dsi_unmap_buffer(ctrl->dma_addr, domain,
+ ctrl->dma_size, DMA_TO_DEVICE);
ctrl->dmap_iommu_map = false;
}
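
Together with the mdss_dsi_cmd.c hunk above, the DSI command path now follows
a single alloc/map/unmap contract for both SMMU generations. A condensed
sketch of that lifecycle, reusing the names from the diff with the
surrounding driver state elided:

    /* allocate once: writecombine on smmu v1, kzalloc'd on v2 */
    dp->start = mdss_smmu_dsi_alloc_buf(ctrl_dev, size, &dp->dmap,
                                        GFP_KERNEL);

    /* map per transfer, while the iommu is attached */
    ret = mdss_smmu_dsi_map_buffer(dp->dmap, domain, dma_size,
                                   &dma_addr, dp->start, DMA_TO_DEVICE);

    /* ... start DMA, wait for completion ... */

    mdss_smmu_dsi_unmap_buffer(dma_addr, domain, dma_size,
                               DMA_TO_DEVICE);
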
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index c4551ea831f9..f5108dd56a5c 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -57,6 +57,7 @@
#include "mdss_mdp_splash_logo.h"
#define CREATE_TRACE_POINTS
#include "mdss_debug.h"
+#include "mdss_smmu.h"
#ifdef CONFIG_FB_MSM_TRIPLE_BUFFER
#define MDSS_FB_NUM 3
@@ -1480,10 +1481,11 @@ void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd)
ion_unmap_kernel(mfd->fb_ion_client, mfd->fb_ion_handle);
if (mfd->mdp.fb_mem_get_iommu_domain) {
- msm_unmap_dma_buf(mfd->fb_table,
- mfd->mdp.fb_mem_get_iommu_domain(), 0);
+ mdss_smmu_unmap_dma_buf(mfd->fb_table,
+ mfd->mdp.fb_mem_get_iommu_domain(),
+ DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment(mfd->fb_attachment, mfd->fb_table,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
dma_buf_detach(mfd->fbmem_buf, mfd->fb_attachment);
dma_buf_put(mfd->fbmem_buf);
}
@@ -1497,6 +1499,7 @@ int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
unsigned long buf_size;
int rc;
void *vaddr;
+ int domain;
if (!mfd) {
pr_err("Invalid input param - no mfd\n");
@@ -1528,23 +1531,25 @@ int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
goto fb_mmap_failed;
}
- mfd->fb_attachment = dma_buf_attach(mfd->fbmem_buf,
- &mfd->pdev->dev);
+ domain = mfd->mdp.fb_mem_get_iommu_domain();
+
+ mfd->fb_attachment = mdss_smmu_dma_buf_attach(mfd->fbmem_buf,
+ &mfd->pdev->dev, domain);
if (IS_ERR(mfd->fb_attachment)) {
rc = PTR_ERR(mfd->fb_attachment);
goto err_put;
}
mfd->fb_table = dma_buf_map_attachment(mfd->fb_attachment,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
if (IS_ERR(mfd->fb_table)) {
rc = PTR_ERR(mfd->fb_table);
goto err_detach;
}
- rc = msm_map_dma_buf(mfd->fbmem_buf, mfd->fb_table,
- mfd->mdp.fb_mem_get_iommu_domain(), 0, SZ_4K, 0,
- &mfd->iova, &buf_size, 0, 0);
+ rc = mdss_smmu_map_dma_buf(mfd->fbmem_buf, mfd->fb_table,
+ domain, &mfd->iova, &buf_size,
+ DMA_BIDIRECTIONAL);
if (rc) {
pr_err("Cannot map fb_mem to IOMMU. rc=%d\n", rc);
goto err_unmap;
@@ -1558,10 +1563,7 @@ int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
if (IS_ERR_OR_NULL(vaddr)) {
pr_err("ION memory mapping failed - %ld\n", PTR_ERR(vaddr));
rc = PTR_ERR(vaddr);
- if (mfd->mdp.fb_mem_get_iommu_domain) {
- goto err_unmap;
- }
- goto fb_mmap_failed;
+ goto err_unmap;
}
pr_debug("alloc 0x%zuB vaddr = %p (%pa iova) for fb%d\n", fb_size,
@@ -1809,23 +1811,20 @@ static int mdss_fb_alloc_fbmem_iommu(struct msm_fb_data_type *mfd, int dom)
mfd->fbi->var.yres_virtual))
pr_warn("reserve size is smaller than framebuffer size\n");
- virt = dma_alloc_coherent(&pdev->dev, size, &phys, GFP_KERNEL);
- if (!virt) {
+ rc = mdss_smmu_dma_alloc_coherent(&pdev->dev, size, &phys, &mfd->iova,
+ &virt, GFP_KERNEL, dom);
+ if (rc) {
pr_err("unable to alloc fbmem size=%zx\n", size);
return -ENOMEM;
}
if (MDSS_LPAE_CHECK(phys)) {
pr_warn("fb mem phys %pa > 4GB is not supported.\n", &phys);
- dma_free_coherent(&pdev->dev, size, &virt, GFP_KERNEL);
+ mdss_smmu_dma_free_coherent(&pdev->dev, size, &virt,
+ phys, mfd->iova, dom);
return -ERANGE;
}
- rc = msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
- &mfd->iova);
- if (rc)
- pr_warn("Cannot map fb_mem %pa to IOMMU. rc=%d\n", &phys, rc);
-
pr_debug("alloc 0x%zxB @ (%pa phys) (0x%p virt) (%pa iova) for fb%d\n",
size, &phys, virt, &mfd->iova, mfd->index);
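
The framebuffer path above collapses the old dma_alloc_coherent() plus
msm_iommu_map_contig_buffer() pair into one call, so the matching free has to
hand back everything the allocation produced. A reduced pairing, mirroring
the call sites in this hunk (note the CPU pointer travels through the
void *cpu_addr slot, passed as &virt here exactly as the patch does):

    rc = mdss_smmu_dma_alloc_coherent(&pdev->dev, size, &phys,
                                      &mfd->iova, &virt, GFP_KERNEL, dom);
    if (rc)
            return -ENOMEM;

    /* teardown must pair size, cpu_addr, phys, iova and domain */
    mdss_smmu_dma_free_coherent(&pdev->dev, size, &virt, phys,
                                mfd->iova, dom);
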
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 1c55ef28d705..f9d06189f25b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -54,6 +54,7 @@
#include "mdss_debug.h"
#include "mdss_mdp_debug.h"
#include "mdss_mdp_rotator.h"
+#include "mdss_smmu.h"
#include "mdss_mdp_trace.h"
@@ -65,7 +66,7 @@ struct mdss_data_type *mdss_res;
static int mdss_fb_mem_get_iommu_domain(void)
{
- return mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE);
+ return mdss_smmu_get_domain_id(MDSS_IOMMU_DOMAIN_UNSECURE);
}
struct msm_mdp_interface mdp5 = {
@@ -100,31 +101,6 @@ static struct mdss_panel_intf pan_types[] = {
};
static char mdss_mdp_panel[MDSS_MAX_PANEL_LEN];
-struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
- [MDSS_IOMMU_DOMAIN_UNSECURE] = {
- .client_name = "mdp_ns",
- .ctx_name = "mdp_0",
- .partitions = {
- {
- .start = SZ_128K,
- .size = SZ_1G - SZ_128K,
- },
- },
- .npartitions = 1,
- },
- [MDSS_IOMMU_DOMAIN_SECURE] = {
- .client_name = "mdp_secure",
- .ctx_name = "mdp_1",
- .partitions = {
- {
- .start = SZ_1G,
- .size = SZ_1G,
- },
- },
- .npartitions = 1,
- },
-};
-
struct mdss_hw mdss_mdp_hw = {
.hw_ndx = MDSS_HW_MDP,
.ptr = NULL,
@@ -652,13 +628,6 @@ unsigned long mdss_mdp_get_clk_rate(u32 clk_idx)
return clk_rate;
}
-static inline int is_mdss_iommu_attached(void)
-{
- if (!mdss_res)
- return false;
- return mdss_res->iommu_attached;
-}
-
int mdss_iommu_ctrl(int enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -674,13 +643,13 @@ int mdss_iommu_ctrl(int enable)
* finished handoff, as it may still be working with phys addr
*/
if (!mdata->iommu_attached && !mdata->handoff_pending)
- rc = mdss_iommu_attach(mdata);
+ rc = mdss_smmu_attach(mdata);
mdata->iommu_ref_cnt++;
} else {
if (mdata->iommu_ref_cnt) {
mdata->iommu_ref_cnt--;
if (mdata->iommu_ref_cnt == 0)
- rc = mdss_iommu_dettach(mdata);
+ rc = mdss_smmu_detach(mdata);
} else {
pr_err("unbalanced iommu ref\n");
}
@@ -947,119 +916,6 @@ static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
return 0;
}
-int mdss_iommu_attach(struct mdss_data_type *mdata)
-{
- struct iommu_domain *domain;
- struct mdss_iommu_map_type *iomap;
- int i, rc = 0;
-
- MDSS_XLOG(mdata->iommu_attached);
-
- if (mdata->iommu_attached) {
- pr_debug("mdp iommu already attached\n");
- goto end;
- }
-
- for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
- iomap = mdata->iommu_map + i;
-
- domain = msm_get_iommu_domain(iomap->domain_idx);
- if (!domain) {
- WARN(1, "could not attach iommu client %s to ctx %s\n",
- iomap->client_name, iomap->ctx_name);
- continue;
- }
-
- rc = iommu_attach_device(domain, iomap->ctx);
- if (rc) {
- WARN(1, "mdp::iommu device attach failed rc:%d\n", rc);
- for (i--; i >= 0; i--) {
- iomap = mdata->iommu_map + i;
- iommu_detach_device(domain, iomap->ctx);
- }
- goto end;
- }
- }
-
- mdata->iommu_attached = true;
-end:
- return rc;
-}
-
-int mdss_iommu_dettach(struct mdss_data_type *mdata)
-{
- struct iommu_domain *domain;
- struct mdss_iommu_map_type *iomap;
- int i;
-
- MDSS_XLOG(mdata->iommu_attached);
-
- if (!mdata->iommu_attached) {
- pr_debug("mdp iommu already dettached\n");
- return 0;
- }
-
- for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
- iomap = mdata->iommu_map + i;
-
- domain = msm_get_iommu_domain(iomap->domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n",
- iomap->domain_idx);
- continue;
- }
- iommu_detach_device(domain, iomap->ctx);
- }
-
- mdata->iommu_attached = false;
-
- return 0;
-}
-
-int mdss_iommu_init(struct mdss_data_type *mdata)
-{
- struct msm_iova_layout layout;
- struct iommu_domain *domain;
- struct mdss_iommu_map_type *iomap;
- int i;
-
- if (mdata->iommu_map) {
- pr_warn("iommu already initialized\n");
- return 0;
- }
-
- for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
- iomap = &mdss_iommu_map[i];
-
- layout.client_name = iomap->client_name;
- layout.partitions = iomap->partitions;
- layout.npartitions = iomap->npartitions;
- layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
-
- iomap->domain_idx = msm_register_domain(&layout);
- if (IS_ERR_VALUE(iomap->domain_idx))
- return -EINVAL;
-
- domain = msm_get_iommu_domain(iomap->domain_idx);
- if (!domain) {
- pr_err("unable to get iommu domain(%d)\n",
- iomap->domain_idx);
- return -EINVAL;
- }
-
- iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
- if (!iomap->ctx) {
- pr_warn("unable to get iommu ctx(%s)\n",
- iomap->ctx_name);
- return -EINVAL;
- }
- }
-
- mdata->iommu_map = mdss_iommu_map;
-
- return 0;
-}
-
static void mdss_debug_enable_clock(int on)
{
if (on)
@@ -1206,8 +1062,6 @@ static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
mdata->iclient = NULL;
}
- rc = mdss_iommu_init(mdata);
-
return rc;
}
@@ -1480,7 +1334,7 @@ static int mdss_mdp_probe(struct platform_device *pdev)
return -ENODEV;
}
- mdss_res->mdss_util->get_iommu_domain = mdss_get_iommu_domain;
+ mdss_res->mdss_util->get_iommu_domain = mdss_smmu_get_domain_id;
mdss_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
mdss_res->mdss_util->iommu_ctrl = mdss_iommu_ctrl;
mdss_res->mdss_util->bus_scale_set_quota = mdss_bus_scale_set_quota;
@@ -1586,6 +1440,11 @@ static int mdss_mdp_probe(struct platform_device *pdev)
rc = mdss_res->mdss_util->register_irq(&mdss_mdp_hw);
if (rc)
pr_err("mdss_register_irq failed.\n");
+
+ rc = mdss_smmu_init(mdata, &pdev->dev);
+ if (rc)
+ pr_err("mdss smmu init failed\n");
+
mdss_res->mdss_util->mdp_probe_done = true;
/*
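
With mdss_iommu_attach()/mdss_iommu_dettach() removed, mdss_iommu_ctrl() is
now the single refcounted gate onto the SMMU, routed through
mdss_smmu_attach()/mdss_smmu_detach(). The pattern callers follow elsewhere
in this series:

    ret = mdss_iommu_ctrl(1);       /* take a ref; attaches on 0 -> 1 */
    if (IS_ERR_VALUE(ret))
            return ret;

    /* ... work with iommu-mapped buffers ... */

    mdss_iommu_ctrl(0);             /* drop the ref; detaches on 1 -> 0 */
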
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 90d5607d6ece..b619fcf9c23b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -756,6 +756,12 @@ static inline bool mdss_mdp_is_nrt_ctl_path(struct mdss_mdp_ctl *ctl)
(ctl->mixer_left && ctl->mixer_left->rotator_mode);
}
+static inline bool mdss_mdp_is_nrt_vbif_base_defined(
+ struct mdss_data_type *mdata)
+{
+ return mdata->vbif_nrt_io.base ? true : false;
+}
+
static inline bool mdss_mdp_ctl_is_power_off(struct mdss_mdp_ctl *ctl)
{
return mdss_panel_is_power_off(ctl->power_state);
@@ -825,8 +831,6 @@ static inline int mdss_mdp_is_cdm_supported(struct mdss_data_type *mdata,
}
irqreturn_t mdss_mdp_isr(int irq, void *ptr);
-int mdss_iommu_attach(struct mdss_data_type *mdata);
-int mdss_iommu_dettach(struct mdss_data_type *mdata);
void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
u32 intr_type, u32 intf_num);
int mdss_mdp_irq_enable(u32 intr_type, u32 intf_num);
@@ -1021,10 +1025,10 @@ void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
struct mdss_mdp_plane_sizes *ps, struct mdss_mdp_format_params *fmt);
struct mdss_mdp_format_params *mdss_mdp_get_format_params(u32 format);
int mdss_mdp_data_get(struct mdss_mdp_data *data, struct msmfb_data *planes,
- int num_planes, u32 flags, struct device *dev);
-int mdss_mdp_data_map(struct mdss_mdp_data *data);
-void mdss_mdp_data_free(struct mdss_mdp_data *data);
-
+ int num_planes, u32 flags, struct device *dev, bool rotator,
+ int dir);
+int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir);
+void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir);
u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd);
int mdss_mdp_calc_phase_step(u32 src, u32 dst, u32 *out_phase);
void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
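
The reworked buffer API threads two extra parameters end to end: whether the
buffer belongs to the rotator (which selects the *_ROT_* domains added to the
enum above) and a DMA direction that must stay consistent over a buffer's
lifetime. Condensed from the rotator and writeback call sites later in this
diff:

    ret = mdss_mdp_data_get(&buf, &req->data, 1, flags,
                            &mfd->pdev->dev, rotator, dir);
    if (!ret)
            ret = mdss_mdp_data_map(&buf, rotator, dir);

    /* free with the same rotator/dir the buffer was fetched with */
    mdss_mdp_data_free(&buf, rotator, dir);
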
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index c87f9796415d..9dca01bae88b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -35,6 +35,7 @@
#include "mdss_fb.h"
#include "mdss_mdp.h"
#include "mdss_mdp_rotator.h"
+#include "mdss_smmu.h"
#define VSYNC_PERIOD 16
#define BORDERFILL_NDX 0x0BF000BF
@@ -1075,7 +1076,7 @@ void mdss_mdp_overlay_buf_free(struct msm_fb_data_type *mfd,
if (!list_empty(&buf->pipe_list))
list_del_init(&buf->pipe_list);
- mdss_mdp_data_free(buf);
+ mdss_mdp_data_free(buf, false, DMA_TO_DEVICE);
buf->last_freed = local_clock();
buf->state = MDP_BUF_STATE_UNUSED;
@@ -1430,7 +1431,7 @@ static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
if (buf && (buf->state == MDP_BUF_STATE_READY)) {
buf->state = MDP_BUF_STATE_ACTIVE;
- ret = mdss_mdp_data_map(buf);
+ ret = mdss_mdp_data_map(buf, false, DMA_TO_DEVICE);
} else if (!pipe->params_changed) {
/* nothing to update so continue with next */
continue;
@@ -1927,7 +1928,7 @@ static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
ret = -ENOMEM;
} else {
ret = mdss_mdp_data_get(src_data, &req->data, 1, flags,
- &mfd->pdev->dev);
+ &mfd->pdev->dev, false, DMA_TO_DEVICE);
if (IS_ERR_VALUE(ret)) {
mdss_mdp_overlay_buf_free(mfd, src_data);
pr_err("src_data pmem error\n");
@@ -2789,6 +2790,7 @@ static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
u32 start_x = img->dx;
u32 start_y = img->dy;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
+ struct platform_device *pdev = mfd->pdev;
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
@@ -2812,25 +2814,13 @@ static int mdss_mdp_hw_cursor_pipe_update(struct msm_fb_data_type *mfd,
}
if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
- mfd->cursor_buf = dma_alloc_coherent(&mfd->pdev->dev,
- MDSS_MDP_CURSOR_SIZE, (dma_addr_t *)
- &mfd->cursor_buf_phys, GFP_KERNEL);
- if (!mfd->cursor_buf) {
- pr_err("can't allocate cursor buffer\n");
- ret = -ENOMEM;
- goto done;
- }
-
- ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
- mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
- 0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
- &(mfd->cursor_buf_iova));
- if (IS_ERR_VALUE(ret)) {
- dma_free_coherent(&mfd->pdev->dev, MDSS_MDP_CURSOR_SIZE,
- mfd->cursor_buf,
- (dma_addr_t) mfd->cursor_buf_phys);
- pr_err("unable to map cursor buffer to iommu(%d)\n",
- ret);
+ ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
+ MDSS_MDP_CURSOR_SIZE,
+ (dma_addr_t *) &mfd->cursor_buf_phys,
+ &mfd->cursor_buf_iova, mfd->cursor_buf,
+ GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
+ if (ret) {
+ pr_err("can't allocate cursor buffer rc:%d\n", ret);
goto done;
}
@@ -2975,6 +2965,7 @@ static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
u32 start_x = img->dx;
u32 start_y = img->dy;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
+ struct platform_device *pdev = mfd->pdev;
mixer_left = mdss_mdp_mixer_get(mdp5_data->ctl,
MDSS_MDP_MIXER_MUX_DEFAULT);
@@ -2988,24 +2979,13 @@ static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
}
if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
- mfd->cursor_buf = dma_alloc_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
- (dma_addr_t *) &mfd->cursor_buf_phys,
- GFP_KERNEL);
- if (!mfd->cursor_buf) {
- pr_err("can't allocate cursor buffer\n");
- return -ENOMEM;
- }
-
- ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
- mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
- 0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
- &(mfd->cursor_buf_iova));
- if (IS_ERR_VALUE(ret)) {
- dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
- mfd->cursor_buf,
- (dma_addr_t) mfd->cursor_buf_phys);
- pr_err("unable to map cursor buffer to iommu(%d)\n",
- ret);
+ ret = mdss_smmu_dma_alloc_coherent(&pdev->dev,
+ MDSS_MDP_CURSOR_SIZE,
+ (dma_addr_t *) &mfd->cursor_buf_phys,
+ &mfd->cursor_buf_iova, mfd->cursor_buf,
+ GFP_KERNEL, MDSS_IOMMU_DOMAIN_UNSECURE);
+ if (ret) {
+ pr_err("can't allocate cursor buffer rc:%d\n", ret);
return ret;
}
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_rotator.c b/drivers/video/fbdev/msm/mdss_mdp_rotator.c
index 99258f212e97..2b7847582043 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_rotator.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -946,8 +946,8 @@ int mdss_mdp_rotator_release(struct mdss_mdp_rotator_session *rot)
int rc;
rc = mdss_mdp_rotator_finish(rot);
- mdss_mdp_data_free(&rot->src_buf);
- mdss_mdp_data_free(&rot->dst_buf);
+ mdss_mdp_data_free(&rot->src_buf, true, DMA_TO_DEVICE);
+ mdss_mdp_data_free(&rot->dst_buf, true, DMA_FROM_DEVICE);
mdss_mdp_rotator_session_free(rot);
return rc;
@@ -997,35 +997,35 @@ int mdss_mdp_rotator_play(struct msm_fb_data_type *mfd,
mutex_lock(&rot->lock);
ret = mdss_mdp_data_get(&src_buf, &req->data, 1, flgs,
- &mfd->pdev->dev);
+ &mfd->pdev->dev, true, DMA_TO_DEVICE);
if (ret) {
pr_err("src_data pmem error\n");
goto dst_buf_fail;
}
- ret = mdss_mdp_data_map(&src_buf);
+ ret = mdss_mdp_data_map(&src_buf, true, DMA_TO_DEVICE);
if (ret) {
pr_err("unable to map source buffer\n");
- mdss_mdp_data_free(&src_buf);
+ mdss_mdp_data_free(&src_buf, true, DMA_TO_DEVICE);
goto dst_buf_fail;
}
- mdss_mdp_data_free(&rot->src_buf);
+ mdss_mdp_data_free(&rot->src_buf, true, DMA_TO_DEVICE);
memcpy(&rot->src_buf, &src_buf, sizeof(struct mdss_mdp_data));
- mdss_mdp_data_free(&rot->dst_buf);
+ mdss_mdp_data_free(&rot->dst_buf, true, DMA_FROM_DEVICE);
ret = mdss_mdp_data_get(&rot->dst_buf, &req->dst_data, 1, flgs,
- &mfd->pdev->dev);
+ &mfd->pdev->dev, true, DMA_FROM_DEVICE);
if (ret) {
pr_err("dst_data pmem error\n");
- mdss_mdp_data_free(&rot->src_buf);
+ mdss_mdp_data_free(&rot->src_buf, true, DMA_TO_DEVICE);
goto dst_buf_fail;
}
- ret = mdss_mdp_data_map(&rot->dst_buf);
+ ret = mdss_mdp_data_map(&rot->dst_buf, true, DMA_FROM_DEVICE);
if (ret) {
pr_err("unable to map destination buffer\n");
- mdss_mdp_data_free(&rot->dst_buf);
- mdss_mdp_data_free(&rot->src_buf);
+ mdss_mdp_data_free(&rot->dst_buf, true, DMA_FROM_DEVICE);
+ mdss_mdp_data_free(&rot->src_buf, true, DMA_TO_DEVICE);
goto dst_buf_fail;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
index 6632342c6db4..d91a5c892566 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c
@@ -22,13 +22,12 @@
#include <linux/of_address.h>
#include <linux/fb.h>
#include <linux/dma-buf.h>
-#include <linux/mm.h>
-#include <asm/page.h>
#include "mdss_fb.h"
#include "mdss_mdp.h"
#include "splash.h"
#include "mdss_mdp_splash_logo.h"
+#include "mdss_smmu.h"
#define INVALID_PIPE_INDEX 0xFFFF
#define MAX_FRAME_DONE_COUNT_WAIT 2
@@ -66,27 +65,28 @@ static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd,
goto imap_err;
}
- sinfo->attachment = dma_buf_attach(sinfo->dma_buf,
- &mfd->pdev->dev);
+ sinfo->attachment = mdss_smmu_dma_buf_attach(sinfo->dma_buf,
+ &mfd->pdev->dev, MDSS_IOMMU_DOMAIN_UNSECURE);
if (IS_ERR(sinfo->attachment)) {
rc = PTR_ERR(sinfo->attachment);
goto err_put;
}
sinfo->table = dma_buf_map_attachment(sinfo->attachment,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
if (IS_ERR(sinfo->table)) {
rc = PTR_ERR(sinfo->table);
goto err_detach;
}
- rc = msm_map_dma_buf(sinfo->dma_buf, sinfo->table,
- mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
- 0, SZ_4K, 0, &sinfo->iova, &buf_size, 0, 0);
+ rc = mdss_smmu_map_dma_buf(sinfo->dma_buf, sinfo->table,
+ MDSS_IOMMU_DOMAIN_UNSECURE, &sinfo->iova,
+ &buf_size, DMA_BIDIRECTIONAL);
if (rc) {
- pr_err("ion memory map failed\n");
+ pr_err("mdss smmu map dma buf failed!\n");
goto err_unmap;
}
+ sinfo->size = buf_size;
dma_buf_begin_cpu_access(sinfo->dma_buf, 0, size, DMA_FROM_DEVICE);
sinfo->splash_buffer = dma_buf_kmap(sinfo->dma_buf, 0);
@@ -96,18 +96,18 @@ static int mdss_mdp_splash_alloc_memory(struct msm_fb_data_type *mfd,
goto kmap_err;
}
- /*
+ /**
* dma_buf has the reference
*/
ion_free(mdata->iclient, handle);
return rc;
kmap_err:
- msm_unmap_dma_buf(sinfo->table,
- mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE), 0);
+ mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE,
+ DMA_BIDIRECTIONAL);
err_unmap:
dma_buf_unmap_attachment(sinfo->attachment, sinfo->table,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_detach:
dma_buf_detach(sinfo->dma_buf, sinfo->attachment);
err_put:
@@ -135,10 +135,9 @@ static void mdss_mdp_splash_free_memory(struct msm_fb_data_type *mfd)
dma_buf_end_cpu_access(sinfo->dma_buf, 0, sinfo->size, DMA_FROM_DEVICE);
dma_buf_kunmap(sinfo->dma_buf, 0, sinfo->splash_buffer);
- msm_unmap_dma_buf(sinfo->table,
- mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE), 0);
+ mdss_smmu_unmap_dma_buf(sinfo->table, MDSS_IOMMU_DOMAIN_UNSECURE, 0);
dma_buf_unmap_attachment(sinfo->attachment, sinfo->table,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
dma_buf_detach(sinfo->dma_buf, sinfo->attachment);
dma_buf_put(sinfo->dma_buf);
@@ -147,7 +146,6 @@ static void mdss_mdp_splash_free_memory(struct msm_fb_data_type *mfd)
static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
{
- struct iommu_domain *domain;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int rc, ret;
@@ -167,14 +165,8 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
return -EPERM;
}
- domain = msm_get_iommu_domain(mdss_get_iommu_domain(
- MDSS_IOMMU_DOMAIN_UNSECURE));
- if (!domain) {
- pr_debug("mdss iommu domain get failed\n");
- return -EINVAL;
- }
-
- rc = iommu_map(domain, mdp5_data->splash_mem_addr,
+ rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
+ mdp5_data->splash_mem_addr,
mdp5_data->splash_mem_addr,
mdp5_data->splash_mem_size, IOMMU_READ);
if (rc) {
@@ -183,8 +175,9 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
ret = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(ret)) {
pr_err("mdss iommu attach failed\n");
- iommu_unmap(domain, mdp5_data->splash_mem_addr,
- mdp5_data->splash_mem_size);
+ mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+ mdp5_data->splash_mem_addr,
+ mdp5_data->splash_mem_size);
} else {
mfd->splash_info.iommu_dynamic_attached = true;
}
@@ -195,19 +188,13 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd)
static void mdss_mdp_splash_unmap_splash_mem(struct msm_fb_data_type *mfd)
{
- struct iommu_domain *domain;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
if (mfd->splash_info.iommu_dynamic_attached) {
- domain = msm_get_iommu_domain(mdss_get_iommu_domain(
- MDSS_IOMMU_DOMAIN_UNSECURE));
- if (!domain) {
- pr_err("mdss iommu domain get failed\n");
- return;
- }
- iommu_unmap(domain, mdp5_data->splash_mem_addr,
- mdp5_data->splash_mem_size);
+ mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
+ mdp5_data->splash_mem_addr,
+ mdp5_data->splash_mem_size);
mdss_iommu_ctrl(0);
mfd->splash_info.iommu_dynamic_attached = false;
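
Note that the splash region stays identity-mapped: the same physical address
is passed as both iova and phys, so scanout of the bootloader's
continuous-splash frame keeps working across the attach. Reduced to its
essentials:

    /* map pa -> pa, read-only for the display block */
    rc = mdss_smmu_map(MDSS_IOMMU_DOMAIN_UNSECURE,
                       mdp5_data->splash_mem_addr,
                       mdp5_data->splash_mem_addr,
                       mdp5_data->splash_mem_size, IOMMU_READ);

    /* torn down later with the same address/size */
    mdss_smmu_unmap(MDSS_IOMMU_DOMAIN_UNSECURE,
                    mdp5_data->splash_mem_addr,
                    mdp5_data->splash_mem_size);
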
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index b90c7e2daee8..179308e1d54c 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#include "mdss_mdp.h"
#include "mdss_mdp_formats.h"
#include "mdss_debug.h"
+#include "mdss_smmu.h"
enum {
MDP_INTR_VSYNC_INTF_0,
@@ -660,9 +661,12 @@ void mdss_mdp_data_calc_offset(struct mdss_mdp_data *data, u16 x, u16 y,
}
}
-static int mdss_mdp_put_img(struct mdss_mdp_img_data *data)
+static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
+ int dir)
{
struct ion_client *iclient = mdss_get_ionclient();
+ u32 domain;
+
if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
pr_debug("fb mem buf=0x%pa\n", &data->addr);
fdput(data->srcp_f);
@@ -678,25 +682,17 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data)
return -ENOMEM;
} else {
if (data->mapped) {
- int domain;
- if (data->flags & MDP_SECURE_OVERLAY_SESSION)
- domain = MDSS_IOMMU_DOMAIN_SECURE;
- else
- domain = MDSS_IOMMU_DOMAIN_UNSECURE;
-
- msm_unmap_dma_buf(data->srcp_table,
- mdss_get_iommu_domain(domain), 0);
-
+ domain = mdss_smmu_get_domain_type(data->flags,
+ rotator);
+ mdss_smmu_unmap_dma_buf(data->srcp_table,
+ domain, dir);
data->mapped = false;
}
-
dma_buf_unmap_attachment(data->srcp_attachment,
- data->srcp_table, DMA_BIDIRECTIONAL);
-
+ data->srcp_table, dir);
dma_buf_detach(data->srcp_dma_buf,
data->srcp_attachment);
dma_buf_put(data->srcp_dma_buf);
-
data->srcp_dma_buf = NULL;
}
@@ -708,12 +704,14 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data)
}
static int mdss_mdp_get_img(struct msmfb_data *img,
- struct mdss_mdp_img_data *data, struct device *dev)
+ struct mdss_mdp_img_data *data, struct device *dev,
+ bool rotator, int dir)
{
struct fd f;
int ret = -EINVAL;
int fb_num;
unsigned long *len;
+ u32 domain;
dma_addr_t *start;
struct ion_client *iclient = mdss_get_ionclient();
@@ -747,16 +745,18 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
data->srcp_dma_buf = NULL;
return ret;
}
+ domain = mdss_smmu_get_domain_type(data->flags, rotator);
- data->srcp_attachment = dma_buf_attach(data->srcp_dma_buf, dev);
-
+ data->srcp_attachment =
+ mdss_smmu_dma_buf_attach(data->srcp_dma_buf, dev,
+ domain);
if (IS_ERR(data->srcp_attachment)) {
ret = PTR_ERR(data->srcp_attachment);
goto err_put;
}
- data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
- DMA_BIDIRECTIONAL);
+ data->srcp_table =
+ dma_buf_map_attachment(data->srcp_attachment, dir);
if (IS_ERR(data->srcp_table)) {
ret = PTR_ERR(data->srcp_table);
goto err_detach;
@@ -772,7 +772,7 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
if (!*start) {
pr_err("start address is zero!\n");
- mdss_mdp_put_img(data);
+ mdss_mdp_put_img(data, rotator, dir);
return -ENOMEM;
}
@@ -783,7 +783,7 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
pr_debug("mem=%d ihdl=%p buf=0x%pa len=0x%lu\n", img->memory_id,
data->srcp_dma_buf, &data->addr, data->len);
} else {
- mdss_mdp_put_img(data);
+ mdss_mdp_put_img(data, rotator, dir);
return ret ? : -EOVERFLOW;
}
@@ -795,42 +795,37 @@ err_put:
return ret;
}
-static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data)
+static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data, bool rotator,
+ int dir)
{
int ret = -EINVAL;
+ int domain;
if (data->addr && data->len)
return 0;
if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
if (mdss_res->mdss_util->iommu_attached()) {
- int domain;
- if (data->flags & MDP_SECURE_OVERLAY_SESSION)
- domain = MDSS_IOMMU_DOMAIN_SECURE;
- else
- domain = MDSS_IOMMU_DOMAIN_UNSECURE;
-
- ret = msm_map_dma_buf(data->srcp_dma_buf,
- data->srcp_table,
- mdss_get_iommu_domain(domain),
- 0, SZ_4K, 0, &data->addr,
- &data->len, 0, 0);
+ domain = mdss_smmu_get_domain_type(data->flags,
+ rotator);
+ ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+ data->srcp_table, domain,
+ &data->addr, &data->len, dir);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("smmu map dma buf failed: (%d)\n", ret);
+ goto err_unmap;
+ }
data->mapped = true;
} else {
data->addr = sg_phys(data->srcp_table->sgl);
data->len = data->srcp_table->sgl->length;
ret = 0;
}
-
- if (IS_ERR_VALUE(ret)) {
- pr_err("failed to map ion handle (%d)\n", ret);
- goto err_unmap;
- }
}
if (!data->addr) {
pr_err("start address is zero!\n");
- mdss_mdp_put_img(data);
+ mdss_mdp_put_img(data, rotator, dir);
return -ENOMEM;
}
@@ -841,22 +836,22 @@ static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data)
pr_debug("ihdl=%p buf=0x%pa len=0x%lu\n",
data->srcp_dma_buf, &data->addr, data->len);
} else {
- mdss_mdp_put_img(data);
+ mdss_mdp_put_img(data, rotator, dir);
return ret ? : -EOVERFLOW;
}
return ret;
err_unmap:
- dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
- DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, dir);
dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
dma_buf_put(data->srcp_dma_buf);
return ret;
}
int mdss_mdp_data_get(struct mdss_mdp_data *data, struct msmfb_data *planes,
- int num_planes, u32 flags, struct device *dev)
+ int num_planes, u32 flags, struct device *dev, bool rotator,
+ int dir)
{
int i, rc = 0;
@@ -865,12 +860,13 @@ int mdss_mdp_data_get(struct mdss_mdp_data *data, struct msmfb_data *planes,
for (i = 0; i < num_planes; i++) {
data->p[i].flags = flags;
- rc = mdss_mdp_get_img(&planes[i], &data->p[i], dev);
+ rc = mdss_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
+ dir);
if (rc) {
pr_err("failed to get buf p=%d flags=%x\n", i, flags);
while (i > 0) {
i--;
- mdss_mdp_put_img(&data->p[i]);
+ mdss_mdp_put_img(&data->p[i], rotator, dir);
}
break;
}
@@ -881,7 +877,7 @@ int mdss_mdp_data_get(struct mdss_mdp_data *data, struct msmfb_data *planes,
return rc;
}
-int mdss_mdp_data_map(struct mdss_mdp_data *data)
+int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir)
{
int i, rc = 0;
@@ -889,12 +885,12 @@ int mdss_mdp_data_map(struct mdss_mdp_data *data)
return -EINVAL;
for (i = 0; i < data->num_planes; i++) {
- rc = mdss_mdp_map_buffer(&data->p[i]);
+ rc = mdss_mdp_map_buffer(&data->p[i], rotator, dir);
if (rc) {
pr_err("failed to map buf p=%d\n", i);
while (i > 0) {
i--;
- mdss_mdp_put_img(&data->p[i]);
+ mdss_mdp_put_img(&data->p[i], rotator, dir);
}
break;
}
@@ -903,14 +899,15 @@ int mdss_mdp_data_map(struct mdss_mdp_data *data)
return rc;
}
-void mdss_mdp_data_free(struct mdss_mdp_data *data)
+void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir)
{
int i;
mdss_iommu_ctrl(1);
for (i = 0; i < data->num_planes && data->p[i].len; i++)
- mdss_mdp_put_img(&data->p[i]);
+ mdss_mdp_put_img(&data->p[i], rotator, dir);
mdss_iommu_ctrl(0);
+
data->num_planes = 0;
}
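
mdss_smmu_get_domain_type() is what turns the (flags, rotator) pair into one
of the four domains. Its body lives in the new mdss_smmu.h and is not visible
in this diff; a hedged reconstruction of the obvious mapping, assuming
exactly that split:

    static inline int mdss_smmu_get_domain_type(u32 flags, bool rotator)
    {
            if (flags & MDP_SECURE_OVERLAY_SESSION)
                    return rotator ? MDSS_IOMMU_DOMAIN_ROT_SECURE :
                                     MDSS_IOMMU_DOMAIN_SECURE;

            return rotator ? MDSS_IOMMU_DOMAIN_ROT_UNSECURE :
                             MDSS_IOMMU_DOMAIN_UNSECURE;
    }
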
diff --git a/drivers/video/fbdev/msm/mdss_mdp_wb.c b/drivers/video/fbdev/msm/mdss_mdp_wb.c
index f58f122e0015..374ca8c200ff 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_wb.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_wb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,7 +27,7 @@
#include "mdss_mdp.h"
#include "mdss_fb.h"
#include "mdss_wb.h"
-
+#include "mdss_smmu.h"
enum mdss_mdp_wb_state {
WB_OPEN,
@@ -105,7 +105,7 @@ struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
if (is_mdss_iommu_attached()) {
int domain = MDSS_IOMMU_DOMAIN_UNSECURE;
rc = ion_map_iommu(iclient, ihdl,
- mdss_get_iommu_domain(domain),
+ mdss_smmu_get_domain_id(domain),
0, SZ_4K, 0,
&img->addr,
(unsigned long *) &img->len,
@@ -438,7 +438,7 @@ static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
flags |= MDP_SECURE_OVERLAY_SESSION;
ret = mdss_mdp_data_get(&node->buf_data, data, 1, flags,
- &mfd->pdev->dev);
+ &mfd->pdev->dev, true, DMA_FROM_DEVICE);
if (IS_ERR_VALUE(ret)) {
pr_err("error getting buffer info\n");
goto register_fail;
@@ -450,7 +450,7 @@ static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
goto fail_freebuf;
}
- ret = mdss_mdp_data_map(&node->buf_data);
+ ret = mdss_mdp_data_map(&node->buf_data, true, DMA_FROM_DEVICE);
if (IS_ERR_VALUE(ret)) {
pr_err("error mapping buffer\n");
mdss_iommu_ctrl(0);
@@ -473,7 +473,7 @@ static struct mdss_mdp_wb_data *get_user_node(struct msm_fb_data_type *mfd,
return node;
fail_freebuf:
- mdss_mdp_data_free(&node->buf_data);
+ mdss_mdp_data_free(&node->buf_data, true, DMA_FROM_DEVICE);
register_fail:
kfree(node);
return NULL;
@@ -490,7 +490,7 @@ static void mdss_mdp_wb_free_node(struct mdss_mdp_wb_data *node)
node->buf_info.offset,
&buf->addr);
- mdss_mdp_data_free(&node->buf_data);
+ mdss_mdp_data_free(&node->buf_data, true, DMA_FROM_DEVICE);
node->user_alloc = false;
}
}
@@ -882,7 +882,7 @@ int msm_fb_get_iommu_domain(struct fb_info *info, int domain)
pr_err("Invalid mdp iommu domain (%d)\n", domain);
return -EINVAL;
}
- return mdss_get_iommu_domain(mdss_domain);
+ return mdss_smmu_get_domain_id(mdss_domain);
}
EXPORT_SYMBOL(msm_fb_get_iommu_domain);
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
new file mode 100644
index 000000000000..7817598dea8c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -0,0 +1,932 @@
+/* Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/qcom_iommu.h>
+#include <linux/msm_iommu_domains.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/of_platform.h>
+
+#include <asm/dma-iommu.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_smmu.h"
+
+struct mdss_iommu_map_type mdss_iommu_map[MDSS_IOMMU_MAX_DOMAIN] = {
+ [MDSS_IOMMU_DOMAIN_UNSECURE] = {
+ .client_name = "mdp_ns",
+ .ctx_name = "mdp_0",
+ .partitions = {
+ {
+ .start = SZ_128K,
+ .size = SZ_1G - SZ_128K,
+ },
+ },
+ .npartitions = 1,
+ },
+ [MDSS_IOMMU_DOMAIN_SECURE] = {
+ .client_name = "mdp_secure",
+ .ctx_name = "mdp_1",
+ .partitions = {
+ {
+ .start = SZ_1G,
+ .size = SZ_1G,
+ },
+ },
+ .npartitions = 1,
+ },
+};
+
+static int mdss_smmu_util_parse_dt_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ u32 i = 0, rc = 0;
+ const char *clock_name;
+ u32 clock_rate;
+
+ mp->num_clk = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (mp->num_clk <= 0) {
+ pr_err("clocks are not defined\n");
+ goto clk_err;
+ }
+
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ pr_err("clock configuration allocation failed\n");
+ rc = -ENOMEM;
+ mp->num_clk = 0;
+ goto clk_err;
+ }
+
+ for (i = 0; i < mp->num_clk; i++) {
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+ i, &clock_rate);
+ mp->clk_config[i].rate = clock_rate;
+
+ if (!clock_rate)
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ else
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+clk_err:
+ return rc;
+}
+
+static int mdss_smmu_clk_register(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i, ret;
+ struct clk *clk;
+
+ ret = mdss_smmu_util_parse_dt_clock(pdev, mp);
+ if (ret) {
+ pr_err("unable to parse clocks\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mp->num_clk; i++) {
+ clk = devm_clk_get(&pdev->dev,
+ mp->clk_config[i].clk_name);
+ if (IS_ERR(clk)) {
+ pr_err("unable to get clk: %s\n",
+ mp->clk_config[i].clk_name);
+ return PTR_ERR(clk);
+ }
+ mp->clk_config[i].clk = clk;
+ }
+ return 0;
+}
+
+/*
+ * mdss_smmu_attach_v1()
+ *
+ * Attaches to the SMMU domain. Attaching should be done every time before
+ * using the SMMU resources.
+ */
+static int mdss_smmu_attach_v1(struct mdss_data_type *mdata)
+{
+ struct iommu_domain *domain;
+ struct mdss_iommu_map_type *iomap;
+ int i, rc = 0;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ iomap = mdata->iommu_map + i;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ WARN(1, "could not attach iommu client %s to ctx %s\n",
+ iomap->client_name, iomap->ctx_name);
+ continue;
+ }
+
+ rc = iommu_attach_device(domain, iomap->ctx);
+ if (rc) {
+ WARN(1, "mdp::iommu device attach failed rc:%d\n", rc);
+ for (i--; i >= 0; i--) {
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+ iomap = mdata->iommu_map + i;
+ iommu_detach_device(domain, iomap->ctx);
+ }
+ goto end;
+ }
+ }
+end:
+ return rc;
+}
+
+/*
+ * mdss_smmu_attach_v2()
+ *
+ * Associates each configured VA range with the corresponding smmu context
+ * bank device. Enables the clocks, since smmu_v2 requires them to be voted
+ * before use. The iommu attach is done only once, during the initial attach,
+ * and is never undone, as smmu v2 relies on a feature called 'retention'.
+ */
+static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
+{
+ struct mdss_smmu_client *mdss_smmu;
+ int i, rc = 0;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ mdss_smmu = mdss_smmu_get_cb(i);
+ if (mdss_smmu->dev) {
+ rc = msm_dss_enable_clk(mdss_smmu->mp.clk_config,
+ mdss_smmu->mp.num_clk, 1);
+ if (rc) {
+ pr_err("clock enable failed - domain:[%i] rc:%d\n",
+ i, rc);
+ goto err;
+ }
+
+ if (!mdss_smmu->domain_attached) {
+ rc = arm_iommu_attach_device(mdss_smmu->dev,
+ mdss_smmu->mmu_mapping);
+ if (rc) {
+ pr_err("iommu attach device failed for domain[%d] with err:%d\n",
+ i, rc);
+ msm_dss_enable_clk(
+ mdss_smmu->mp.clk_config,
+ mdss_smmu->mp.num_clk, 0);
+ goto err;
+ }
+ mdss_smmu->domain_attached = true;
+ pr_debug("iommu v2 domain[%i] attached\n", i);
+ }
+ } else {
+ pr_err("iommu device not attached for domain[%d]\n", i);
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ for (i--; i >= 0; i--) {
+ mdss_smmu = mdss_smmu_get_cb(i);
+ if (mdss_smmu->dev) {
+ arm_iommu_detach_device(mdss_smmu->dev);
+ msm_dss_enable_clk(mdss_smmu->mp.clk_config,
+ mdss_smmu->mp.num_clk, 0);
+ mdss_smmu->domain_attached = false;
+ }
+ }
+ return rc;
+}
+
+/*
+ * mdss_smmu_detach_v1()
+ *
+ * Detaches from the smmu domain. Should be done immediately after the SMMU
+ * resource usage, in order to save power.
+ */
+static int mdss_smmu_detach_v1(struct mdss_data_type *mdata)
+{
+ struct iommu_domain *domain;
+ struct mdss_iommu_map_type *iomap;
+ int i;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ iomap = mdata->iommu_map + i;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ continue;
+ }
+ iommu_detach_device(domain, iomap->ctx);
+ }
+ return 0;
+}
+
+/*
+ * mdss_smmu_detach_v2()
+ *
+ * Only disables the clocks, as it is not required to detach the iommu-mapped
+ * VA range from the device in smmu_v2; see mdss_smmu_attach_v2() above.
+ */
+static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
+{
+ struct mdss_smmu_client *mdss_smmu;
+ int i;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ mdss_smmu = mdss_smmu_get_cb(i);
+ if (mdss_smmu->dev) {
+ msm_dss_enable_clk(mdss_smmu->mp.clk_config,
+ mdss_smmu->mp.num_clk, 0);
+ }
+ }
+ return 0;
+}
+
+static int mdss_smmu_get_domain_id_v1(u32 type)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ return mdata->iommu_map[type].domain_idx;
+}
+
+static int mdss_smmu_get_domain_id_v2(u32 type)
+{
+ return type;
+}
+
+/*
+ * mdss_smmu_dma_buf_attach_v1()
+ *
+ * Attaches the dma buffer to the device. This API returns a reference
+ * to an attachment structure, which is then used for scatterlist operations.
+ */
+static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v1(
+ struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+ return dma_buf_attach(dma_buf, dev);
+}
+
+/*
+ * mdss_smmu_dma_buf_attach_v2()
+ *
+ * Same as mdss_smmu_dma_buf_attach_v1, except that the device is taken from
+ * the configured smmu v2 context banks.
+ */
+static struct dma_buf_attachment *mdss_smmu_dma_buf_attach_v2(
+ struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return NULL;
+ }
+
+ return dma_buf_attach(dma_buf, mdss_smmu->dev);
+}
+
+/*
+ * mdss_smmu_map_dma_buf_v1()
+ *
+ * Maps an existing buffer into the SMMU domain and sets the virtual
+ * address in @iova.
+ */
+static int mdss_smmu_map_dma_buf_v1(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain, dma_addr_t *iova,
+ unsigned long *size, int dir)
+{
+ return msm_map_dma_buf(dma_buf, table, mdss_smmu_get_domain_id(domain),
+ 0, SZ_4K, 0, iova, size, 0, 0);
+}
+
+/*
+ * mdss_smmu_map_dma_buf_v2()
+ *
+ * Maps an existing buffer (via its struct scatterlist) onto the SMMU context
+ * bank device, from which the allocated virtual address and size are taken.
+ * msm_map_dma_buf is deprecated with smmu v2; dma_map_sg is used instead.
+ */
+static int mdss_smmu_map_dma_buf_v2(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain, dma_addr_t *iova,
+ unsigned long *size, int dir)
+{
+ int rc;
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ rc = dma_map_sg(mdss_smmu->dev, table->sgl, table->nents, dir);
+ if (!rc) {
+ pr_err("dma map sg failed\n");
+ return -ENOMEM;
+ }
+ *iova = table->sgl->dma_address;
+ *size = table->sgl->dma_length;
+ return 0;
+}
+
+static void mdss_smmu_unmap_dma_buf_v1(struct sg_table *table, int domain,
+ int dir)
+{
+ msm_unmap_dma_buf(table, mdss_smmu_get_domain_id(domain), 0);
+}
+
+static void mdss_smmu_unmap_dma_buf_v2(struct sg_table *table, int domain,
+ int dir)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ dma_unmap_sg(mdss_smmu->dev, table->sgl, table->nents, dir);
+}
+
+/*
+ * mdss_smmu_dma_alloc_coherent_v1()
+ *
+ * This routine allocates a region of @size bytes of consistent memory. It also
+ * returns a dma_handle which can be used as the physical address.
+ * dma_alloc_coherent returns a pointer to the allocated region (@cpu_addr) in
+ * the processor's virtual address space. This subroutine also takes care of
+ * mapping the buffer into the SMMU domain, which sets the virtual address
+ * in @iova.
+ */
+static int mdss_smmu_dma_alloc_coherent_v1(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void *cpu_addr,
+ gfp_t gfp, int domain)
+{
+ int ret = 0;
+
+ cpu_addr = dma_alloc_coherent(dev, size, phys, gfp);
+ if (!cpu_addr) {
+ pr_err("dma alloc coherent failed!\n");
+ return -ENOMEM;
+ }
+
+ ret = msm_iommu_map_contig_buffer(*phys,
+ mdss_smmu_get_domain_id(domain), 0,
+ size, SZ_4K, 0, iova);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("map contig buffer failed rc:%d\n", ret);
+ dma_free_coherent(dev, size, cpu_addr, *phys);
+ }
+ return ret;
+}
+
+/*
+ * mdss_smmu_dma_alloc_coherent_v2()
+ *
+ * Allocates the buffer as in mdss_smmu_dma_alloc_coherent_v1, but also maps
+ * it into the SMMU domain with the help of the respective SMMU context
+ * bank device.
+ */
+static int mdss_smmu_dma_alloc_coherent_v2(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void *cpu_addr,
+ gfp_t gfp, int domain)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ cpu_addr = dma_alloc_coherent(mdss_smmu->dev, size, iova, gfp);
+ if (!cpu_addr) {
+ pr_err("dma alloc coherent failed!\n");
+ return -ENOMEM;
+ }
+ *phys = iommu_iova_to_phys(mdss_smmu->mmu_mapping->domain,
+ *iova);
+ return 0;
+}
+
+static void mdss_smmu_dma_free_coherent_v1(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+ msm_iommu_unmap_contig_buffer(phys, mdss_smmu_get_domain_id(domain),
+ 0, size);
+ dma_free_coherent(dev, size, cpu_addr, phys);
+}
+
+static void mdss_smmu_dma_free_coherent_v2(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ dma_free_coherent(mdss_smmu->dev, size, cpu_addr, iova);
+}
+
+/*
+ * mdss_smmu_map_v1()
+ *
+ * Maps the address to the SMMU domain. Both the virtual address and the
+ * physical one, as well as the size of the mapping, should be aligned (at
+ * least) to the size of the smallest page supported by the hardware.
+ */
+static int mdss_smmu_map_v1(int domain, phys_addr_t iova, phys_addr_t phys,
+ int gfp_order, int prot)
+{
+ struct iommu_domain *iommu_domain = msm_get_iommu_domain(
+ mdss_smmu_get_domain_id(domain));
+ if (!iommu_domain) {
+ pr_err("mdss iommu domain get failed in smmu map\n");
+ return -EINVAL;
+ }
+ return iommu_map(iommu_domain, iova, phys, gfp_order, prot);
+}
+
+/*
+ * mdss_smmu_map_v2()
+ *
+ * Same as mdss_smmu_map_v1, except that it maps to the appropriate domain
+ * referred to by the smmu context bank handles.
+ */
+static int mdss_smmu_map_v2(int domain, phys_addr_t iova, phys_addr_t phys,
+ int gfp_order, int prot)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ return iommu_map(mdss_smmu->mmu_mapping->domain,
+ iova, phys, gfp_order, prot);
+}
+
+static void mdss_smmu_unmap_v1(int domain, unsigned long iova, int gfp_order)
+{
+ struct iommu_domain *iommu_domain = msm_get_iommu_domain(
+ mdss_smmu_get_domain_id(domain));
+ if (!iommu_domain) {
+ pr_err("mdss iommu domain get failed in smmu unmap\n");
+ return;
+ }
+ iommu_unmap(iommu_domain, iova, gfp_order);
+}
+
+static void mdss_smmu_unmap_v2(int domain, unsigned long iova, int gfp_order)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ iommu_unmap(mdss_smmu->mmu_mapping->domain, iova, gfp_order);
+}
+
+/*
+ * mdss_smmu_dsi_alloc_buf_v1()
+ *
+ * Allocates the buffer; the mapping is handled later.
+ */
+static char *mdss_smmu_dsi_alloc_buf_v1(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp)
+{
+ return dma_alloc_writecombine(dev, size, dmap, GFP_KERNEL);
+}
+
+/*
+ * mdss_smmu_dsi_alloc_buf_v2()
+ *
+ * Allocates the buffer; the mapping is done later.
+ */
+static char *mdss_smmu_dsi_alloc_buf_v2(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+/*
+ * mdss_smmu_dsi_map_buffer_v1()
+ *
+ * Maps the buffer allocated with mdss_smmu_dsi_alloc_buf_v1 to the SMMU domain
+ */
+static int mdss_smmu_dsi_map_buffer_v1(phys_addr_t phys, unsigned int domain,
+ unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
+ int dir)
+{
+ msm_iommu_map_contig_buffer(phys, mdss_smmu_get_domain_id(domain), 0,
+ size, SZ_4K, 0, dma_addr);
+ if (IS_ERR_VALUE(*dma_addr)) {
+ pr_err("dma map contig buffer failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * mdss_smmu_dsi_map_buffer_v2()
+ *
+ * Maps the buffer allocated in mdss_smmu_dsi_alloc_buf_v2 into the SMMU
+ * domain, using dma_map_single, as msm_iommu_map_contig_buffer is deprecated
+ * in smmu v2.
+ */
+static int mdss_smmu_dsi_map_buffer_v2(phys_addr_t phys, unsigned int domain,
+ unsigned long size, dma_addr_t *dma_addr, void *cpu_addr,
+ int dir)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ *dma_addr = dma_map_single(mdss_smmu->dev, cpu_addr, size, dir);
+ if (IS_ERR_VALUE(*dma_addr)) {
+ pr_err("dma map single failed\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mdss_smmu_dsi_unmap_buffer_v1(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir)
+{
+ if (is_mdss_iommu_attached())
+ msm_iommu_unmap_contig_buffer(dma_addr,
+ mdss_smmu_get_domain_id(domain), 0, size);
+}
+
+static void mdss_smmu_dsi_unmap_buffer_v2(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir)
+{
+ struct mdss_smmu_client *mdss_smmu = mdss_smmu_get_cb(domain);
+ if (!mdss_smmu) {
+ pr_err("not able to get smmu context\n");
+ return;
+ }
+
+ if (is_mdss_iommu_attached())
+ dma_unmap_single(mdss_smmu->dev, dma_addr, size, dir);
+}
+
+static void mdss_smmu_deinit_v1(struct mdss_data_type *mdata)
+{
+ struct iommu_domain *domain;
+ struct mdss_iommu_map_type *iomap;
+ int i;
+
+ if (!mdata->iommu_map) {
+ pr_warn("iommu not initialized\n");
+ return;
+ }
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ iomap = &mdss_iommu_map[i];
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ return;
+ }
+ iomap->domain_idx = msm_unregister_domain(domain);
+ }
+ mdata->iommu_map = NULL;
+}
+
+static void mdss_smmu_deinit_v2(struct mdss_data_type *mdata)
+{
+ int i;
+ struct mdss_smmu_client *mdss_smmu;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ mdss_smmu = mdss_smmu_get_cb(i);
+ if (mdss_smmu->dev)
+ arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+ }
+}
+
+static void mdss_smmu_ops_init(struct mdss_data_type *mdata, int smmu_version)
+{
+ switch (smmu_version) {
+ case MDSS_SMMU_V1:
+ mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v1;
+ mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v1;
+ mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v1;
+ mdata->smmu_ops.smmu_dma_buf_attach =
+ mdss_smmu_dma_buf_attach_v1;
+ mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v1;
+ mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v1;
+ mdata->smmu_ops.smmu_dma_alloc_coherent =
+ mdss_smmu_dma_alloc_coherent_v1;
+ mdata->smmu_ops.smmu_dma_free_coherent =
+ mdss_smmu_dma_free_coherent_v1;
+ mdata->smmu_ops.smmu_map = mdss_smmu_map_v1;
+ mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v1;
+ mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v1;
+ mdata->smmu_ops.smmu_dsi_map_buffer =
+ mdss_smmu_dsi_map_buffer_v1;
+ mdata->smmu_ops.smmu_dsi_unmap_buffer =
+ mdss_smmu_dsi_unmap_buffer_v1;
+ mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v1;
+ break;
+
+ case MDSS_SMMU_V2:
+ mdata->smmu_ops.smmu_attach = mdss_smmu_attach_v2;
+ mdata->smmu_ops.smmu_detach = mdss_smmu_detach_v2;
+ mdata->smmu_ops.smmu_get_domain_id = mdss_smmu_get_domain_id_v2;
+ mdata->smmu_ops.smmu_dma_buf_attach =
+ mdss_smmu_dma_buf_attach_v2;
+ mdata->smmu_ops.smmu_map_dma_buf = mdss_smmu_map_dma_buf_v2;
+ mdata->smmu_ops.smmu_unmap_dma_buf = mdss_smmu_unmap_dma_buf_v2;
+ mdata->smmu_ops.smmu_dma_alloc_coherent =
+ mdss_smmu_dma_alloc_coherent_v2;
+ mdata->smmu_ops.smmu_dma_free_coherent =
+ mdss_smmu_dma_free_coherent_v2;
+ mdata->smmu_ops.smmu_map = mdss_smmu_map_v2;
+ mdata->smmu_ops.smmu_unmap = mdss_smmu_unmap_v2;
+ mdata->smmu_ops.smmu_dsi_alloc_buf = mdss_smmu_dsi_alloc_buf_v2;
+ mdata->smmu_ops.smmu_dsi_map_buffer =
+ mdss_smmu_dsi_map_buffer_v2;
+ mdata->smmu_ops.smmu_dsi_unmap_buffer =
+ mdss_smmu_dsi_unmap_buffer_v2;
+ mdata->smmu_ops.smmu_deinit = mdss_smmu_deinit_v2;
+ break;
+
+ default:
+		pr_err("smmu ops init failed - invalid smmu version:%d\n",
+			smmu_version);
+ }
+}
+
+/*
+ * mdss_smmu_find_version()
+ * @dev: mdss_mdp device
+ *
+ * Walks the child device nodes of the mdss_mdp device passed in, looking
+ * for SMMU v2 compatible devices. If one is found, the version is
+ * MDSS_SMMU_V2; otherwise it is MDSS_SMMU_V1.
+ */
+static int mdss_smmu_find_version(struct device *dev)
+{
+ struct device_node *parent, *child;
+ int version = MDSS_SMMU_V1;
+
+ parent = dev->of_node;
+ for_each_child_of_node(parent, child) {
+ if (is_mdss_smmu_compatible_device(child->name)) {
+ version = MDSS_SMMU_V2;
+ break;
+ }
+ }
+ return version;
+}
+
+/*
+ * mdss_smmu_device_create()
+ * @dev: mdss_mdp device
+ *
+ * For SMMU v2, each context bank is a separate child device of mdss_mdp.
+ * Platform devices are created here for those SMMU-related child devices,
+ * so that their probes, which handle the SMMU mapping and initialization,
+ * can run.
+ */
+void mdss_smmu_device_create(struct device *dev)
+{
+	struct device_node *parent, *child;
+
+	parent = dev->of_node;
+ for_each_child_of_node(parent, child) {
+ if (is_mdss_smmu_compatible_device(child->name))
+ of_platform_device_create(child, NULL, dev);
+ }
+}
+
+int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev)
+{
+ struct msm_iova_layout layout;
+ struct iommu_domain *domain;
+ struct mdss_iommu_map_type *iomap;
+ int i, smmu_version;
+
+ smmu_version = mdss_smmu_find_version(dev);
+
+ if (smmu_version == MDSS_SMMU_V2) {
+ mdss_smmu_device_create(dev);
+ goto end;
+ }
+
+ if (mdata->iommu_map) {
+ pr_warn("iommu already initialized\n");
+ return 0;
+ }
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ if (!mdss_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ iomap = &mdss_iommu_map[i];
+
+ layout.client_name = iomap->client_name;
+ layout.partitions = iomap->partitions;
+ layout.npartitions = iomap->npartitions;
+ layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);
+
+ iomap->domain_idx = msm_register_domain(&layout);
+ if (IS_ERR_VALUE(iomap->domain_idx))
+ return -EINVAL;
+
+ domain = msm_get_iommu_domain(iomap->domain_idx);
+ if (!domain) {
+ pr_err("unable to get iommu domain(%d)\n",
+ iomap->domain_idx);
+ return -EINVAL;
+ }
+
+ iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
+ if (!iomap->ctx) {
+ pr_warn("unable to get iommu ctx(%s)\n",
+ iomap->ctx_name);
+ return -EINVAL;
+ }
+ }
+ mdata->iommu_map = mdss_iommu_map;
+
+end:
+ mdss_smmu_ops_init(mdata, smmu_version);
+ return 0;
+}
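+
+/*
+ * Typical init-time flow (a sketch only; the calling sequence below is
+ * illustrative, not lifted from the callers):
+ *
+ *	mdss_smmu_init(mdata, dev);	selects the v1/v2 ops
+ *	mdss_smmu_attach(mdata);	attach domains before any mapping
+ *	...map/unmap via the mdss_smmu.h wrappers...
+ *	mdss_smmu_detach(mdata);	detach when done
+ *	mdss_smmu_deinit(mdata);	teardown
+ */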
+
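+/* per-domain indices handed to probe through of_device_id.data */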
+static int mdss_mdp_unsec = MDSS_IOMMU_DOMAIN_UNSECURE;
+static int mdss_rot_unsec = MDSS_IOMMU_DOMAIN_ROT_UNSECURE;
+static int mdss_mdp_sec = MDSS_IOMMU_DOMAIN_SECURE;
+static int mdss_rot_sec = MDSS_IOMMU_DOMAIN_ROT_SECURE;
+
+static const struct of_device_id mdss_smmu_dt_match[] = {
+ { .compatible = "qcom,smmu_mdp_unsec", .data = &mdss_mdp_unsec},
+ { .compatible = "qcom,smmu_rot_unsec", .data = &mdss_rot_unsec},
+ { .compatible = "qcom,smmu_mdp_sec", .data = &mdss_mdp_sec},
+ { .compatible = "qcom,smmu_rot_sec", .data = &mdss_rot_sec},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mdss_smmu_dt_match);
+
+/*
+ * mdss_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each SMMU context bank acts as a separate device and is configured with
+ * its own VA range. Each context bank also has its own clocks, which are
+ * registered here so they can be voted for every time the context bank is
+ * used.
+ */
+int mdss_smmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ struct mdss_smmu_client *mdss_smmu;
+ int order = 0, rc = 0;
+ u32 domain;
+ size_t va_start, va_size;
+ const struct of_device_id *match;
+
+ if (!mdata) {
+		pr_err("mdata not initialized yet, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ match = of_match_device(mdss_smmu_dt_match, &pdev->dev);
+ if (!match || !match->data) {
+ pr_err("probe failed as match data is invalid\n");
+ return -EINVAL;
+ }
+
+ domain = *(int *) (match->data);
+ if (domain >= MDSS_IOMMU_MAX_DOMAIN) {
+		pr_err("invalid smmu domain index: %u\n", domain);
+ return -EINVAL;
+ }
+
+ mdss_smmu = &mdata->mdss_smmu[domain];
+
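+	/*
+	 * Carve out the IOVA space per domain: the unsecure domains get
+	 * [128K, 1G) and the secure domains get [1G, 3G).
+	 */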
+ if (domain == MDSS_IOMMU_DOMAIN_UNSECURE ||
+ domain == MDSS_IOMMU_DOMAIN_ROT_UNSECURE) {
+ va_start = SZ_128K;
+ va_size = SZ_1G - SZ_128K;
+ } else if (domain == MDSS_IOMMU_DOMAIN_SECURE ||
+ domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
+ va_start = SZ_1G;
+ va_size = SZ_2G;
+ } else {
+ pr_err("invalid smmu domain type\n");
+ return -EINVAL;
+ }
+
+ mdss_smmu->mmu_mapping = arm_iommu_create_mapping(
+ &platform_bus_type, va_start, va_size, order);
+ if (IS_ERR(mdss_smmu->mmu_mapping)) {
+ pr_err("iommu create mapping failed for domain[%d]\n", domain);
+ return PTR_ERR(mdss_smmu->mmu_mapping);
+ }
+
+ rc = mdss_smmu_clk_register(pdev, &mdss_smmu->mp);
+ if (rc) {
+ pr_err("smmu clk register failed for domain[%d] with err:%d\n",
+ domain, rc);
+ arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+ return rc;
+ }
+ mdss_smmu->dev = dev;
+ pr_info("iommu v2 domain[%d] mapping and clk register successful!\n",
+ domain);
+ return 0;
+}
+
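+/*
+ * mdss_smmu_remove()
+ *
+ * Releases the ARM IOMMU mapping of whichever context bank was bound to
+ * this platform device.
+ */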
+int mdss_smmu_remove(struct platform_device *pdev)
+{
+ int i;
+ struct mdss_smmu_client *mdss_smmu;
+
+ for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
+ mdss_smmu = mdss_smmu_get_cb(i);
+ if (mdss_smmu->dev && mdss_smmu->dev == &pdev->dev)
+ arm_iommu_release_mapping(mdss_smmu->mmu_mapping);
+ }
+ return 0;
+}
+
+static struct platform_driver mdss_smmu_driver = {
+ .probe = mdss_smmu_probe,
+ .remove = mdss_smmu_remove,
+ .shutdown = NULL,
+ .driver = {
+ .name = "mdss_smmu",
+ .of_match_table = mdss_smmu_dt_match,
+ },
+};
+
+static int mdss_smmu_register_driver(void)
+{
+ return platform_driver_register(&mdss_smmu_driver);
+}
+
+static int __init mdss_smmu_driver_init(void)
+{
+	int ret;
+
+	ret = mdss_smmu_register_driver();
+ if (ret)
+ pr_err("mdss_smmu_register_driver() failed!\n");
+
+ return ret;
+}
+module_init(mdss_smmu_driver_init);
+
+static void __exit mdss_smmu_driver_cleanup(void)
+{
+ platform_driver_unregister(&mdss_smmu_driver);
+}
+module_exit(mdss_smmu_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MDSS SMMU driver");
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
new file mode 100644
index 000000000000..6f1571f6d8e0
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MDSS_SMMU_H
+#define MDSS_SMMU_H
+
+#include <linux/msm_ion.h>
+#include <linux/msm_mdp.h>
+#include <linux/mdss_io_util.h>
+#include <linux/msm_iommu_domains.h>
+
+#include "mdss.h"
+#include "mdss_mdp.h"
+#include "mdss_debug.h"
+
+#define MDSS_SMMU_COMPATIBLE "qcom,smmu"
+
+enum mdss_smmu_version {
+ MDSS_SMMU_V1,
+ MDSS_SMMU_V2
+};
+
+void mdss_smmu_register(struct device *dev);
+int mdss_smmu_init(struct mdss_data_type *mdata, struct device *dev);
+
+static inline bool is_mdss_smmu_compatible_device(const char *str)
+{
+	/* only the "qcom,smmu" prefix of the node name is checked */
+	return !strncmp(str, MDSS_SMMU_COMPATIBLE,
+			strlen(MDSS_SMMU_COMPATIBLE));
+}
+
+static inline struct mdss_smmu_client *mdss_smmu_get_cb(u32 domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ return (domain >= MDSS_IOMMU_MAX_DOMAIN) ? NULL :
+ &mdata->mdss_smmu[domain];
+}
+
+static inline struct ion_client *mdss_get_ionclient(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ return mdata ? mdata->iclient : NULL;
+}
+
+static inline int is_mdss_iommu_attached(void)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ return mdata ? mdata->iommu_attached : false;
+}
+
+/*
+ * mdss_smmu_is_valid_domain_type()
+ *
+ * Checks whether a rotator SMMU domain is defined by checking whether the
+ * non-real-time VBIF base is defined, as the two are associated.
+ */
+static inline bool mdss_smmu_is_valid_domain_type(struct mdss_data_type *mdata,
+ int domain_type)
+{
+ if ((domain_type == MDSS_IOMMU_DOMAIN_ROT_UNSECURE ||
+ domain_type == MDSS_IOMMU_DOMAIN_ROT_SECURE) &&
+ !mdss_mdp_is_nrt_vbif_base_defined(mdata))
+ return false;
+ return true;
+}
+
+static inline int mdss_smmu_get_domain_type(u32 flags, bool rotator)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int type;
+
+ if (flags & MDP_SECURE_OVERLAY_SESSION) {
+ type = (rotator &&
+ mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_SECURE].dev) ?
+ MDSS_IOMMU_DOMAIN_ROT_SECURE : MDSS_IOMMU_DOMAIN_SECURE;
+ } else {
+ type = (rotator &&
+ mdata->mdss_smmu[MDSS_IOMMU_DOMAIN_ROT_UNSECURE].dev) ?
+ MDSS_IOMMU_DOMAIN_ROT_UNSECURE :
+ MDSS_IOMMU_DOMAIN_UNSECURE;
+ }
+ return type;
+}
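+
+/*
+ * For example (a sketch): a secure rotator session resolves to
+ * MDSS_IOMMU_DOMAIN_ROT_SECURE only when the rotator context bank has
+ * probed, falling back to MDSS_IOMMU_DOMAIN_SECURE otherwise:
+ *
+ *	type = mdss_smmu_get_domain_type(MDP_SECURE_OVERLAY_SESSION, true);
+ */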
+
+static inline int mdss_smmu_attach(struct mdss_data_type *mdata)
+{
+ int rc;
+
+ MDSS_XLOG(mdata->iommu_attached);
+ if (mdata->iommu_attached) {
+ pr_debug("mdp iommu already attached\n");
+ return 0;
+ }
+
+ if (!mdata->smmu_ops.smmu_attach)
+ return -ENOSYS;
+
+ rc = mdata->smmu_ops.smmu_attach(mdata);
+ if (!rc)
+ mdata->iommu_attached = true;
+ return rc;
+}
+
+static inline int mdss_smmu_detach(struct mdss_data_type *mdata)
+{
+ int rc;
+
+ MDSS_XLOG(mdata->iommu_attached);
+
+ if (!mdata->iommu_attached) {
+		pr_debug("mdp iommu already detached\n");
+ return 0;
+ }
+
+ if (!mdata->smmu_ops.smmu_detach)
+ return -ENOSYS;
+
+ rc = mdata->smmu_ops.smmu_detach(mdata);
+ if (!rc)
+ mdata->iommu_attached = false;
+ return rc;
+}
+
+static inline int mdss_smmu_get_domain_id(u32 type)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+	if (!mdata || !mdata->smmu_ops.smmu_get_domain_id ||
+			type >= MDSS_IOMMU_MAX_DOMAIN)
+ return -ENODEV;
+
+ return mdata->smmu_ops.smmu_get_domain_id(type);
+}
+
+static inline struct dma_buf_attachment *mdss_smmu_dma_buf_attach(
+ struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (!mdata->smmu_ops.smmu_dma_buf_attach)
+ return NULL;
+
+ return mdata->smmu_ops.smmu_dma_buf_attach(dma_buf, dev, domain);
+}
+
+static inline int mdss_smmu_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain, dma_addr_t *iova,
+ unsigned long *size, int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (!mdata->smmu_ops.smmu_map_dma_buf)
+ return -ENOSYS;
+
+ return mdata->smmu_ops.smmu_map_dma_buf(dma_buf, table,
+ domain, iova, size, dir);
+}
+
+static inline void mdss_smmu_unmap_dma_buf(struct sg_table *table, int domain,
+ int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (mdata->smmu_ops.smmu_unmap_dma_buf)
+ mdata->smmu_ops.smmu_unmap_dma_buf(table, domain, dir);
+}
+
+static inline int mdss_smmu_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *phys, dma_addr_t *iova, void *cpu_addr,
+ gfp_t gfp, int domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (!mdata->smmu_ops.smmu_dma_alloc_coherent)
+ return -ENOSYS;
+
+ return mdata->smmu_ops.smmu_dma_alloc_coherent(dev, size,
+ phys, iova, cpu_addr, gfp, domain);
+}
+
+static inline void mdss_smmu_dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t phys, dma_addr_t iova, int domain)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (mdata->smmu_ops.smmu_dma_free_coherent)
+ mdata->smmu_ops.smmu_dma_free_coherent(dev, size, cpu_addr,
+ phys, iova, domain);
+}
+
+static inline int mdss_smmu_map(int domain, phys_addr_t iova, phys_addr_t phys,
+ int gfp_order, int prot)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (!mdata->smmu_ops.smmu_map)
+ return -ENOSYS;
+
+ return mdata->smmu_ops.smmu_map(domain, iova, phys, gfp_order, prot);
+}
+
+static inline void mdss_smmu_unmap(int domain, unsigned long iova,
+ int gfp_order)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (mdata->smmu_ops.smmu_unmap)
+ mdata->smmu_ops.smmu_unmap(domain, iova, gfp_order);
+}
+
+static inline char *mdss_smmu_dsi_alloc_buf(struct device *dev, int size,
+ dma_addr_t *dmap, gfp_t gfp)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (!mdata->smmu_ops.smmu_dsi_alloc_buf)
+ return NULL;
+
+ return mdata->smmu_ops.smmu_dsi_alloc_buf(dev, size, dmap, gfp);
+}
+
+static inline int mdss_smmu_dsi_map_buffer(phys_addr_t phys,
+ unsigned int domain, unsigned long size, dma_addr_t *dma_addr,
+ void *cpu_addr, int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (!mdata->smmu_ops.smmu_dsi_map_buffer)
+ return -ENOSYS;
+
+ return mdata->smmu_ops.smmu_dsi_map_buffer(phys, domain, size,
+ dma_addr, cpu_addr, dir);
+}
+
+static inline void mdss_smmu_dsi_unmap_buffer(dma_addr_t dma_addr, int domain,
+ unsigned long size, int dir)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (mdata->smmu_ops.smmu_dsi_unmap_buffer)
+ mdata->smmu_ops.smmu_dsi_unmap_buffer(dma_addr, domain,
+ size, dir);
+}
+
+static inline void mdss_smmu_deinit(struct mdss_data_type *mdata)
+{
+ if (mdata->smmu_ops.smmu_deinit)
+ mdata->smmu_ops.smmu_deinit(mdata);
+}
+
+#endif /* MDSS_SMMU_H */