author    Jordan Crouse <jcrouse@codeaurora.org>  2017-02-13 10:14:11 -0700
committer Jordan Crouse <jcrouse@codeaurora.org>  2017-02-19 16:07:56 -0700
commit    ffd2f3eb423812150b36e4c8cbd711f6d220c321 (patch)
tree      fd4b3869015093b68fd9d143e677ceef1598d95b /drivers/gpu/drm/msm/msm_gem.c
parent    1de066f01417351a1c833ae183805005601a9b85 (diff)
drm/msm: Support different SMMU backends for address spaces
SDE and the GPU have different requirements for the SMMU backends: SDE generates its own iova addresses and needs special support for DMA buffers, while the GPU does its own IOMMU operations. Add a shim layer to the address space (aspace) to break out the address generation and call the appropriate SMMU functions. There is probably consolidation that can be done, but for now this is the best way to deal with the two use cases.

Change-Id: Ic0dedbadc6dc03504ef7dffded18ba09fb3ef291
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
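One way to picture the shim is each aspace carrying a small table of map/unmap callbacks chosen when the aspace is created, so callers such as msm_gem_map_vma()/msm_gem_unmap_vma() stay backend-agnostic. The sketch below is illustrative only: struct msm_aspace_ops and the backend wording are assumptions for this note, not the structures the patch actually adds; only the four-argument msm_gem_map_vma() call-site shape matches the diff further down.

#include <linux/errno.h>

struct msm_gem_address_space;
struct msm_gem_vma;
struct sg_table;

/* Hypothetical per-backend callbacks (GPU IOMMU vs. SDE SMMU). */
struct msm_aspace_ops {
	int (*map)(struct msm_gem_address_space *aspace,
		   struct msm_gem_vma *vma, struct sg_table *sgt,
		   void *priv);
	void (*unmap)(struct msm_gem_address_space *aspace,
		      struct msm_gem_vma *vma, struct sg_table *sgt,
		      void *priv);
};

struct msm_gem_address_space {
	const char *name;
	const struct msm_aspace_ops *ops;
	/* backend-specific state (iommu domain, SMMU context bank, ...) */
};

/* The shim: dispatch to whichever backend this aspace was created with. */
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		    struct msm_gem_vma *vma, struct sg_table *sgt,
		    void *priv)
{
	if (!aspace || !aspace->ops || !aspace->ops->map)
		return -EINVAL;

	return aspace->ops->map(aspace, vma, sgt, priv);
}

Under a scheme like this, a GPU-style backend's map callback would walk the sgt and program the IOMMU itself, while an SDE-style backend could hand the dma_buf pointer (the priv argument; compare get_dmabuf_ptr() in the diff below) to its SMMU layer for DMA-buffer-aware mapping.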
Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 43cef983dabc..5aa08cf4d6d8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -24,6 +24,11 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+ return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -279,8 +284,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- msm_gem_unmap_vma(priv->aspace[id],
- &msm_obj->domain[id], msm_obj->sgt);
+ msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
+ msm_obj->sgt, get_dmabuf_ptr(obj));
}
}
@@ -305,12 +310,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
- msm_obj->sgt, obj->size >> PAGE_SHIFT);
- } else {
- WARN_ONCE(1, "physical address being used\n");
+ ret = msm_gem_map_vma(priv->aspace[id],
+ &msm_obj->domain[id], msm_obj->sgt,
+ get_dmabuf_ptr(obj));
+ } else
msm_obj->domain[id].iova = physaddr(obj);
- }
}
if (!ret)
@@ -485,7 +489,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
uint64_t off = drm_vma_node_start(&obj->vma_node);
- unsigned id;
+ int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
@@ -496,6 +500,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
for (id = 0; id < priv->num_aspaces; id++)
seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -532,7 +538,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (obj->import_attach) {
if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf,
+ msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated: