summaryrefslogtreecommitdiff
path: root/drivers/gpu/msm/kgsl_iommu.c
diff options
context:
space:
mode:
authorHarshdeep Dhatt <hdhatt@codeaurora.org>2016-05-11 09:47:08 -0600
committerCarter Cooper <ccooper@codeaurora.org>2016-08-03 10:34:03 -0600
commit3dd8da5917d63f88e4b0ba6f2ab1e56b0177f66f (patch)
treeced098c06b894523c1e595a2755cfa981f0fcd64 /drivers/gpu/msm/kgsl_iommu.c
parent8e3020fadb2a19eac5557b91f95353e0ab87356b (diff)
msm: kgsl: Add array of page pointers to memdesc
This is done to improve the kgsl vmfault routine. Currently, it traverses the sglist to find the faulted page, which takes linear time. By having an array of all the page pointers, this operation will be completed in constant time. Also, allocate sgt only for mapping this memory to the GPU. Since this optimization is not needed for secure/global or imported memory, we will not keep this array but keep the sgt instead. CRs-Fixed: 1006012 Change-Id: I221fce9082da0bdd59842455221b896a33a6ce42 Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
Diffstat (limited to 'drivers/gpu/msm/kgsl_iommu.c')
-rw-r--r--	drivers/gpu/msm/kgsl_iommu.c | 50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 865cd9d8f498..b467ef81d257 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1627,16 +1627,34 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
uint64_t addr = memdesc->gpuaddr;
uint64_t size = memdesc->size;
unsigned int flags = _get_protection_flags(memdesc);
+ struct sg_table *sgt = NULL;
- ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, memdesc->sgt->sgl,
- memdesc->sgt->nents, flags);
+ /*
+ * For paged memory allocated through kgsl, memdesc->pages is not NULL.
+ * Allocate sgt here just for its map operation. Contiguous memory
+ * already has its sgt, so no need to allocate it here.
+ */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
+ sgt->nents, flags);
if (ret)
- return ret;
+ goto done;
ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
if (ret)
_iommu_unmap_sync_pc(pt, memdesc, addr, size);
+done:
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
return ret;
}
@@ -1647,6 +1665,8 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
{
int pg_sz;
unsigned int protflags = _get_protection_flags(memdesc);
+ int ret;
+ struct sg_table *sgt = NULL;
pg_sz = (1 << kgsl_memdesc_get_align(memdesc));
if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
@@ -1655,9 +1675,27 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
if (size == 0)
return -EINVAL;
- return _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
- memdesc, memdesc->sgt->sgl, memdesc->sgt->nents,
- physoffset, size, protflags);
+ /*
+ * For paged memory allocated through kgsl, memdesc->pages is not NULL.
+ * Allocate sgt here just for its map operation. Contiguous memory
+ * already has its sgt, so no need to allocate it here.
+ */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
+ memdesc, sgt->sgl, sgt->nents,
+ physoffset, size, protflags);
+
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
+ return ret;
}
/* This function must be called with context bank attached */