diff options
| author | Linux Build Service Account <lnxbuild@localhost> | 2017-02-11 01:25:05 -0800 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2017-02-11 01:25:04 -0800 |
| commit | 30acf3e7e9510937d89660a5c7b469e30ceb02a7 (patch) | |
| tree | d7131192634bcc78a6495f730b00d6ec4bf8bff7 /drivers/gpu | |
| parent | d42f45cccebc3eaae18657a6cd0d17713d25372b (diff) | |
| parent | a808f9895c87773833650271eb7c5281fbc0d8ff (diff) | |
Merge "drm/msm: Use dma_sync_sg_for_device() to flush cache for new buffers"
Diffstat (limited to 'drivers/gpu')
| -rw-r--r-- | drivers/gpu/drm/msm/msm_gem.c | 28 |
1 file changed, 15 insertions(+), 13 deletions(-)
```diff
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 6fa56abf0c78..351985327214 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -97,12 +97,13 @@ static struct page **get_pages(struct drm_gem_object *obj)
 
 		msm_obj->pages = p;
 
-		/* For non-cached buffers, ensure the new pages are clean
-		 * because display controller, GPU, etc. are not coherent:
+		/*
+		 * Make sure to flush the CPU cache for newly allocated memory
+		 * so we don't get ourselves into trouble with a dirty cache
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
 
 	return msm_obj->pages;
@@ -113,12 +114,6 @@ static void put_pages(struct drm_gem_object *obj)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	if (msm_obj->pages) {
-		/* For non-cached buffers, ensure the new pages are clean
-		 * because display controller, GPU, etc. are not coherent:
-		 */
-		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 		sg_free_table(msm_obj->sgt);
 		kfree(msm_obj->sgt);
@@ -307,9 +302,14 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 				DRM_ERROR("Unable to map dma buf\n");
 				return ret;
 			}
+		} else {
+			ret = mmu->funcs->map_sg(mmu, msm_obj->sgt,
+				DMA_BIDIRECTIONAL);
 		}
-		msm_obj->domain[id].iova =
-			sg_dma_address(msm_obj->sgt->sgl);
+
+		if (!ret)
+			msm_obj->domain[id].iova =
+				sg_dma_address(msm_obj->sgt->sgl);
 	} else {
 		WARN_ONCE(1, "physical address being used\n");
 		msm_obj->domain[id].iova = physaddr(obj);
@@ -535,7 +535,9 @@ void msm_gem_free_object(struct drm_gem_object *obj)
 			mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
 					obj->import_attach->dmabuf,
 					DMA_BIDIRECTIONAL);
-		}
+		} else
+			mmu->funcs->unmap_sg(mmu, msm_obj->sgt,
+				DMA_BIDIRECTIONAL);
 	}
 }
```
