summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Hareesh Gundu <hareeshg@codeaurora.org> 2017-04-21 15:38:13 +0530
committer Hareesh Gundu <hareeshg@codeaurora.org> 2017-04-26 11:00:09 +0530
commit    c1540a6bfe26515c166d6926c0c44db64c78ab33 (patch)
tree      54fca35edf168a69940c573e2fd4f04828aa1c5b
parent    048c70075fecb8f92d923fc5595efa00f2a00082 (diff)
msm: kgsl: Perform cache operation with kernel address
Kernel should never access untrusted pointers directly. If the
address is not mapped to kernel, map it to kernel address space
and perform the cache-related operation.

Change-Id: I433befcde620e51b8ec17954ddb710f6084e0592
Signed-off-by: Hareesh Gundu <hareeshg@codeaurora.org>
-rw-r--r--	drivers/gpu/msm/kgsl_sharedmem.c	| 133
1 files changed, 104 insertions, 29 deletions
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 6e2a0e3f2645..fa95b4dfe718 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -560,16 +560,70 @@ static inline unsigned int _fixup_cache_range_op(unsigned int op)
}
#endif
-int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
- uint64_t size, unsigned int op)
+static int kgsl_do_cache_op(struct page *page, void *addr,
+ uint64_t offset, uint64_t size, unsigned int op)
{
+ void (*cache_op)(const void *, const void *);
+
/*
- * If the buffer is mapped in the kernel operate on that address
- * otherwise use the user address
+ * The dmac_xxx_range functions handle addresses and sizes that
+ * are not aligned to the cacheline size correctly.
*/
+ switch (_fixup_cache_range_op(op)) {
+ case KGSL_CACHE_OP_FLUSH:
+ cache_op = dmac_flush_range;
+ break;
+ case KGSL_CACHE_OP_CLEAN:
+ cache_op = dmac_clean_range;
+ break;
+ case KGSL_CACHE_OP_INV:
+ cache_op = dmac_inv_range;
+ break;
+ default:
+ return -EINVAL;
+ }
- void *addr = (memdesc->hostptr) ?
- memdesc->hostptr : (void *) memdesc->useraddr;
+ if (page != NULL) {
+ unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+ /*
+ * page_address() returns the kernel virtual address of page.
+ * For high memory kernel virtual address exists only if page
+ * has been mapped. So use a version of kmap rather than
+ * page_address() for high memory.
+ */
+ if (PageHighMem(page)) {
+ offset &= ~PAGE_MASK;
+
+ do {
+ unsigned int len = size;
+
+ if (len + offset > PAGE_SIZE)
+ len = PAGE_SIZE - offset;
+
+ page = pfn_to_page(pfn++);
+ addr = kmap_atomic(page);
+ cache_op(addr + offset, addr + offset + len);
+ kunmap_atomic(addr);
+
+ size -= len;
+ offset = 0;
+ } while (size);
+
+ return 0;
+ }
+
+ addr = page_address(page);
+ }
+
+ cache_op(addr + offset, addr + offset + (size_t) size);
+ return 0;
+}
+
+int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
+ uint64_t size, unsigned int op)
+{
+ void *addr = NULL;
+ int ret = 0;
if (size == 0 || size > UINT_MAX)
return -EINVAL;
@@ -578,38 +632,59 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
if ((offset + size < offset) || (offset + size < size))
return -ERANGE;
- /* Make sure the offset + size do not overflow the address */
- if (addr + ((size_t) offset + (size_t) size) < addr)
- return -ERANGE;
-
/* Check that offset+length does not exceed memdesc->size */
if (offset + size > memdesc->size)
return -ERANGE;
- /* Return quietly if the buffer isn't mapped on the CPU */
- if (addr == NULL)
- return 0;
+ if (memdesc->hostptr) {
+ addr = memdesc->hostptr;
+ /* Make sure the offset + size do not overflow the address */
+ if (addr + ((size_t) offset + (size_t) size) < addr)
+ return -ERANGE;
- addr = addr + offset;
+ ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
+ return ret;
+ }
/*
- * The dmac_xxx_range functions handle addresses and sizes that
- * are not aligned to the cacheline size correctly.
+ * If the buffer is not mapped to kernel, perform cache
+ * operations after mapping to kernel.
*/
+ if (memdesc->sgt != NULL) {
+ struct scatterlist *sg;
+ unsigned int i, pos = 0;
- switch (_fixup_cache_range_op(op)) {
- case KGSL_CACHE_OP_FLUSH:
- dmac_flush_range(addr, addr + (size_t) size);
- break;
- case KGSL_CACHE_OP_CLEAN:
- dmac_clean_range(addr, addr + (size_t) size);
- break;
- case KGSL_CACHE_OP_INV:
- dmac_inv_range(addr, addr + (size_t) size);
- break;
- }
+ for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
+ uint64_t sg_offset, sg_left;
- return 0;
+ if (offset >= (pos + sg->length)) {
+ pos += sg->length;
+ continue;
+ }
+ sg_offset = offset > pos ? offset - pos : 0;
+ sg_left = (sg->length - sg_offset > size) ? size :
+ sg->length - sg_offset;
+ ret = kgsl_do_cache_op(sg_page(sg), NULL, sg_offset,
+ sg_left, op);
+ size -= sg_left;
+ if (size == 0)
+ break;
+ pos += sg->length;
+ }
+ } else if (memdesc->pages != NULL) {
+ addr = vmap(memdesc->pages, memdesc->page_count,
+ VM_IOREMAP, pgprot_writecombine(PAGE_KERNEL));
+ if (addr == NULL)
+ return -ENOMEM;
+
+ /* Make sure the offset + size do not overflow the address */
+ if (addr + ((size_t) offset + (size_t) size) < addr)
+ return -ERANGE;
+
+ ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
+ vunmap(addr);
+ }
+ return ret;
}
EXPORT_SYMBOL(kgsl_cache_range_op);