author     Carter Cooper <ccooper@codeaurora.org>    2015-09-15 14:13:35 -0600
committer  David Keitel <dkeitel@codeaurora.org>     2016-03-23 21:13:39 -0700
commit     048fc95836a4fd0509a5a378b59b24bffb7c3b6c
tree       4286b5d539331356de7b8d8d14da4ece9ffa2301
parent     eaa62a6847a4a26f53cf371a04c3671fcd802bab
msm: kgsl: Rework page allocation sizes
Alignment checks only need to be done once and can be moved down to the
lower layers.

Change-Id: Ia4683cf9db08506db810e80854c006d94dc80310
Signed-off-by: Carter Cooper <ccooper@codeaurora.org>
-rw-r--r--   drivers/gpu/msm/kgsl.c             2
-rw-r--r--   drivers/gpu/msm/kgsl.h             2
-rw-r--r--   drivers/gpu/msm/kgsl_mmu.c         4
-rw-r--r--   drivers/gpu/msm/kgsl_sharedmem.c  54
-rw-r--r--   drivers/gpu/msm/kgsl_sharedmem.h  44
5 files changed, 31 insertions(+), 75 deletions(-)
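The commit message's point that the checks "only need to be done once" plays out as follows: callers such as kgsl_allocate_user() and the removed inline wrappers stop validating size, and each low-level allocator page-aligns and bounds-checks its own argument. A simplified sketch of the resulting dispatch, pieced together from the hunks below (not a verbatim copy of the patched function; mmapsize handling is omitted, and the default branch is assumed to land in kgsl_sharedmem_page_alloc_user()):

	/*
	 * Simplified sketch of kgsl_allocate_user() after this patch: no
	 * size validation here, because every backend checks its own input.
	 */
	int kgsl_allocate_user(struct kgsl_device *device,
			struct kgsl_memdesc *memdesc,
			struct kgsl_pagetable *pagetable,
			uint64_t size, uint64_t mmapsize, uint64_t flags)
	{
		memdesc->flags = flags;

		/* No MMU: contiguous CMA path, PAGE_ALIGN() + SIZE_MAX check inside */
		if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
			return kgsl_sharedmem_alloc_contig(device, memdesc,
							pagetable, size);

		/* Secure path: ALIGN(size, SZ_1M) + UINT_MAX check inside */
		if (flags & KGSL_MEMFLAGS_SECURE)
			return kgsl_allocate_secure(device, memdesc,
							pagetable, size);

		/* System-memory path: PAGE_ALIGN() + UINT_MAX check inside */
		return kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size);
	}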
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2f28a6f604ba..c820c9ca7a3a 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2960,7 +2960,7 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
mmapsize = size;
/* For now only allow allocations up to 4G */
- if (size > UINT_MAX)
+ if (size == 0 || size > UINT_MAX)
return ERR_PTR(-EINVAL);
/* Only allow a mmap size that we can actually mmap */
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 757c07e6da86..b8bc155cd335 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -354,8 +354,6 @@ kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);
struct kgsl_mem_entry * __must_check
kgsl_sharedmem_find_id(struct kgsl_process_private *process, unsigned int id);
-void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);
-
extern const struct dev_pm_ops kgsl_pm_ops;
int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
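The only change to kgsl.h is a relocation: the kgsl_get_memory_usage() prototype moves into kgsl_sharedmem.h (see the addition near the end of this patch), next to the other memdesc helpers. A hedged usage sketch, with the local buffer name chosen here purely for illustration:

	/* Assumed typical call site (illustrative): format the usage hint
	 * encoded in a memdesc's flags into a short human-readable string. */
	char usage[16];

	kgsl_get_memory_usage(usage, sizeof(usage), memdesc->flags);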
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index 5876106d9969..065d34f0aab7 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -535,8 +535,8 @@ int kgsl_mmu_init(struct kgsl_device *device, char *mmutype)
* setstate block. Allocate the memory here and map it later
*/
- status = kgsl_allocate_contiguous(device, &mmu->setstate_memory,
- PAGE_SIZE);
+ status = kgsl_sharedmem_alloc_contig(device, &mmu->setstate_memory,
+ NULL, PAGE_SIZE);
if (status)
return status;
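This is the last caller of the kgsl_allocate_contiguous() wrapper, which the kgsl_sharedmem.h hunk further down deletes. A sketch of the equivalence, based on the wrapper body shown later in this patch; the page alignment and the no-MMU gpuaddr fixup now live inside kgsl_sharedmem_alloc_contig() itself:

	/* Before: the inline wrapper aligned the size and, for
	 * KGSL_MMU_TYPE_NONE, patched memdesc->gpuaddr itself. */
	status = kgsl_allocate_contiguous(device, &mmu->setstate_memory,
					PAGE_SIZE);

	/* After: call the backend directly with a NULL pagetable; the
	 * PAGE_ALIGN() and the gpuaddr = physaddr assignment are done
	 * inside kgsl_sharedmem_alloc_contig(). */
	status = kgsl_sharedmem_alloc_contig(device, &mmu->setstate_memory,
					NULL, PAGE_SIZE);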
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 9032035fe966..3ee782e96f6c 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -26,6 +26,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_device.h"
#include "kgsl_log.h"
+#include "kgsl_mmu.h"
/*
* The user can set this from debugfs to force failed memory allocations to
@@ -311,6 +312,13 @@ kgsl_sharedmem_init_sysfs(void)
drv_attr_list);
}
+static int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ uint64_t size);
+
+static int kgsl_cma_alloc_secure(struct kgsl_device *device,
+ struct kgsl_memdesc *memdesc, uint64_t size);
+
static int kgsl_allocate_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
@@ -332,13 +340,11 @@ int kgsl_allocate_user(struct kgsl_device *device,
{
int ret;
- if (size == 0)
- return -EINVAL;
-
memdesc->flags = flags;
if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
- ret = kgsl_cma_alloc_coherent(device, memdesc, pagetable, size);
+ ret = kgsl_sharedmem_alloc_contig(device, memdesc,
+ pagetable, size);
else if (flags & KGSL_MEMFLAGS_SECURE)
ret = kgsl_allocate_secure(device, memdesc, pagetable, size);
else
@@ -672,7 +678,7 @@ static inline int get_page_size(size_t size, unsigned int align)
#endif
static int
-_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
+kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size)
{
@@ -685,6 +691,10 @@ _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
unsigned int align;
unsigned int step = ((VMALLOC_END - VMALLOC_START)/8) >> PAGE_SHIFT;
+ size = PAGE_ALIGN(size);
+ if (size == 0 || size > UINT_MAX)
+ return -EINVAL;
+
align = (memdesc->flags & KGSL_MEMALIGN_MASK) >> KGSL_MEMALIGN_SHIFT;
page_size = get_page_size(size, align);
@@ -697,9 +707,6 @@ _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
if (align < ilog2(page_size))
kgsl_memdesc_set_align(memdesc, ilog2(page_size));
- if (size > SIZE_MAX)
- return -EINVAL;
-
/*
* There needs to be enough room in the page array to be able to
* service the allocation entirely with PAGE_SIZE sized chunks
@@ -874,19 +881,6 @@ done:
return ret;
}
-int
-kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- uint64_t size)
-{
- size = PAGE_ALIGN(size);
- if (size == 0)
- return -EINVAL;
-
- return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
-}
-EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
-
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
if (memdesc == NULL || memdesc->size == 0)
@@ -1071,14 +1065,13 @@ void kgsl_get_memory_usage(char *name, size_t name_size, uint64_t memflags)
}
EXPORT_SYMBOL(kgsl_get_memory_usage);
-int kgsl_cma_alloc_coherent(struct kgsl_device *device,
+int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, uint64_t size)
{
int result = 0;
- size = ALIGN(size, PAGE_SIZE);
-
+ size = PAGE_ALIGN(size);
if (size == 0 || size > SIZE_MAX)
return -EINVAL;
@@ -1101,6 +1094,9 @@ int kgsl_cma_alloc_coherent(struct kgsl_device *device,
/* Record statistics */
+ if (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE)
+ memdesc->gpuaddr = memdesc->physaddr;
+
KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
&kgsl_driver.stats.coherent_max);
@@ -1110,7 +1106,7 @@ err:
return result;
}
-EXPORT_SYMBOL(kgsl_cma_alloc_coherent);
+EXPORT_SYMBOL(kgsl_sharedmem_alloc_contig);
static int scm_lock_chunk(struct kgsl_memdesc *memdesc, int lock)
{
@@ -1166,7 +1162,7 @@ static int scm_lock_chunk(struct kgsl_memdesc *memdesc, int lock)
return result;
}
-int kgsl_cma_alloc_secure(struct kgsl_device *device,
+static int kgsl_cma_alloc_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, uint64_t size)
{
struct kgsl_iommu *iommu = device->mmu.priv;
@@ -1174,14 +1170,11 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
size_t aligned;
- if (size == 0)
- return -EINVAL;
-
/* Align size to 1M boundaries */
aligned = ALIGN(size, SZ_1M);
/* The SCM call uses an unsigned int for the size */
- if (aligned > UINT_MAX)
+ if (aligned == 0 || aligned > UINT_MAX)
return -EINVAL;
/*
@@ -1232,7 +1225,6 @@ err:
return result;
}
-EXPORT_SYMBOL(kgsl_cma_alloc_secure);
/**
* kgsl_cma_unlock_secure() - Unlock secure memory by calling TZ
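One detail worth spelling out in kgsl_cma_alloc_secure(): the size is rounded up to a 1 MB boundary first, so the single check that follows covers both degenerate cases, a zero-byte request (which aligns to 0) and one too large for the unsigned int the SCM interface takes. Worked examples, assuming a 64-bit size_t:

	size_t aligned = ALIGN(size, SZ_1M);	/* round up to 1 MB */

	/*
	 * size = 0     -> aligned = 0                -> -EINVAL
	 * size = 4096  -> aligned = 1 MB             -> passes the check
	 * size = 8 GB  -> aligned = 8 GB > UINT_MAX  -> -EINVAL
	 */
	if (aligned == 0 || aligned > UINT_MAX)
		return -EINVAL;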
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 9e5651d18df8..b4895500bcff 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -13,15 +13,9 @@
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H
-#include <linux/slab.h>
#include <linux/dma-mapping.h>
-#include "kgsl_mmu.h"
-#include <linux/slab.h>
-#include <linux/kmemleak.h>
-#include <linux/iommu.h>
#include "kgsl_mmu.h"
-#include "kgsl_log.h"
struct kgsl_device;
struct kgsl_process_private;
@@ -30,17 +24,10 @@ struct kgsl_process_private;
#define KGSL_CACHE_OP_FLUSH 0x02
#define KGSL_CACHE_OP_CLEAN 0x03
-int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- uint64_t size);
-
-int kgsl_cma_alloc_coherent(struct kgsl_device *device,
+int kgsl_sharedmem_alloc_contig(struct kgsl_device *device,
struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, uint64_t size);
-int kgsl_cma_alloc_secure(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc, uint64_t size);
-
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
@@ -82,6 +69,8 @@ int kgsl_allocate_user(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
uint64_t size, uint64_t mmapsize, uint64_t flags);
+void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);
+
#define MEMFLAGS(_flags, _mask, _shift) \
((unsigned int) (((_flags) & (_mask)) >> (_shift)))
@@ -125,10 +114,8 @@ kgsl_memdesc_get_memtype(const struct kgsl_memdesc *memdesc)
static inline int
kgsl_memdesc_set_align(struct kgsl_memdesc *memdesc, unsigned int align)
{
- if (align > 32) {
- KGSL_CORE_ERR("Alignment too big, restricting to 2^32\n");
+ if (align > 32)
align = 32;
- }
memdesc->flags &= ~KGSL_MEMALIGN_MASK;
memdesc->flags |= (align << KGSL_MEMALIGN_SHIFT) & KGSL_MEMALIGN_MASK;
@@ -256,21 +243,6 @@ kgsl_memdesc_mmapsize(const struct kgsl_memdesc *memdesc)
return size;
}
-static inline int
-kgsl_allocate_contiguous(struct kgsl_device *device,
- struct kgsl_memdesc *memdesc, size_t size)
-{
- int ret;
-
- size = ALIGN(size, PAGE_SIZE);
-
- ret = kgsl_cma_alloc_coherent(device, memdesc, NULL, size);
- if (!ret && (kgsl_mmu_get_mmutype() == KGSL_MMU_TYPE_NONE))
- memdesc->gpuaddr = memdesc->physaddr;
-
- return ret;
-}
-
/*
* kgsl_allocate_global() - Allocate GPU accessible memory that will be global
* across all processes
@@ -291,16 +263,10 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
{
int ret;
- BUG_ON(size > SIZE_MAX);
-
- if (size == 0)
- return -EINVAL;
-
memdesc->flags = flags;
memdesc->priv = priv;
- ret = kgsl_allocate_contiguous(device, memdesc, (size_t) size);
-
+ ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL, (size_t) size);
if (!ret) {
ret = kgsl_add_global_pt_entry(device, memdesc);
if (ret)