-rw-r--r--	drivers/soc/qcom/msm_smem.c	170
1 files changed, 129 insertions, 41 deletions
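This patch hardens msm_smem against corruption of shared memory by a remote subsystem: fields read from shared structures (the heap TOC entries, partition headers, and allocation headers) are now fetched into local variables exactly once, bounds-checked against a trusted size, and only the validated local copy is used afterwards, closing both out-of-bounds and double-fetch windows. Below is a minimal sketch of that read-once, validate-then-use pattern; the names (struct shared_entry, check_entry, region_size) are illustrative only and not part of the driver.

#include <linux/bug.h>
#include <linux/types.h>

/* Illustrative layout of an entry a remote processor can rewrite at will. */
struct shared_entry {
	u32 offset;		/* untrusted */
	u32 size;		/* untrusted */
};

/*
 * Sketch of the pattern the diff applies throughout: fetch each untrusted
 * field once into a local, validate the local against a trusted limit,
 * then compute only with the validated locals.
 */
static void *check_entry(const struct shared_entry *e, void *base,
			 u32 region_size, u32 *size_out)
{
	u32 off = e->offset;	/* single fetch, immune to later rewrites */
	u32 sz = e->size;	/* single fetch */

	if (WARN_ON(off > region_size || sz > region_size - off))
		return NULL;	/* reject values a corrupt remote wrote */

	*size_out = sz;		/* report the validated copy, not e->size */
	return base + off;
}

Compare, for example, the __smem_get_entry_nonsecure() hunk below, where toc[id].size is latched into e_size and checked against smem_ram_size before being exported through *size.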
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index cf3e0e084ab4..0a4e10256998 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -182,6 +182,20 @@ static struct restart_notifier_block restart_notifiers[] = {
static int init_smem_remote_spinlock(void);
/**
+ * smem_get_toc() - Get the base address of the partition TOC
+ *
+ * @return - Base address of the partition TOC
+ *
+ * Helper function to get the base address of the partition TOC,
+ * which resides in the top 4K of the first SMEM region.
+ */
+static struct smem_toc __iomem *smem_get_toc(void)
+{
+ return smem_areas[0].virt_addr +
+ smem_areas[0].size - 4 * 1024;
+}
+
+/**
* is_probe_done() - Did the probe function successfully complete
*
* @return - true if probe successfully completed, false if otherwise
@@ -315,6 +329,7 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
int use_spinlocks = spinlocks_initialized && use_rspinlock;
void *ret = 0;
unsigned long flags = 0;
+ uint32_t e_size;
int rc;
if (!skip_init_check && !smem_initialized_check())
@@ -333,7 +348,11 @@ static void *__smem_get_entry_nonsecure(unsigned id, unsigned *size,
if (toc[id].allocated) {
phys_addr_t phys_base;
- *size = toc[id].size;
+ e_size = toc[id].size;
+ if (e_size > smem_ram_size)
+ return ret;
+ *size = e_size;
+
barrier();
phys_base = toc[id].reserved & BASE_ADDR_MASK;
@@ -368,12 +387,19 @@ static void *__smem_get_entry_secure(unsigned id,
bool skip_init_check,
bool use_rspinlock)
{
+ struct smem_partition_allocation_header *alloc_hdr;
struct smem_partition_header *hdr;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
unsigned long lflags = 0;
- void *item = NULL;
- struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t partition_size;
uint32_t partition_num;
+ uint32_t padding_data;
+ uint32_t padding_hdr;
uint32_t a_hdr_size;
+ uint32_t item_size;
+ void *item = NULL;
int rc;
SMEM_DBG("%s(%u, %u, %u, %d, %d)\n", __func__, id, to_proc,
@@ -393,9 +419,13 @@ static void *__smem_get_entry_secure(unsigned id,
return NULL;
}
+ toc = smem_get_toc();
+
if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset) {
if (use_comm_partition) {
partition_num = comm_partition.partition_num;
+ partition_size =
+ readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + comm_partition.offset;
} else {
return __smem_get_entry_nonsecure(id, size,
@@ -403,6 +433,7 @@ static void *__smem_get_entry_secure(unsigned id,
}
} else {
partition_num = partitions[to_proc].partition_num;
+ partition_size = readl_relaxed(&toc->entry[partition_num].size);
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
}
if (unlikely(!spinlocks_initialized)) {
@@ -433,11 +464,20 @@ static void *__smem_get_entry_secure(unsigned id,
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr),
partitions[to_proc].size_cacheline);
- for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+ offset_free_cached = hdr->offset_free_cached;
+ if (WARN_ON(offset_free_cached > partition_size))
+ return NULL;
+
+ for (alloc_hdr = (void *)(hdr) + partition_size - a_hdr_size;
(void *)(alloc_hdr) > (void *)(hdr) +
- hdr->offset_free_cached;
+ offset_free_cached;
alloc_hdr = (void *)(alloc_hdr) -
- alloc_hdr->size - a_hdr_size) {
+ item_size - a_hdr_size) {
+ item_size = alloc_hdr->size;
+ padding_data = alloc_hdr->padding_data;
+ if (WARN_ON(padding_data > item_size
+ || item_size > partition_size))
+ return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@@ -450,20 +490,30 @@ static void *__smem_get_entry_secure(unsigned id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
- *size = ALIGN(alloc_hdr->size -
- alloc_hdr->padding_data, 8);
- item = (void *)(alloc_hdr) - alloc_hdr->size;
+ *size = ALIGN(item_size - padding_data, 8);
+ item = (void *)(alloc_hdr) - item_size;
break;
}
}
} else {
+ offset_free_uncached = hdr->offset_free_uncached;
+ if (WARN_ON(offset_free_uncached > partition_size))
+ return NULL;
+
for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
(void *)(alloc_hdr) < (void *)(hdr) +
- hdr->offset_free_uncached;
+ offset_free_uncached;
alloc_hdr = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
- alloc_hdr->padding_hdr +
- alloc_hdr->size) {
+ padding_hdr +
+ item_size) {
+ padding_hdr = alloc_hdr->padding_hdr;
+ padding_data = alloc_hdr->padding_data;
+ item_size = alloc_hdr->size;
+ if (WARN_ON(padding_hdr > partition_size
+ || item_size > partition_size
+ || padding_data > item_size))
+ return NULL;
if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
LOG_ERR(
"%s: SMEM corruption detected. Partition %d to %d at %p\n",
@@ -476,11 +526,10 @@ static void *__smem_get_entry_secure(unsigned id,
}
if (alloc_hdr->smem_type == id) {
/* 8 byte alignment to match legacy */
- *size = ALIGN(alloc_hdr->size -
- alloc_hdr->padding_data, 8);
+ *size = ALIGN(item_size - padding_data, 8);
item = (void *)(alloc_hdr) +
sizeof(*alloc_hdr) +
- alloc_hdr->padding_hdr;
+ padding_hdr;
break;
}
}
@@ -569,10 +618,17 @@ static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
void *smem_base = smem_ram_base;
struct smem_shared *shared = smem_base;
struct smem_heap_entry *toc = shared->heap_toc;
+ uint32_t free_offset, heap_remaining;
void *ret = NULL;
- if (shared->heap_info.heap_remaining >= size_in) {
- toc[id].offset = shared->heap_info.free_offset;
+ heap_remaining = shared->heap_info.heap_remaining;
+ free_offset = shared->heap_info.free_offset;
+ if (WARN_ON(heap_remaining > smem_ram_size
+ || free_offset > smem_ram_size))
+ return NULL;
+
+ if (heap_remaining >= size_in) {
+ toc[id].offset = free_offset;
toc[id].size = size_in;
/*
* wmb() is necessary to ensure the allocation data is
@@ -584,7 +640,7 @@ static void *alloc_item_nonsecure(unsigned id, unsigned size_in)
shared->heap_info.free_offset += size_in;
shared->heap_info.heap_remaining -= size_in;
- ret = smem_base + toc[id].offset;
+ ret = smem_base + free_offset;
/*
* wmb() is necessary to ensure the heap data is consistent
* before continuing to prevent race conditions with remote
@@ -620,11 +676,15 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
void *smem_base = smem_ram_base;
struct smem_partition_header *hdr;
struct smem_partition_allocation_header *alloc_hdr;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ uint32_t partition_size;
+ uint32_t partition_num;
uint32_t a_hdr_size;
uint32_t a_data_size;
uint32_t size_cacheline;
uint32_t free_space;
- uint32_t partition_num;
void *ret = NULL;
if (to_proc == SMEM_COMM_HOST) {
@@ -651,27 +711,35 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
BUG();
}
- free_space = hdr->offset_free_cached -
- hdr->offset_free_uncached;
+ toc = smem_get_toc();
+ partition_size = readl_relaxed(&toc->entry[partition_num].size);
+
+ offset_free_cached = hdr->offset_free_cached;
+ offset_free_uncached = hdr->offset_free_uncached;
+ if (WARN_ON(offset_free_uncached > offset_free_cached
+ || offset_free_cached > partition_size))
+ return NULL;
+
+ free_space = offset_free_cached - offset_free_uncached;
if (flags & SMEM_ITEM_CACHED_FLAG) {
a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
a_data_size = ALIGN(size_in, size_cacheline);
- if (free_space < a_hdr_size + a_data_size) {
+ if (free_space < a_hdr_size + a_data_size
+ || free_space < size_in) {
SMEM_INFO(
- "%s: id %u not enough memory %u (required %u)\n",
- __func__, id, free_space,
- a_hdr_size + a_data_size);
+ "%s: id %u not enough memory %u (required %u), (size_in %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size, size_in);
return ret;
}
- alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
- a_hdr_size;
+ alloc_hdr = (void *)(hdr) + offset_free_cached - a_hdr_size;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
- hdr->offset_free_cached = hdr->offset_free_cached -
+ hdr->offset_free_cached = offset_free_cached -
a_hdr_size - a_data_size;
ret = (void *)(alloc_hdr) - a_data_size;
/*
@@ -686,20 +754,21 @@ static void *alloc_item_secure(unsigned id, unsigned size_in, unsigned to_proc,
} else {
a_hdr_size = sizeof(*alloc_hdr);
a_data_size = ALIGN(size_in, 8);
- if (free_space < a_hdr_size + a_data_size) {
+ if (free_space < a_hdr_size + a_data_size
+ || free_space < size_in) {
SMEM_INFO(
- "%s: id %u not enough memory %u (required %u)\n",
- __func__, id, free_space,
- a_hdr_size + a_data_size);
+ "%s: id %u not enough memory %u (required %u) (size_in %u)\n",
+ __func__, id, free_space,
+ a_hdr_size + a_data_size, size_in);
return ret;
}
- alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+ alloc_hdr = (void *)(hdr) + offset_free_uncached;
alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
alloc_hdr->smem_type = id;
alloc_hdr->size = a_data_size;
alloc_hdr->padding_data = a_data_size - size_in;
alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
- hdr->offset_free_uncached = hdr->offset_free_uncached +
+ hdr->offset_free_uncached = offset_free_uncached +
a_hdr_size + a_data_size;
ret = alloc_hdr + 1;
}
@@ -891,6 +960,12 @@ unsigned smem_get_free_space(unsigned to_proc)
{
struct smem_partition_header *hdr;
struct smem_shared *shared;
+ uint32_t offset_free_uncached;
+ struct smem_toc __iomem *toc;
+ uint32_t offset_free_cached;
+ uint32_t heap_remaining;
+ uint32_t p_size;
+ uint32_t p_num;
if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
@@ -905,11 +980,24 @@ unsigned smem_get_free_space(unsigned to_proc)
return UINT_MAX;
}
hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
- return hdr->offset_free_cached - hdr->offset_free_uncached;
- } else {
- shared = smem_ram_base;
- return shared->heap_info.heap_remaining;
+ offset_free_cached = hdr->offset_free_cached;
+ offset_free_uncached = hdr->offset_free_uncached;
+
+ toc = smem_get_toc();
+ p_num = partitions[to_proc].partition_num;
+ p_size = readl_relaxed(&toc->entry[p_num].size);
+ if (WARN_ON(offset_free_uncached > offset_free_cached
+ || offset_free_cached > p_size))
+ return -EINVAL;
+
+ return offset_free_cached - offset_free_uncached;
}
+ shared = smem_ram_base;
+ heap_remaining = shared->heap_info.heap_remaining;
+ if (WARN_ON(heap_remaining > smem_ram_size))
+ return -EINVAL;
+
+ return heap_remaining;
}
EXPORT_SYMBOL(smem_get_free_space);
@@ -1214,8 +1302,8 @@ static void smem_init_security_partition(struct smem_toc_entry *entry,
LOG_ERR("Smem partition %d hdr magic is bad\n", num);
BUG();
}
- if (!hdr->size) {
- LOG_ERR("Smem partition %d size is 0\n", num);
+ if (hdr->size != entry->size) {
+ LOG_ERR("Smem partition %d size is invalid\n", num);
BUG();
}
if (hdr->offset_free_uncached > hdr->size) {