diff options
| author | Patrick Daly <pdaly@codeaurora.org> | 2016-08-03 19:24:55 -0700 |
|---|---|---|
| committer | Patrick Daly <pdaly@codeaurora.org> | 2016-08-17 12:47:21 -0700 |
| commit | 641a99ef2f8d8ba648cc3db61f2f288532af4831 (patch) | |
| tree | 27c46e2fd49aa6557a3bb56c1201b889739138c0 /drivers/soc | |
| parent | ff15045bed7be4da29773fa03b18da8b40a1d952 (diff) | |
msm: secure_buffer: Limit continuous time spent in hypervisor
hyp_assign_table() is a costly operation during which nonsecure interrupts
are disabled. Split this operation into multiple parts for better
real-time behavior.
Splitting is done by the following criteria:
- maximum number of physically contiguous memory regions
- maximum total memory size
Here is a chart showing the average performance of hyp_assign_table() with
N physically contiguous chunks each with size X.
#chunks chunk_size(pages) total_memory(pages) time(ms)
2 1 2 3.354
2 4 8 12.979
2 512 1024 4.349
8 1 8 4.714
8 4 32 26.781
8 512 4096 8.724
32 1 32 17.093
32 4 128 50.700
32 512 16384 26.717
128 1 128 71.076
128 4 512 126.305
Based on the above, select a maximum of 32 chunks or 512 total pages
as the limits.
Change-Id: I530cfdce76c8a2c38f60d6118647eaefd269e693
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
Diffstat (limited to 'drivers/soc')
| -rw-r--r-- | drivers/soc/qcom/secure_buffer.c | 45 |
1 file changed, 40 insertions, 5 deletions
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 148515a1dd81..e7dbcac064aa 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -252,6 +252,9 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
 	return info;
 }
 
+#define BATCH_MAX_SIZE SZ_2M
+#define BATCH_MAX_SECTIONS 32
+
 int hyp_assign_table(struct sg_table *table,
 			u32 *source_vm_list, int source_nelems,
 			int *dest_vmids, int *dest_perms,
@@ -266,6 +269,9 @@ int hyp_assign_table(struct sg_table *table,
 	struct mem_prot_info *sg_table_copy;
 	size_t sg_table_copy_size;
 
+	int batch_start, batch_end;
+	u64 batch_size;
+
 	/*
 	 * We can only pass cache-aligned sizes to hypervisor, so we need
 	 * to kmalloc and memcpy the source_vm_list here.
@@ -311,11 +317,40 @@ int hyp_assign_table(struct sg_table *table,
 	dmac_flush_range(dest_vm_copy,
 			 (void *)dest_vm_copy + dest_vm_copy_size);
 
-	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
-			MEM_PROT_ASSIGN_ID), &desc);
-	if (ret)
-		pr_info("%s: Failed to assign memory protection, ret = %d\n",
-			__func__, ret);
+	batch_start = 0;
+	while (batch_start < table->nents) {
+		/* Ensure no size zero batches */
+		batch_size = sg_table_copy[batch_start].size;
+		batch_end = batch_start + 1;
+		while (1) {
+			u64 size;
+
+			if (batch_end >= table->nents)
+				break;
+			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
+				break;
+
+			size = sg_table_copy[batch_end].size;
+			if (size + batch_size >= BATCH_MAX_SIZE)
+				break;
+
+			batch_size += size;
+			batch_end++;
+		}
+
+		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
+		desc.args[1] = (batch_end - batch_start) *
+				sizeof(sg_table_copy[0]);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROT_ASSIGN_ID), &desc);
+		if (ret) {
+			pr_info("%s: Failed to assign memory protection, ret = %d\n",
+				__func__, ret);
+			break;
+		}
+		batch_start = batch_end;
+	}
 
 out_unlock:
 	mutex_unlock(&secure_buffer_mutex);
