23 files changed, 4390 insertions, 228 deletions
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index 356e10969272..51ce60b3e2a4 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -40,3 +40,17 @@ config ION_POOL_CACHE_POLICY help Choose this option if need to explicity set cache policy of the pages in the page pool. + +config ION_MSM + tristate "Ion for MSM" + depends on ARCH_QCOM && ION + select MSM_SECURE_BUFFER + help + Choose this option if you wish to use ion on an MSM target. + +config ALLOC_BUFFERS_IN_4K_CHUNKS + bool "Turns off allocation optimization and allocate only 4K pages" + depends on ARCH_QCOM && ION + help + Choose this option if you want ION to allocate buffers in + only 4KB chunks. diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index b56fd2bf2b4f..04eda9821bb0 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -1,10 +1,10 @@ obj-$(CONFIG_ION) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ - ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o + ion_carveout_heap.o ion_chunk_heap.o +obj-$(CONFIG_CMA) += ion_cma_heap.o ion_cma_secure_heap.o obj-$(CONFIG_ION_TEST) += ion_test.o ifdef CONFIG_COMPAT obj-$(CONFIG_ION) += compat_ion.o endif - obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o obj-$(CONFIG_ION_TEGRA) += tegra/ - +obj-$(CONFIG_ION_MSM) += msm/ diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h index 9da8f917670b..1ee0fe06f2df 100644 --- a/drivers/staging/android/ion/compat_ion.h +++ b/drivers/staging/android/ion/compat_ion.h @@ -21,6 +21,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +#define compat_ion_user_handle_t compat_int_t + #else #define compat_ion_ioctl NULL diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index e237e9f3312d..5cfa495909bc 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -3,6 +3,7 @@ * drivers/staging/android/ion/ion.c * * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -15,7 +16,6 @@ * */ -#include <linux/device.h> #include <linux/err.h> #include <linux/file.h> #include <linux/freezer.h> @@ -23,6 +23,7 @@ #include <linux/anon_inodes.h> #include <linux/kthread.h> #include <linux/list.h> +#include <linux/list_sort.h> #include <linux/memblock.h> #include <linux/miscdevice.h> #include <linux/export.h> @@ -36,6 +37,9 @@ #include <linux/debugfs.h> #include <linux/dma-buf.h> #include <linux/idr.h> +#include <linux/msm_ion.h> +#include <trace/events/kmem.h> + #include "ion.h" #include "ion_priv.h" @@ -86,7 +90,7 @@ struct ion_client { struct rb_root handles; struct idr idr; struct mutex lock; - const char *name; + char *name; char *display_name; int display_serial; struct task_struct *task; @@ -207,6 +211,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, buffer->dev = dev; buffer->size = len; + buffer->flags = flags; + INIT_LIST_HEAD(&buffer->vmas); table = heap->ops->map_dma(heap, buffer); if (WARN_ONCE(table == NULL, @@ -237,9 +243,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, } } - buffer->dev = dev; - buffer->size = len; - INIT_LIST_HEAD(&buffer->vmas); mutex_init(&buffer->lock); /* * this will set up dma addresses for the sglist -- it is not @@ -253,6 +256,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, */ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) sg_dma_address(sg) = sg_phys(sg); + mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); @@ -272,6 +276,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer) if (WARN_ON(buffer->kmap_cnt > 0)) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->heap->ops->unmap_dma(buffer->heap, buffer); + buffer->heap->ops->free(buffer); vfree(buffer->pages); kfree(buffer); @@ -385,7 +390,7 @@ static void ion_handle_get(struct ion_handle *handle) kref_get(&handle->ref); } -static int ion_handle_put(struct ion_handle *handle) +int ion_handle_put(struct ion_handle *handle) { struct ion_client *client = handle->client; int ret; @@ -415,7 +420,7 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client, return ERR_PTR(-EINVAL); } -static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +struct ion_handle *ion_handle_get_by_id(struct ion_client *client, int id) { struct ion_handle *handle; @@ -476,6 +481,20 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, struct ion_buffer *buffer = NULL; struct ion_heap *heap; int ret; + unsigned long secure_allocation = flags & ION_FLAG_SECURE; + const unsigned int MAX_DBG_STR_LEN = 64; + char dbg_str[MAX_DBG_STR_LEN]; + unsigned int dbg_str_idx = 0; + + dbg_str[0] = '\0'; + + /* + * For now, we don't want to fault in pages individually since + * clients are already doing manual cache maintenance. In + * other words, the implicit caching infrastructure is in + * place (in code) but should not be used. 
+ */ + flags |= ION_FLAG_CACHED_NEEDS_SYNC; pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, len, align, heap_id_mask, flags); @@ -495,17 +514,53 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, /* if the caller didn't specify this heap id */ if (!((1 << heap->id) & heap_id_mask)) continue; + /* Do not allow un-secure heap if secure is specified */ + if (secure_allocation && + !ion_heap_allow_secure_allocation(heap->type)) + continue; + trace_ion_alloc_buffer_start(client->name, heap->name, len, + heap_id_mask, flags); buffer = ion_buffer_create(heap, dev, len, align, flags); + trace_ion_alloc_buffer_end(client->name, heap->name, len, + heap_id_mask, flags); if (!IS_ERR(buffer)) break; + + trace_ion_alloc_buffer_fallback(client->name, heap->name, len, + heap_id_mask, flags, + PTR_ERR(buffer)); + if (dbg_str_idx < MAX_DBG_STR_LEN) { + unsigned int len_left = MAX_DBG_STR_LEN-dbg_str_idx-1; + int ret_value = snprintf(&dbg_str[dbg_str_idx], + len_left, "%s ", heap->name); + if (ret_value >= len_left) { + /* overflow */ + dbg_str[MAX_DBG_STR_LEN-1] = '\0'; + dbg_str_idx = MAX_DBG_STR_LEN; + } else if (ret_value >= 0) { + dbg_str_idx += ret_value; + } else { + /* error */ + dbg_str[MAX_DBG_STR_LEN-1] = '\0'; + } + } } up_read(&dev->lock); - if (buffer == NULL) + if (buffer == NULL) { + trace_ion_alloc_buffer_fail(client->name, dbg_str, len, + heap_id_mask, flags, -ENODEV); return ERR_PTR(-ENODEV); + } - if (IS_ERR(buffer)) + if (IS_ERR(buffer)) { + trace_ion_alloc_buffer_fail(client->name, dbg_str, len, + heap_id_mask, flags, + PTR_ERR(buffer)); + pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n", + len, align, dbg_str, client->name); return ERR_CAST(buffer); + } handle = ion_handle_create(client, buffer); @@ -538,7 +593,6 @@ void ion_free(struct ion_client *client, struct ion_handle *handle) mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); - if (!valid_handle) { WARN(1, "%s: invalid handle passed to free.\n", __func__); mutex_unlock(&client->lock); @@ -679,28 +733,25 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) { struct ion_client *client = s->private; struct rb_node *n; - size_t sizes[ION_NUM_HEAP_IDS] = {0}; - const char *names[ION_NUM_HEAP_IDS] = {NULL}; - int i; + + seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n", + "heap_name", "size_in_bytes", "handle refcount", + "buffer"); mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); - unsigned int id = handle->buffer->heap->id; - if (!names[id]) - names[id] = handle->buffer->heap->name; - sizes[id] += handle->buffer->size; - } - mutex_unlock(&client->lock); + seq_printf(s, "%16.16s: %16zx : %16d : %12p", + handle->buffer->heap->name, + handle->buffer->size, + atomic_read(&handle->ref.refcount), + handle->buffer); - seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); - for (i = 0; i < ION_NUM_HEAP_IDS; i++) { - if (!names[i]) - continue; - seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); + seq_printf(s, "\n"); } + mutex_unlock(&client->lock); return 0; } @@ -771,6 +822,7 @@ struct ion_client *ion_client_create(struct ion_device *dev, client->handles = RB_ROOT; idr_init(&client->idr); mutex_init(&client->lock); + client->task = task; client->pid = pid; client->name = kstrdup(name, GFP_KERNEL); @@ -843,6 +895,7 @@ void ion_client_destroy(struct ion_client *client) 
put_task_struct(client->task); rb_erase(&client->node, &dev->clients); debugfs_remove_recursive(client->debug_root); + up_write(&dev->lock); kfree(client->display_name); @@ -851,6 +904,50 @@ void ion_client_destroy(struct ion_client *client) } EXPORT_SYMBOL(ion_client_destroy); +int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle, + unsigned long *flags) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to %s.\n", + __func__, __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + *flags = buffer->flags; + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + + return 0; +} +EXPORT_SYMBOL(ion_handle_get_flags); + +int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle, + unsigned long *size) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to %s.\n", + __func__, __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + *size = buffer->size; + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + + return 0; +} +EXPORT_SYMBOL(ion_handle_get_size); + struct sg_table *ion_sg_table(struct ion_client *client, struct ion_handle *handle) { @@ -871,6 +968,36 @@ struct sg_table *ion_sg_table(struct ion_client *client, } EXPORT_SYMBOL(ion_sg_table); +struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base, + size_t chunk_size, size_t total_size) +{ + struct sg_table *table; + int i, n_chunks, ret; + struct scatterlist *sg; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + + n_chunks = DIV_ROUND_UP(total_size, chunk_size); + pr_debug("creating sg_table with %d chunks\n", n_chunks); + + ret = sg_alloc_table(table, n_chunks, GFP_KERNEL); + if (ret) + goto err0; + + for_each_sg(table->sgl, sg, table->nents, i) { + dma_addr_t addr = buffer_base + i * chunk_size; + sg_dma_address(sg) = addr; + sg->length = chunk_size; + } + + return table; +err0: + kfree(table); + return ERR_PTR(ret); +} + static void ion_buffer_sync_for_device(struct ion_buffer *buffer, struct device *dev, enum dma_data_direction direction); @@ -995,6 +1122,9 @@ static void ion_vm_close(struct vm_area_struct *vma) break; } mutex_unlock(&buffer->lock); + + if (buffer->heap->ops->unmap_user) + buffer->heap->ops->unmap_user(buffer->heap, buffer); } static const struct vm_operations_struct ion_vma_ops = { @@ -1019,6 +1149,7 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) VM_DONTDUMP; vma->vm_private_data = buffer; vma->vm_ops = &ion_vma_ops; + vma->vm_flags |= VM_MIXEDMAP; ion_vm_open(vma); return 0; } @@ -1146,7 +1277,6 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (fd < 0) dma_buf_put(dmabuf); - return fd; } EXPORT_SYMBOL(ion_share_dma_buf_fd); @@ -1326,6 +1456,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) data.custom.arg); break; } + case ION_IOC_CLEAN_CACHES: + return client->dev->custom_ioctl(client, + ION_IOC_CLEAN_CACHES, arg); + case ION_IOC_INV_CACHES: + return client->dev->custom_ioctl(client, + ION_IOC_INV_CACHES, arg); + case ION_IOC_CLEAN_INV_CACHES: + return client->dev->custom_ioctl(client, + ION_IOC_CLEAN_INV_CACHES, arg); default: return -ENOTTY; 
} @@ -1392,6 +1531,106 @@ static size_t ion_debug_heap_total(struct ion_client *client, return size; } +/** + * Create a mem_map of the heap. + * @param s seq_file to log error message to. + * @param heap The heap to create mem_map for. + * @param mem_map The mem map to be created. + */ +void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap, + struct list_head *mem_map) +{ + struct ion_device *dev = heap->dev; + struct rb_node *cnode; + size_t size; + struct ion_client *client; + + if (!heap->ops->phys) + return; + + down_read(&dev->lock); + for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) { + struct rb_node *hnode; + client = rb_entry(cnode, struct ion_client, node); + + mutex_lock(&client->lock); + for (hnode = rb_first(&client->handles); + hnode; + hnode = rb_next(hnode)) { + struct ion_handle *handle = rb_entry( + hnode, struct ion_handle, node); + if (handle->buffer->heap == heap) { + struct mem_map_data *data = + kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto inner_error; + heap->ops->phys(heap, handle->buffer, + &(data->addr), &size); + data->size = (unsigned long) size; + data->addr_end = data->addr + data->size - 1; + data->client_name = kstrdup(client->name, + GFP_KERNEL); + if (!data->client_name) { + kfree(data); + goto inner_error; + } + list_add(&data->node, mem_map); + } + } + mutex_unlock(&client->lock); + } + up_read(&dev->lock); + return; + +inner_error: + seq_puts(s, + "ERROR: out of memory. Part of memory map will not be logged\n"); + mutex_unlock(&client->lock); + up_read(&dev->lock); +} + +/** + * Free the memory allocated by ion_debug_mem_map_create + * @param mem_map The mem map to free. + */ +static void ion_debug_mem_map_destroy(struct list_head *mem_map) +{ + if (mem_map) { + struct mem_map_data *data, *tmp; + list_for_each_entry_safe(data, tmp, mem_map, node) { + list_del(&data->node); + kfree(data->client_name); + kfree(data); + } + } +} + +static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct mem_map_data *d1, *d2; + d1 = list_entry(a, struct mem_map_data, node); + d2 = list_entry(b, struct mem_map_data, node); + if (d1->addr == d2->addr) + return d1->size - d2->size; + return d1->addr - d2->addr; +} + +/** + * Print heap debug information. + * @param s seq_file to log message to. + * @param heap pointer to heap that we will print debug information for. 
+ */ +static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap) +{ + if (heap->ops->print_debug) { + struct list_head mem_map = LIST_HEAD_INIT(mem_map); + ion_debug_mem_map_create(s, heap, &mem_map); + list_sort(NULL, &mem_map, mem_map_cmp); + heap->ops->print_debug(heap, s, &mem_map); + ion_debug_mem_map_destroy(&mem_map); + } +} + static int ion_debug_heap_show(struct seq_file *s, void *unused) { struct ion_heap *heap = s->private; @@ -1403,6 +1642,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size"); seq_puts(s, "----------------------------------------------------\n"); + down_read(&dev->lock); for (n = rb_first(&dev->clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); @@ -1421,6 +1661,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) client->pid, size); } } + up_read(&dev->lock); seq_puts(s, "----------------------------------------------------\n"); seq_puts(s, "orphaned allocations (info is from last known client):\n"); mutex_lock(&dev->buffer_lock); @@ -1451,6 +1692,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) if (heap->debug_show) heap->debug_show(heap, s, unused); + ion_heap_print_debug(s, heap); return 0; } @@ -1500,6 +1742,7 @@ static int debug_shrink_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, debug_shrink_set, "%llu\n"); +#endif void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) { @@ -1539,6 +1782,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) path, heap->name); } +#ifdef DEBUG_HEAP_SHRINKER if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { char debug_name[64]; @@ -1554,11 +1798,34 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) path, debug_name); } } +#endif up_write(&dev->lock); } EXPORT_SYMBOL(ion_device_add_heap); +int ion_walk_heaps(struct ion_client *client, int heap_id, void *data, + int (*f)(struct ion_heap *heap, void *data)) +{ + int ret_val = -EINVAL; + struct ion_heap *heap; + struct ion_device *dev = client->dev; + /* + * traverse the list of heaps available in this system + * and find the heap that is specified. + */ + down_write(&dev->lock); + plist_for_each_entry(heap, &dev->heaps, node) { + if (ION_HEAP(heap->id) != heap_id) + continue; + ret_val = f(heap, data); + break; + } + up_write(&dev->lock); + return ret_val; +} +EXPORT_SYMBOL(ion_walk_heaps); + struct ion_device *ion_device_create(long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, @@ -1642,13 +1909,13 @@ void __init ion_reserve(struct ion_platform_data *data) int ret = memblock_reserve(data->heaps[i].base, data->heaps[i].size); if (ret) - pr_err("memblock reserve of %zx@%lx failed\n", + pr_err("memblock reserve of %zx@%pa failed\n", data->heaps[i].size, - data->heaps[i].base); + &data->heaps[i].base); } - pr_info("%s: %s reserved base %lx size %zu\n", __func__, + pr_info("%s: %s reserved base %pa size %zu\n", __func__, data->heaps[i].name, - data->heaps[i].base, + &data->heaps[i].base, data->heaps[i].size); } } diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index b860c5f579f5..5f99ea16617a 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -2,6 +2,7 @@ * drivers/staging/android/ion/ion.h * * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -17,8 +18,7 @@ #ifndef _LINUX_ION_H #define _LINUX_ION_H -#include <linux/types.h> - +#include <linux/err.h> #include "../uapi/ion.h" struct ion_handle; @@ -28,13 +28,11 @@ struct ion_mapper; struct ion_client; struct ion_buffer; -/* - * This should be removed some day when phys_addr_t's are fully - * plumbed in the kernel, and all instances of ion_phys_addr_t should - * be converted to phys_addr_t. For the time being many kernel interfaces - * do not accept phys_addr_t's that would have to - */ -#define ion_phys_addr_t unsigned long +/* This should be removed some day when phys_addr_t's are fully + plumbed in the kernel, and all instances of ion_phys_addr_t should + be converted to phys_addr_t. For the time being many kernel interfaces + do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t dma_addr_t /** * struct ion_platform_heap - defines a heap in the given platform @@ -45,6 +43,9 @@ struct ion_buffer; * @name: used for debug purposes * @base: base address of heap in physical memory if applicable * @size: size of the heap in bytes if applicable + * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise. + * @extra_data: Extra data specific to each heap type + * @priv: heap private data * @align: required alignment in physical memory if applicable * @priv: private info passed from the board file * @@ -56,22 +57,28 @@ struct ion_platform_heap { const char *name; ion_phys_addr_t base; size_t size; + unsigned int has_outer_cache; + void *extra_data; ion_phys_addr_t align; void *priv; }; /** * struct ion_platform_data - array of platform heaps passed from board file - * @nr: number of structures in the array - * @heaps: array of platform_heap structions + * @has_outer_cache: set to 1 if outer cache is used, 0 otherwise. + * @nr: number of structures in the array + * @heaps: array of platform_heap structions * * Provided by the board file in the form of platform data to a platform device. 
*/ struct ion_platform_data { + unsigned int has_outer_cache; int nr; struct ion_platform_heap *heaps; }; +#ifdef CONFIG_ION + /** * ion_reserve() - reserve memory for ion heaps if applicable * @data: platform data specifying starting physical address and @@ -202,4 +209,68 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); */ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); +#else +static inline void ion_reserve(struct ion_platform_data *data) +{ + +} + +static inline struct ion_client *ion_client_create( + struct ion_device *dev, unsigned int heap_id_mask, const char *name) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_client_destroy(struct ion_client *client) { } + +static inline struct ion_handle *ion_alloc(struct ion_client *client, + size_t len, size_t align, + unsigned int heap_id_mask, + unsigned int flags) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_free(struct ion_client *client, + struct ion_handle *handle) { } + + +static inline int ion_phys(struct ion_client *client, + struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len) +{ + return -ENODEV; +} + +static inline struct sg_table *ion_sg_table(struct ion_client *client, + struct ion_handle *handle) +{ + return ERR_PTR(-ENODEV); +} + +static inline void *ion_map_kernel(struct ion_client *client, + struct ion_handle *handle) +{ + return ERR_PTR(-ENODEV); +} + +static inline void ion_unmap_kernel(struct ion_client *client, + struct ion_handle *handle) { } + +static inline int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle) +{ + return -ENODEV; +} + +static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +{ + return ERR_PTR(-ENODEV); +} + +static inline int ion_handle_get_flags(struct ion_client *client, + struct ion_handle *handle, unsigned long *flags) +{ + return -ENODEV; +} + +#endif /* CONFIG_ION */ #endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index a3446da4fdc2..5a1e2be57f19 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -20,49 +20,62 @@ #include <linux/errno.h> #include <linux/err.h> #include <linux/dma-mapping.h> +#include <linux/msm_ion.h> + +#include <asm/cacheflush.h> #include "ion.h" #include "ion_priv.h" #define ION_CMA_ALLOCATE_FAILED -1 -struct ion_cma_heap { - struct ion_heap heap; - struct device *dev; -}; - -#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) - struct ion_cma_buffer_info { void *cpu_addr; dma_addr_t handle; struct sg_table *table; + bool is_cached; }; +static int cma_heap_has_outer_cache; +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replace by dma_common_get_sgtable + * as soon as it will avalaible. 
+ */ +static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size) +{ + struct page *page = pfn_to_page(PFN_DOWN(handle)); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return 0; +} /* ION CMA heap operations functions */ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long len, unsigned long align, unsigned long flags) { - struct ion_cma_heap *cma_heap = to_cma_heap(heap); - struct device *dev = cma_heap->dev; + struct device *dev = heap->priv; struct ion_cma_buffer_info *info; dev_dbg(dev, "Request buffer allocation len %ld\n", len); - if (buffer->flags & ION_FLAG_CACHED) - return -EINVAL; - - if (align > PAGE_SIZE) - return -EINVAL; - info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); if (!info) return ION_CMA_ALLOCATE_FAILED; - info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), - GFP_HIGHUSER | __GFP_ZERO); + if (!ION_IS_CACHED(flags)) + info->cpu_addr = dma_alloc_writecombine(dev, len, + &(info->handle), GFP_KERNEL); + else + info->cpu_addr = dma_alloc_nonconsistent(dev, len, + &(info->handle), GFP_KERNEL); if (!info->cpu_addr) { dev_err(dev, "Fail to allocate buffer\n"); @@ -71,20 +84,18 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!info->table) - goto free_mem; + goto err; + + info->is_cached = ION_IS_CACHED(flags); + + ion_cma_get_sgtable(dev, + info->table, info->cpu_addr, info->handle, len); - if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle, - len)) - goto free_table; /* keep this for memory release */ buffer->priv_virt = info; dev_dbg(dev, "Allocate buffer %p\n", buffer); return 0; -free_table: - kfree(info->table); -free_mem: - dma_free_coherent(dev, len, info->cpu_addr, info->handle); err: kfree(info); return ION_CMA_ALLOCATE_FAILED; @@ -92,15 +103,14 @@ err: static void ion_cma_free(struct ion_buffer *buffer) { - struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); - struct device *dev = cma_heap->dev; + struct device *dev = buffer->heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; dev_dbg(dev, "Release buffer %p\n", buffer); /* release memory */ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); - /* release sg table */ sg_free_table(info->table); + /* release sg table */ kfree(info->table); kfree(info); } @@ -109,8 +119,7 @@ static void ion_cma_free(struct ion_buffer *buffer) static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, ion_phys_addr_t *addr, size_t *len) { - struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); - struct device *dev = cma_heap->dev; + struct device *dev = heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer, @@ -138,25 +147,56 @@ static void ion_cma_heap_unmap_dma(struct ion_heap *heap, static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, struct vm_area_struct *vma) { - struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); - struct device *dev = cma_heap->dev; + struct device *dev = buffer->heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; - return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, - buffer->size); + if (info->is_cached) + return dma_mmap_nonconsistent(dev, vma, info->cpu_addr, + info->handle, 
buffer->size); + else + return dma_mmap_writecombine(dev, vma, info->cpu_addr, + info->handle, buffer->size); } static void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer) { struct ion_cma_buffer_info *info = buffer->priv_virt; - /* kernel memory mapping has been done at allocation time */ + return info->cpu_addr; } static void ion_cma_unmap_kernel(struct ion_heap *heap, - struct ion_buffer *buffer) + struct ion_buffer *buffer) { + return; +} + +static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s, + const struct list_head *mem_map) +{ + if (mem_map) { + struct mem_map_data *data; + + seq_printf(s, "\nMemory Map\n"); + seq_printf(s, "%16.s %14.s %14.s %14.s\n", + "client", "start address", "end address", + "size"); + + list_for_each_entry(data, mem_map, node) { + const char *client_name = "(null)"; + + + if (data->client_name) + client_name = data->client_name; + + seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n", + client_name, &data->addr, + &data->addr_end, + data->size, data->size); + } + } + return 0; } static struct ion_heap_ops ion_cma_ops = { @@ -168,30 +208,28 @@ static struct ion_heap_ops ion_cma_ops = { .map_user = ion_cma_mmap, .map_kernel = ion_cma_map_kernel, .unmap_kernel = ion_cma_unmap_kernel, + .print_debug = ion_cma_print_debug, }; struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) { - struct ion_cma_heap *cma_heap; + struct ion_heap *heap; - cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); - if (!cma_heap) + if (!heap) return ERR_PTR(-ENOMEM); - cma_heap->heap.ops = &ion_cma_ops; - /* - * get device from private heaps data, later it will be - * used to make the link with reserved CMA memory - */ - cma_heap->dev = data->priv; - cma_heap->heap.type = ION_HEAP_TYPE_DMA; - return &cma_heap->heap; + heap->ops = &ion_cma_ops; + /* set device as private heaps data, later it will be + * used to make the link with reserved CMA memory */ + heap->priv = data->priv; + heap->type = ION_HEAP_TYPE_DMA; + cma_heap_has_outer_cache = data->has_outer_cache; + return heap; } void ion_cma_heap_destroy(struct ion_heap *heap) { - struct ion_cma_heap *cma_heap = to_cma_heap(heap); - - kfree(cma_heap); + kfree(heap); } diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c new file mode 100644 index 000000000000..c7eca7c68b09 --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_secure_heap.c @@ -0,0 +1,724 @@ +/* + * drivers/staging/android/ion/ion_cma_secure_heap.c + * + * Copyright (C) Linaro 2012 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/device.h> +#include <linux/ion.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> +#include <linux/msm_ion.h> +#include <trace/events/kmem.h> + +#include <asm/cacheflush.h> + +/* for ion_heap_ops structure */ +#include "ion_priv.h" +#include "msm/ion_cp_common.h" + +#define ION_CMA_ALLOCATE_FAILED NULL + +struct ion_secure_cma_buffer_info { + dma_addr_t phys; + struct sg_table *table; + bool is_cached; +}; + +struct ion_cma_alloc_chunk { + void *cpu_addr; + struct list_head entry; + dma_addr_t handle; + unsigned long chunk_size; + atomic_t cnt; +}; + +struct ion_cma_secure_heap { + struct device *dev; + /* + * Protects against races between threads allocating memory/adding to + * pool at the same time. (e.g. thread 1 adds to pool, thread 2 + * allocates thread 1's memory before thread 1 knows it needs to + * allocate more. + * Admittedly this is fairly coarse grained right now but the chance for + * contention on this lock is unlikely right now. This can be changed if + * this ever changes in the future + */ + struct mutex alloc_lock; + /* + * protects the list of memory chunks in this pool + */ + struct mutex chunk_lock; + struct ion_heap heap; + /* + * Bitmap for allocation. This contains the aggregate of all chunks. */ + unsigned long *bitmap; + /* + * List of all allocated chunks + * + * This is where things get 'clever'. Individual allocations from + * dma_alloc_coherent must be allocated and freed in one chunk. + * We don't just want to limit the allocations to those confined + * within a single chunk (if clients allocate n small chunks we would + * never be able to use the combined size). The bitmap allocator is + * used to find the contiguous region and the parts of the chunks are + * marked off as used. The chunks won't be freed in the shrinker until + * the usage is actually zero. + */ + struct list_head chunks; + int npages; + ion_phys_addr_t base; + struct work_struct work; + unsigned long last_alloc; + struct shrinker shrinker; + atomic_t total_allocated; + atomic_t total_pool_size; + unsigned long heap_size; + unsigned long default_prefetch_size; +}; + +static void ion_secure_pool_pages(struct work_struct *work); + +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replace by dma_common_get_sgtable + * as soon as it will avalaible. 
+ */ +int ion_secure_cma_get_sgtable(struct device *dev, struct sg_table *sgt, + dma_addr_t handle, size_t size) +{ + struct page *page = pfn_to_page(PFN_DOWN(handle)); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + sg_dma_address(sgt->sgl) = handle; + return 0; +} + +static int ion_secure_cma_add_to_pool( + struct ion_cma_secure_heap *sheap, + unsigned long len, + bool prefetch) +{ + void *cpu_addr; + dma_addr_t handle; + DEFINE_DMA_ATTRS(attrs); + int ret = 0; + struct ion_cma_alloc_chunk *chunk; + + + trace_ion_secure_cma_add_to_pool_start(len, + atomic_read(&sheap->total_pool_size), prefetch); + mutex_lock(&sheap->chunk_lock); + + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + ret = -ENOMEM; + goto out; + } + + dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); +/* dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs); */ + + cpu_addr = dma_alloc_attrs(sheap->dev, len, &handle, GFP_KERNEL, + &attrs); + + if (!cpu_addr) { + ret = -ENOMEM; + goto out_free; + } + + chunk->cpu_addr = cpu_addr; + chunk->handle = handle; + chunk->chunk_size = len; + atomic_set(&chunk->cnt, 0); + list_add(&chunk->entry, &sheap->chunks); + atomic_add(len, &sheap->total_pool_size); + /* clear the bitmap to indicate this region can be allocated from */ + bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT, + len >> PAGE_SHIFT); + goto out; + +out_free: + kfree(chunk); +out: + mutex_unlock(&sheap->chunk_lock); + + trace_ion_secure_cma_add_to_pool_end(len, + atomic_read(&sheap->total_pool_size), prefetch); + + return ret; +} + +static void ion_secure_pool_pages(struct work_struct *work) +{ + struct ion_cma_secure_heap *sheap = container_of(work, + struct ion_cma_secure_heap, work); + + ion_secure_cma_add_to_pool(sheap, sheap->last_alloc, true); +} +/* + * @s1: start of the first region + * @l1: length of the first region + * @s2: start of the second region + * @l2: length of the second region + * + * Returns the total number of bytes that intersect. + * + * s1 is the region we are trying to clear so s2 may be subsumed by s1 but the + * maximum size to clear should only ever be l1 + * + */ +static unsigned int intersect(unsigned long s1, unsigned long l1, + unsigned long s2, unsigned long l2) +{ + unsigned long base1 = s1; + unsigned long end1 = s1 + l1; + unsigned long base2 = s2; + unsigned long end2 = s2 + l2; + + /* Case 0: The regions don't overlap at all */ + if (!(base1 < end2 && base2 < end1)) + return 0; + + /* Case 1: region 2 is subsumed by region 1 */ + if (base1 <= base2 && end2 <= end1) + return l2; + + /* case 2: region 1 is subsumed by region 2 */ + if (base2 <= base1 && end1 <= end2) + return l1; + + /* case 3: region1 overlaps region2 on the bottom */ + if (base2 < end1 && base2 > base1) + return end1 - base2; + + /* case 4: region 2 overlaps region1 on the bottom */ + if (base1 < end2 && base1 > base2) + return end2 - base1; + + pr_err("Bad math! Did not detect chunks correctly! 
%lx %lx %lx %lx\n", + s1, l1, s2, l2); + BUG(); +} + +int ion_secure_cma_prefetch(struct ion_heap *heap, void *data) +{ + unsigned long len = (unsigned long)data; + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + unsigned long diff; + + if ((int) heap->type != ION_HEAP_TYPE_SECURE_DMA) + return -EINVAL; + + if (len == 0) + len = sheap->default_prefetch_size; + + /* + * Only prefetch as much space as there is left in the pool so + * check against the current free size of the heap. + * This is slightly racy if someone else is allocating at the same + * time. CMA has a restricted size for the heap so worst case + * the prefetch doesn't work because the allocation fails. + */ + diff = sheap->heap_size - atomic_read(&sheap->total_pool_size); + + if (len > diff) + len = diff; + + sheap->last_alloc = len; + trace_ion_prefetching(sheap->last_alloc); + schedule_work(&sheap->work); + + return 0; +} + +static void bad_math_dump(unsigned long len, int total_overlap, + struct ion_cma_secure_heap *sheap, + bool alloc, dma_addr_t paddr) +{ + struct list_head *entry; + + pr_err("Bad math! expected total was %lx actual was %x\n", + len, total_overlap); + pr_err("attempted %s address was %pa len %lx\n", + alloc ? "allocation" : "free", &paddr, len); + pr_err("chunks:\n"); + list_for_each(entry, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = + container_of(entry, + struct ion_cma_alloc_chunk, entry); + pr_info("--- pa %pa len %lx\n", + &chunk->handle, chunk->chunk_size); + } + BUG(); + +} + +static int ion_secure_cma_alloc_from_pool( + struct ion_cma_secure_heap *sheap, + dma_addr_t *phys, + unsigned long len) +{ + dma_addr_t paddr; + unsigned long page_no; + int ret = 0; + int total_overlap = 0; + struct list_head *entry; + + mutex_lock(&sheap->chunk_lock); + + page_no = bitmap_find_next_zero_area(sheap->bitmap, + sheap->npages, 0, len >> PAGE_SHIFT, 0); + if (page_no >= sheap->npages) { + ret = -ENOMEM; + goto out; + } + bitmap_set(sheap->bitmap, page_no, len >> PAGE_SHIFT); + paddr = sheap->base + (page_no << PAGE_SHIFT); + + + list_for_each(entry, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = container_of(entry, + struct ion_cma_alloc_chunk, entry); + int overlap = intersect(chunk->handle, + chunk->chunk_size, paddr, len); + + atomic_add(overlap, &chunk->cnt); + total_overlap += overlap; + } + + if (total_overlap != len) + bad_math_dump(len, total_overlap, sheap, 1, paddr); + + *phys = paddr; +out: + mutex_unlock(&sheap->chunk_lock); + return ret; +} + +static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap, + struct ion_cma_alloc_chunk *chunk) +{ + DEFINE_DMA_ATTRS(attrs); + + dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); + /* This region is 'allocated' and not available to allocate from */ + bitmap_set(sheap->bitmap, (chunk->handle - sheap->base) >> PAGE_SHIFT, + chunk->chunk_size >> PAGE_SHIFT); + dma_free_attrs(sheap->dev, chunk->chunk_size, chunk->cpu_addr, + chunk->handle, &attrs); + atomic_sub(chunk->chunk_size, &sheap->total_pool_size); + list_del(&chunk->entry); + kfree(chunk); + +} + +void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) +{ + struct list_head *entry, *_n; + unsigned long drained_size = 0, skipped_size = 0; + + trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size); + + list_for_each_safe(entry, _n, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = container_of(entry, + struct ion_cma_alloc_chunk, entry); + + if (max_nr < 0) + break; + + if 
(atomic_read(&chunk->cnt) == 0) { + max_nr -= chunk->chunk_size; + drained_size += chunk->chunk_size; + ion_secure_cma_free_chunk(sheap, chunk); + } else { + skipped_size += chunk->chunk_size; + } + } + + trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size); +} + +int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + + mutex_lock(&sheap->chunk_lock); + __ion_secure_cma_shrink_pool(sheap, INT_MAX); + mutex_unlock(&sheap->chunk_lock); + + return 0; +} + +static int ion_secure_cma_shrinker(struct shrinker *shrinker, + struct shrink_control *sc) +{ + struct ion_cma_secure_heap *sheap = container_of(shrinker, + struct ion_cma_secure_heap, shrinker); + int nr_to_scan = sc->nr_to_scan; + + if (nr_to_scan == 0) + return atomic_read(&sheap->total_pool_size); + + /* + * Allocation path may invoke the shrinker. Proceeding any further + * would cause a deadlock in several places so don't shrink if that + * happens. + */ + if (!mutex_trylock(&sheap->chunk_lock)) + return -1; + + __ion_secure_cma_shrink_pool(sheap, nr_to_scan); + + mutex_unlock(&sheap->chunk_lock); + + return atomic_read(&sheap->total_pool_size); +} + +static void ion_secure_cma_free_from_pool(struct ion_cma_secure_heap *sheap, + dma_addr_t handle, + unsigned long len) +{ + struct list_head *entry, *_n; + int total_overlap = 0; + + mutex_lock(&sheap->chunk_lock); + bitmap_clear(sheap->bitmap, (handle - sheap->base) >> PAGE_SHIFT, + len >> PAGE_SHIFT); + + list_for_each_safe(entry, _n, &sheap->chunks) { + struct ion_cma_alloc_chunk *chunk = container_of(entry, + struct ion_cma_alloc_chunk, entry); + int overlap = intersect(chunk->handle, + chunk->chunk_size, handle, len); + + /* + * Don't actually free this from the pool list yet, let either + * an explicit drain call or the shrinkers take care of the + * pool. 
+ */ + atomic_sub_return(overlap, &chunk->cnt); + BUG_ON(atomic_read(&chunk->cnt) < 0); + + total_overlap += overlap; + } + + BUG_ON(atomic_read(&sheap->total_pool_size) < 0); + + if (total_overlap != len) + bad_math_dump(len, total_overlap, sheap, 0, handle); + + mutex_unlock(&sheap->chunk_lock); +} + +/* ION CMA heap operations functions */ +static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate( + struct ion_heap *heap, struct ion_buffer *buffer, + unsigned long len, unsigned long align, + unsigned long flags) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + struct ion_secure_cma_buffer_info *info; + int ret; + + dev_dbg(sheap->dev, "Request buffer allocation len %ld\n", len); + + info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL); + if (!info) { + dev_err(sheap->dev, "Can't allocate buffer info\n"); + return ION_CMA_ALLOCATE_FAILED; + } + + mutex_lock(&sheap->alloc_lock); + ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len); + + if (ret) { +retry: + ret = ion_secure_cma_add_to_pool(sheap, len, false); + if (ret) { + mutex_unlock(&sheap->alloc_lock); + dev_err(sheap->dev, "Fail to allocate buffer\n"); + goto err; + } + ret = ion_secure_cma_alloc_from_pool(sheap, &info->phys, len); + if (ret) { + /* + * Lost the race with the shrinker, try again + */ + goto retry; + } + } + mutex_unlock(&sheap->alloc_lock); + + atomic_add(len, &sheap->total_allocated); + info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!info->table) { + dev_err(sheap->dev, "Fail to allocate sg table\n"); + goto err; + } + + ion_secure_cma_get_sgtable(sheap->dev, + info->table, info->phys, len); + + /* keep this for memory release */ + buffer->priv_virt = info; + dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer); + return info; + +err: + kfree(info); + return ION_CMA_ALLOCATE_FAILED; +} + +static int ion_secure_cma_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, unsigned long align, + unsigned long flags) +{ + unsigned long secure_allocation = flags & ION_FLAG_SECURE; + struct ion_secure_cma_buffer_info *buf = NULL; + + if (!secure_allocation) { + pr_err("%s: non-secure allocation disallowed from heap %s %lx\n", + __func__, heap->name, flags); + return -ENOMEM; + } + + if (ION_IS_CACHED(flags)) { + pr_err("%s: cannot allocate cached memory from secure heap %s\n", + __func__, heap->name); + return -ENOMEM; + } + + if (!IS_ALIGNED(len, SZ_1M)) { + pr_err("%s: length of allocation from %s must be a multiple of 1MB\n", + __func__, heap->name); + return -ENOMEM; + } + + trace_ion_secure_cma_allocate_start(heap->name, len, align, flags); + buf = __ion_secure_cma_allocate(heap, buffer, len, align, flags); + trace_ion_secure_cma_allocate_end(heap->name, len, align, flags); + + if (buf) { + int ret; + + if (!msm_secure_v2_is_supported()) { + pr_debug("%s: securing buffers is not supported on this platform\n", + __func__); + ret = 1; + } else { + trace_ion_cp_secure_buffer_start(heap->name, len, align, + flags); + ret = msm_ion_secure_table(buf->table, 0, 0); + trace_ion_cp_secure_buffer_end(heap->name, len, align, + flags); + } + if (ret) { + /* + * Don't treat the secure buffer failing here as an + * error for backwards compatibility reasons. If + * the secure fails, the map will also fail so there + * is no security risk. 
+ */ + pr_debug("%s: failed to secure buffer\n", __func__); + } + return 0; + } else { + return -ENOMEM; + } +} + + +static void ion_secure_cma_free(struct ion_buffer *buffer) +{ + struct ion_cma_secure_heap *sheap = + container_of(buffer->heap, struct ion_cma_secure_heap, heap); + struct ion_secure_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(sheap->dev, "Release buffer %p\n", buffer); + if (msm_secure_v2_is_supported()) + msm_ion_unsecure_table(info->table); + atomic_sub(buffer->size, &sheap->total_allocated); + BUG_ON(atomic_read(&sheap->total_allocated) < 0); + /* release memory */ + ion_secure_cma_free_from_pool(sheap, info->phys, buffer->size); + /* release sg table */ + sg_free_table(info->table); + kfree(info->table); + kfree(info); +} + +static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + struct ion_secure_cma_buffer_info *info = buffer->priv_virt; + + dev_dbg(sheap->dev, "Return buffer %p physical address 0x%pa\n", buffer, + &info->phys); + + *addr = info->phys; + *len = buffer->size; + + return 0; +} + +struct sg_table *ion_secure_cma_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_secure_cma_buffer_info *info = buffer->priv_virt; + + return info->table; +} + +void ion_secure_cma_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static int ion_secure_cma_mmap(struct ion_heap *mapper, + struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + pr_info("%s: mmaping from secure heap %s disallowed\n", + __func__, mapper->name); + return -EINVAL; +} + +static void *ion_secure_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + pr_info("%s: kernel mapping from secure heap %s disallowed\n", + __func__, heap->name); + return ERR_PTR(-EINVAL); +} + +static void ion_secure_cma_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static int ion_secure_cma_print_debug(struct ion_heap *heap, struct seq_file *s, + const struct list_head *mem_map) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + + if (mem_map) { + struct mem_map_data *data; + + seq_printf(s, "\nMemory Map\n"); + seq_printf(s, "%16.s %14.s %14.s %14.s\n", + "client", "start address", "end address", + "size"); + + list_for_each_entry(data, mem_map, node) { + const char *client_name = "(null)"; + + + if (data->client_name) + client_name = data->client_name; + + seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n", + client_name, &data->addr, + &data->addr_end, + data->size, data->size); + } + } + seq_printf(s, "Total allocated: 0x%x\n", + atomic_read(&sheap->total_allocated)); + seq_printf(s, "Total pool size: 0x%x\n", + atomic_read(&sheap->total_pool_size)); + return 0; +} + +static struct ion_heap_ops ion_secure_cma_ops = { + .allocate = ion_secure_cma_allocate, + .free = ion_secure_cma_free, + .map_dma = ion_secure_cma_heap_map_dma, + .unmap_dma = ion_secure_cma_heap_unmap_dma, + .phys = ion_secure_cma_phys, + .map_user = ion_secure_cma_mmap, + .map_kernel = ion_secure_cma_map_kernel, + .unmap_kernel = ion_secure_cma_unmap_kernel, + .print_debug = ion_secure_cma_print_debug, +}; + +struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *data) +{ + struct ion_cma_secure_heap *sheap; + int map_size = BITS_TO_LONGS(data->size >> PAGE_SHIFT) * sizeof(long); + + sheap = 
kzalloc(sizeof(*sheap), GFP_KERNEL); + if (!sheap) + return ERR_PTR(-ENOMEM); + + sheap->dev = data->priv; + mutex_init(&sheap->chunk_lock); + mutex_init(&sheap->alloc_lock); + sheap->heap.ops = &ion_secure_cma_ops; + sheap->heap.type = ION_HEAP_TYPE_SECURE_DMA; + sheap->npages = data->size >> PAGE_SHIFT; + sheap->base = data->base; + sheap->heap_size = data->size; + sheap->bitmap = kmalloc(map_size, GFP_KERNEL); + INIT_LIST_HEAD(&sheap->chunks); + INIT_WORK(&sheap->work, ion_secure_pool_pages); + sheap->shrinker.seeks = DEFAULT_SEEKS; + sheap->shrinker.batch = 0; +/* sheap->shrinker.shrink = ion_secure_cma_shrinker; */ + sheap->default_prefetch_size = sheap->heap_size; + register_shrinker(&sheap->shrinker); + + if (!sheap->bitmap) { + kfree(sheap); + return ERR_PTR(-ENOMEM); + } + + if (data->extra_data) { + struct ion_cma_pdata *extra = data->extra_data; + sheap->default_prefetch_size = extra->default_prefetch_size; + } + + /* + * we initially mark everything in the allocator as being free so that + * allocations can come in later + */ + bitmap_fill(sheap->bitmap, sheap->npages); + + return &sheap->heap; +} + +void ion_secure_cma_heap_destroy(struct ion_heap *heap) +{ + struct ion_cma_secure_heap *sheap = + container_of(heap, struct ion_cma_secure_heap, heap); + + kfree(sheap); +} diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index ca15a87f6fd3..c034fbe91eef 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -2,6 +2,7 @@ * drivers/staging/android/ion/ion_heap.c * * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -22,6 +23,9 @@ #include <linux/sched.h> #include <linux/scatterlist.h> #include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/dma-mapping.h> #include "ion.h" #include "ion_priv.h" @@ -38,7 +42,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap, struct page **tmp = pages; if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); if (buffer->flags & ION_FLAG_CACHED) pgprot = PAGE_KERNEL; @@ -342,14 +346,15 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) } if (IS_ERR_OR_NULL(heap)) { - pr_err("%s: error creating heap %s type %d base %lu size %zu\n", + pr_err("%s: error creating heap %s type %d base %pa size %zu\n", __func__, heap_data->name, heap_data->type, - heap_data->base, heap_data->size); + &heap_data->base, heap_data->size); return ERR_PTR(-EINVAL); } heap->name = heap_data->name; heap->id = heap_data->id; + heap->priv = heap_data->priv; return heap; } EXPORT_SYMBOL(ion_heap_create); diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 59ee2f8f6761..c3a26f077364 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -1,5 +1,5 @@ /* - * drivers/staging/android/ion/ion_mem_pool.c + * drivers/staging/android/ion/ion_page_pool.c * * Copyright (C) 2011 Google, Inc. 
* @@ -22,19 +22,33 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/swap.h> +#include <linux/vmalloc.h> #include "ion_priv.h" static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) { - struct page *page = alloc_pages(pool->gfp_mask, pool->order); + struct page *page; + + page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order); if (!page) return NULL; + ion_page_pool_alloc_set_cache_policy(pool, page); +/* TODO QCOM - Identify if this sync is needed */ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, DMA_BIDIRECTIONAL); + + if (pool->gfp_mask & __GFP_ZERO) { + if (msm_ion_heap_high_order_page_zero(page, pool->order)) + goto error_free_pages; + } + return page; +error_free_pages: + __free_pages(page, pool->order); + return NULL; } static void ion_page_pool_free_pages(struct ion_page_pool *pool, @@ -76,22 +90,25 @@ static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) return page; } -struct page *ion_page_pool_alloc(struct ion_page_pool *pool) +void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool) { struct page *page = NULL; BUG_ON(!pool); - mutex_lock(&pool->mutex); - if (pool->high_count) - page = ion_page_pool_remove(pool, true); - else if (pool->low_count) - page = ion_page_pool_remove(pool, false); - mutex_unlock(&pool->mutex); + *from_pool = true; - if (!page) + if (mutex_trylock(&pool->mutex)) { + if (pool->high_count) + page = ion_page_pool_remove(pool, true); + else if (pool->low_count) + page = ion_page_pool_remove(pool, false); + mutex_unlock(&pool->mutex); + } + if (!page) { page = ion_page_pool_alloc_pages(pool); - + *from_pool = false; + } return page; } @@ -124,7 +141,11 @@ static int ion_page_pool_total(struct ion_page_pool *pool, bool high) int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, int nr_to_scan) { +<<<<<<< HEAD int freed = 0; +======= + int i; +>>>>>>> 9cf42e3... ion: add snapshot of ion support for MSM bool high; if (current_is_kswapd()) @@ -132,10 +153,14 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, else high = !!(gfp_mask & __GFP_HIGHMEM); +<<<<<<< HEAD if (nr_to_scan == 0) return ion_page_pool_total(pool, high); while (freed < nr_to_scan) { +======= + for (i = 0; i < nr_to_scan; i++) { +>>>>>>> 9cf42e3... ion: add snapshot of ion support for MSM struct page *page; mutex_lock(&pool->mutex); @@ -152,7 +177,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, freed += (1 << pool->order); } - return freed; + return ion_page_pool_total(pool, high); } struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index 6f59a2d36567..7864f5b3e436 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h @@ -2,6 +2,7 @@ * drivers/staging/android/ion/ion_priv.h * * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -23,12 +24,16 @@ #include <linux/mm_types.h> #include <linux/mutex.h> #include <linux/rbtree.h> +#include <linux/seq_file.h> + +#include "msm_ion_priv.h" #include <linux/sched.h> #include <linux/shrinker.h> #include <linux/types.h> #ifdef CONFIG_ION_POOL_CACHE_POLICY #include <asm/cacheflush.h> #endif +#include <linux/device.h> #include "ion.h" @@ -93,7 +98,11 @@ void ion_buffer_destroy(struct ion_buffer *buffer); /** * struct ion_heap_ops - ops to operate on a given heap * @allocate: allocate memory - * @free: free memory + * @free: free memory. Will be called with + * ION_PRIV_FLAG_SHRINKER_FREE set in buffer flags when + * called from a shrinker. In that case, the pages being + * free'd must be truly free'd back to the system, not put + * in a page pool or otherwise cached. * @phys get physical address of a buffer (only define on * physically contiguous heaps) * @map_dma map the memory for dma to a scatterlist @@ -101,6 +110,7 @@ void ion_buffer_destroy(struct ion_buffer *buffer); * @map_kernel map memory to the kernel * @unmap_kernel unmap memory to the kernel * @map_user map memory to userspace + * @unmap_user unmap memory to userspace * * allocate, phys, and map_user return 0 on success, -errno on error. * map_dma and map_kernel return pointer on success, ERR_PTR on @@ -124,6 +134,9 @@ struct ion_heap_ops { int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, struct vm_area_struct *vma); int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); + void (*unmap_user) (struct ion_heap *mapper, struct ion_buffer *buffer); + int (*print_debug)(struct ion_heap *heap, struct seq_file *s, + const struct list_head *mem_map); }; /** @@ -154,6 +167,7 @@ struct ion_heap_ops { * MUST be unique * @name: used for debugging * @shrinker: a shrinker for the heap + * @priv: private heap data * @free_list: free list head if deferred free is used * @free_list_size size of the deferred free list in bytes * @lock: protects the free list @@ -176,6 +190,7 @@ struct ion_heap { unsigned int id; const char *name; struct shrinker shrinker; + void *priv; struct list_head free_list; size_t free_list_size; spinlock_t free_lock; @@ -226,6 +241,12 @@ void ion_device_destroy(struct ion_device *dev); */ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); +struct pages_mem { + struct page **pages; + u32 size; + void (*free_fn) (const void *); +}; + /** * some helpers for common operations on buffers using the sg_table * and vaddr fields @@ -237,6 +258,22 @@ int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, int ion_heap_buffer_zero(struct ion_buffer *buffer); int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); +int msm_ion_heap_high_order_page_zero(struct page *page, int order); +int msm_ion_heap_buffer_zero(struct ion_buffer *buffer); +int msm_ion_heap_pages_zero(struct page **pages, int num_pages); +int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem); +void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem); + +/** + * ion_heap_init_shrinker + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op + * this function will be called to setup a shrinker to shrink the freelists + * and call the heap's shrink op. 
+ */ +void ion_heap_init_shrinker(struct ion_heap *heap); + /** * ion_heap_init_shrinker * @heap: the heap @@ -279,7 +316,7 @@ void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); /** - * ion_heap_freelist_shrink - drain the deferred free + * ion_heap_freelist_drain_from_shrinker - drain the deferred free * list, skipping any heap-specific * pooling or caching mechanisms * @@ -295,10 +332,10 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); * page pools or otherwise cache the pages. Everything must be * genuinely free'd back to the system. If you're free'ing from a * shrinker you probably want to use this. Note that this relies on - * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE - * flag. + * the heap.ops.free callback honoring the + * ION_PRIV_FLAG_SHRINKER_FREE flag. */ -size_t ion_heap_freelist_shrink(struct ion_heap *heap, +size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap, size_t size); /** @@ -382,7 +419,7 @@ struct ion_page_pool { struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); void ion_page_pool_destroy(struct ion_page_pool *); -struct page *ion_page_pool_alloc(struct ion_page_pool *); +void *ion_page_pool_alloc(struct ion_page_pool *, bool *from_pool); void ion_page_pool_free(struct ion_page_pool *, struct page *); void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *); @@ -437,4 +474,12 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, void ion_pages_sync_for_device(struct device *dev, struct page *page, size_t size, enum dma_data_direction dir); +int ion_walk_heaps(struct ion_client *client, int heap_id, void *data, + int (*f)(struct ion_heap *heap, void *data)); + +struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id); + +int ion_handle_put(struct ion_handle *handle); + #endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 57d115d0f179..1f5fe4701dae 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -2,6 +2,7 @@ * drivers/staging/android/ion/ion_system_heap.c * * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. 
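The rename of ion_heap_freelist_shrink() to ion_heap_freelist_drain_from_shrinker() above keeps the documented semantics: the size argument is in bytes and every buffer drained is freed with ION_PRIV_FLAG_SHRINKER_FREE set. A hedged sketch of how a shrinker scan path would use it (the wrapper name is illustrative, not part of this patch):

static unsigned long example_scan_freelist(struct ion_heap *heap,
					   unsigned long nr_pages_to_scan)
{
	size_t freed;

	/* Bypass page pools: every page drained here returns to the system. */
	freed = ion_heap_freelist_drain_from_shrinker(heap,
					nr_pages_to_scan * PAGE_SIZE);
	return freed / PAGE_SIZE;
}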
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -25,16 +26,24 @@ #include <linux/vmalloc.h> #include "ion.h" #include "ion_priv.h" +#include <linux/dma-mapping.h> +#include <trace/events/kmem.h> + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN | + __GFP_NORETRY) + & ~__GFP_DIRECT_RECLAIM; +static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN); + +#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS +static const unsigned int orders[] = {9, 8, 4, 0}; +#else +static const unsigned int orders[] = {0}; +#endif -static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | - __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM; -static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); -static const unsigned int orders[] = {8, 4, 0}; static const int num_orders = ARRAY_SIZE(orders); static int order_to_index(unsigned int order) { int i; - for (i = 0; i < num_orders; i++) if (order == orders[i]) return i; @@ -42,66 +51,76 @@ static int order_to_index(unsigned int order) return -1; } -static inline unsigned int order_to_size(int order) +static unsigned int order_to_size(int order) { return PAGE_SIZE << order; } struct ion_system_heap { struct ion_heap heap; - struct ion_page_pool *pools[0]; + struct ion_page_pool **uncached_pools; + struct ion_page_pool **cached_pools; +}; + +struct page_info { + struct page *page; + bool from_pool; + unsigned int order; + struct list_head list; }; static struct page *alloc_buffer_page(struct ion_system_heap *heap, struct ion_buffer *buffer, - unsigned long order) + unsigned long order, + bool *from_pool) { bool cached = ion_buffer_cached(buffer); - struct ion_page_pool *pool = heap->pools[order_to_index(order)]; struct page *page; + struct ion_page_pool *pool; - if (!cached) { - page = ion_page_pool_alloc(pool); - } else { - gfp_t gfp_flags = low_order_gfp_flags; - - if (order > 4) - gfp_flags = high_order_gfp_flags; - page = alloc_pages(gfp_flags | __GFP_COMP, order); - if (!page) - return NULL; - ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, - DMA_BIDIRECTIONAL); - } + if (!cached) + pool = heap->uncached_pools[order_to_index(order)]; + else + pool = heap->cached_pools[order_to_index(order)]; + page = ion_page_pool_alloc(pool, from_pool); + if (!page) + return 0; return page; } static void free_buffer_page(struct ion_system_heap *heap, - struct ion_buffer *buffer, struct page *page) + struct ion_buffer *buffer, struct page *page, + unsigned int order) { - unsigned int order = compound_order(page); bool cached = ion_buffer_cached(buffer); - if (!cached) { - struct ion_page_pool *pool = heap->pools[order_to_index(order)]; - if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) - ion_page_pool_free_immediate(pool, page); + if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) { + struct ion_page_pool *pool; + if (cached) + pool = heap->cached_pools[order_to_index(order)]; else - ion_page_pool_free(pool, page); + pool = heap->uncached_pools[order_to_index(order)]; + ion_page_pool_free(pool, page); } else { __free_pages(page, order); } } -static struct page *alloc_largest_available(struct ion_system_heap *heap, - struct ion_buffer *buffer, - unsigned long size, - unsigned int max_order) +static struct page_info *alloc_largest_available(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned int max_order) { struct page *page; + struct page_info *info; int i; + bool 
from_pool; + + info = kmalloc(sizeof(struct page_info), GFP_KERNEL); + if (!info) + return NULL; for (i = 0; i < num_orders; i++) { if (size < order_to_size(orders[i])) @@ -109,15 +128,47 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap, if (max_order < orders[i]) continue; - page = alloc_buffer_page(heap, buffer, orders[i]); + page = alloc_buffer_page(heap, buffer, orders[i], &from_pool); if (!page) continue; - return page; + info->page = page; + info->order = orders[i]; + info->from_pool = from_pool; + INIT_LIST_HEAD(&info->list); + return info; } + kfree(info); return NULL; } +static unsigned int process_info(struct page_info *info, + struct scatterlist *sg, + struct scatterlist *sg_sync, + struct pages_mem *data, unsigned int i) +{ + struct page *page = info->page; + unsigned int j; + + if (sg_sync) { + sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0); + sg_dma_address(sg_sync) = page_to_phys(page); + } + sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0); + /* + * This is not correct - sg_dma_address needs a dma_addr_t + * that is valid for the the targeted device, but this works + * on the currently targeted hardware. + */ + sg_dma_address(sg) = page_to_phys(page); + if (data) { + for (j = 0; j < (1 << info->order); ++j) + data->pages[i++] = nth_page(page, j); + } + list_del(&info->list); + kfree(info); + return i; +} static int ion_system_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, @@ -128,12 +179,19 @@ static int ion_system_heap_allocate(struct ion_heap *heap, struct ion_system_heap, heap); struct sg_table *table; + struct sg_table table_sync; struct scatterlist *sg; + struct scatterlist *sg_sync; + int ret; struct list_head pages; - struct page *page, *tmp_page; + struct list_head pages_from_pool; + struct page_info *info, *tmp_info; int i = 0; + unsigned int nents_sync = 0; unsigned long size_remaining = PAGE_ALIGN(size); unsigned int max_order = orders[0]; + struct pages_mem data; + unsigned int sz; if (align > PAGE_SIZE) return -EINVAL; @@ -141,74 +199,152 @@ static int ion_system_heap_allocate(struct ion_heap *heap, if (size / PAGE_SIZE > totalram_pages / 2) return -ENOMEM; + data.size = 0; INIT_LIST_HEAD(&pages); + INIT_LIST_HEAD(&pages_from_pool); while (size_remaining > 0) { - page = alloc_largest_available(sys_heap, buffer, size_remaining, + info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order); - if (!page) - goto free_pages; - list_add_tail(&page->lru, &pages); - size_remaining -= PAGE_SIZE << compound_order(page); - max_order = compound_order(page); + if (!info) + goto err; + + sz = (1 << info->order) * PAGE_SIZE; + + if (info->from_pool) { + list_add_tail(&info->list, &pages_from_pool); + } else { + list_add_tail(&info->list, &pages); + data.size += sz; + ++nents_sync; + } + size_remaining -= sz; + max_order = info->order; i++; } - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + + ret = msm_ion_heap_alloc_pages_mem(&data); + + if (ret) + goto err; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!table) - goto free_pages; + goto err_free_data_pages; + + ret = sg_alloc_table(table, i, GFP_KERNEL); + if (ret) + goto err1; - if (sg_alloc_table(table, i, GFP_KERNEL)) - goto free_table; + if (nents_sync) { + ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL); + if (ret) + goto err_free_sg; + } + i = 0; sg = table->sgl; - list_for_each_entry_safe(page, tmp_page, &pages, lru) { - sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0); + sg_sync = 
table_sync.sgl; + + /* + * We now have two separate lists. One list contains pages from the + * pool and the other pages from buddy. We want to merge these + * together while preserving the ordering of the pages (higher order + * first). + */ + do { + info = list_first_entry_or_null(&pages, struct page_info, list); + tmp_info = list_first_entry_or_null(&pages_from_pool, + struct page_info, list); + if (info && tmp_info) { + if (info->order >= tmp_info->order) { + i = process_info(info, sg, sg_sync, &data, i); + sg_sync = sg_next(sg_sync); + } else { + i = process_info(tmp_info, sg, 0, 0, i); + } + } else if (info) { + i = process_info(info, sg, sg_sync, &data, i); + sg_sync = sg_next(sg_sync); + } else if (tmp_info) { + i = process_info(tmp_info, sg, 0, 0, i); + } else { + BUG(); + } sg = sg_next(sg); - list_del(&page->lru); + + } while (sg); + + ret = msm_ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT); + if (ret) { + pr_err("Unable to zero pages\n"); + goto err_free_sg2; } + if (nents_sync) + dma_sync_sg_for_device(NULL, table_sync.sgl, table_sync.nents, + DMA_BIDIRECTIONAL); + buffer->priv_virt = table; + if (nents_sync) + sg_free_table(&table_sync); + msm_ion_heap_free_pages_mem(&data); return 0; +err_free_sg2: + /* We failed to zero buffers. Bypass pool */ + buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE; -free_table: + for_each_sg(table->sgl, sg, table->nents, i) + free_buffer_page(sys_heap, buffer, sg_page(sg), + get_order(sg->length)); + if (nents_sync) + sg_free_table(&table_sync); +err_free_sg: + sg_free_table(table); +err1: kfree(table); -free_pages: - list_for_each_entry_safe(page, tmp_page, &pages, lru) - free_buffer_page(sys_heap, buffer, page); +err_free_data_pages: + msm_ion_heap_free_pages_mem(&data); +err: + list_for_each_entry_safe(info, tmp_info, &pages, list) { + free_buffer_page(sys_heap, buffer, info->page, info->order); + kfree(info); + } + list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) { + free_buffer_page(sys_heap, buffer, info->page, info->order); + kfree(info); + } return -ENOMEM; } -static void ion_system_heap_free(struct ion_buffer *buffer) +void ion_system_heap_free(struct ion_buffer *buffer) { - struct ion_system_heap *sys_heap = container_of(buffer->heap, + struct ion_heap *heap = buffer->heap; + struct ion_system_heap *sys_heap = container_of(heap, struct ion_system_heap, heap); struct sg_table *table = buffer->sg_table; - bool cached = ion_buffer_cached(buffer); struct scatterlist *sg; + LIST_HEAD(pages); int i; - /* - * uncached pages come from the page pools, zero them before returning - * for security purposes (other allocations are zerod at - * alloc time - */ - if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) - ion_heap_buffer_zero(buffer); + if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) + msm_ion_heap_buffer_zero(buffer); for_each_sg(table->sgl, sg, table->nents, i) - free_buffer_page(sys_heap, buffer, sg_page(sg)); + free_buffer_page(sys_heap, buffer, sg_page(sg), + get_order(sg->length)); sg_free_table(table); kfree(table); } -static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, - struct ion_buffer *buffer) +struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) { return buffer->priv_virt; } -static void ion_system_heap_unmap_dma(struct ion_heap *heap, - struct ion_buffer *buffer) +void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) { } @@ -217,7 +353,7 @@ static int ion_system_heap_shrink(struct 
ion_heap *heap, gfp_t gfp_mask, { struct ion_system_heap *sys_heap; int nr_total = 0; - int i, nr_freed; + int i; int only_scan = 0; sys_heap = container_of(heap, struct ion_system_heap, heap); @@ -226,13 +362,13 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, only_scan = 1; for (i = 0; i < num_orders; i++) { - struct ion_page_pool *pool = sys_heap->pools[i]; - - nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); - nr_total += nr_freed; + struct ion_page_pool *pool = sys_heap->uncached_pools[i]; + nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); + pool = sys_heap->cached_pools[i]; + nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); if (!only_scan) { - nr_to_scan -= nr_freed; + nr_to_scan -= nr_total; /* shrink completed */ if (nr_to_scan <= 0) break; @@ -261,52 +397,105 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, struct ion_system_heap, heap); int i; + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->uncached_pools[i]; + seq_printf(s, + "%d order %u highmem pages in uncached pool = %lu total\n", + pool->high_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->high_count); + seq_printf(s, + "%d order %u lowmem pages in uncached pool = %lu total\n", + pool->low_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->low_count); + } for (i = 0; i < num_orders; i++) { - struct ion_page_pool *pool = sys_heap->pools[i]; - - seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", - pool->high_count, pool->order, - (PAGE_SIZE << pool->order) * pool->high_count); - seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", - pool->low_count, pool->order, - (PAGE_SIZE << pool->order) * pool->low_count); + struct ion_page_pool *pool = sys_heap->cached_pools[i]; + seq_printf(s, + "%d order %u highmem pages in cached pool = %lu total\n", + pool->high_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->high_count); + seq_printf(s, + "%d order %u lowmem pages in cached pool = %lu total\n", + pool->low_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->low_count); } + return 0; } -struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) + +static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) { - struct ion_system_heap *heap; int i; + for (i = 0; i < num_orders; i++) + if (pools[i]) + ion_page_pool_destroy(pools[i]); +} - heap = kzalloc(sizeof(struct ion_system_heap) + - sizeof(struct ion_page_pool *) * num_orders, - GFP_KERNEL); - if (!heap) - return ERR_PTR(-ENOMEM); - heap->heap.ops = &system_heap_ops; - heap->heap.type = ION_HEAP_TYPE_SYSTEM; - heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; - +/** + * ion_system_heap_create_pools - Creates pools for all orders + * + * If this fails you don't need to destroy any pools. It's all or + * nothing. If it succeeds you'll eventually need to use + * ion_system_heap_destroy_pools to destroy the pools. 
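The helper documented above makes pool setup transactional: either every order gets its pool or none do, so ion_system_heap_create() never has to reason about a half-built array. The same unwind-on-failure shape in standalone form (plain C, illustrative names), matching the implementation that follows:

#include <stdlib.h>
#include <string.h>

#define NUM_ORDERS 4

static void destroy_all(void **objs)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		free(objs[i]);			/* free(NULL) is a no-op */
}

static int create_all(void **objs)
{
	int i;

	memset(objs, 0, NUM_ORDERS * sizeof(*objs));
	for (i = 0; i < NUM_ORDERS; i++) {
		objs[i] = malloc(64);
		if (!objs[i]) {
			destroy_all(objs);	/* undo everything built so far */
			return 1;		/* caller has nothing to clean up */
		}
	}
	return 0;
}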
+ */ +static int ion_system_heap_create_pools(struct ion_page_pool **pools) +{ + int i; for (i = 0; i < num_orders; i++) { struct ion_page_pool *pool; gfp_t gfp_flags = low_order_gfp_flags; - if (orders[i] > 4) + if (orders[i]) gfp_flags = high_order_gfp_flags; pool = ion_page_pool_create(gfp_flags, orders[i]); if (!pool) - goto destroy_pools; - heap->pools[i] = pool; + goto err_create_pool; + pools[i] = pool; } + return 0; +err_create_pool: + ion_system_heap_destroy_pools(pools); + return 1; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_system_heap *heap; + int pools_size = sizeof(struct ion_page_pool *) * num_orders; + + heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->heap.ops = &system_heap_ops; + heap->heap.type = ION_HEAP_TYPE_SYSTEM; + heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + + heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL); + if (!heap->uncached_pools) + goto err_alloc_uncached_pools; + + heap->cached_pools = kzalloc(pools_size, GFP_KERNEL); + if (!heap->cached_pools) + goto err_alloc_cached_pools; + + if (ion_system_heap_create_pools(heap->uncached_pools)) + goto err_create_uncached_pools; + + if (ion_system_heap_create_pools(heap->cached_pools)) + goto err_create_cached_pools; heap->heap.debug_show = ion_system_heap_debug_show; return &heap->heap; -destroy_pools: - while (i--) - ion_page_pool_destroy(heap->pools[i]); +err_create_cached_pools: + ion_system_heap_destroy_pools(heap->uncached_pools); +err_create_uncached_pools: + kfree(heap->cached_pools); +err_alloc_cached_pools: + kfree(heap->uncached_pools); +err_alloc_uncached_pools: kfree(heap); return ERR_PTR(-ENOMEM); } @@ -316,10 +505,11 @@ void ion_system_heap_destroy(struct ion_heap *heap) struct ion_system_heap *sys_heap = container_of(heap, struct ion_system_heap, heap); - int i; - for (i = 0; i < num_orders; i++) - ion_page_pool_destroy(sys_heap->pools[i]); + ion_system_heap_destroy_pools(sys_heap->uncached_pools); + ion_system_heap_destroy_pools(sys_heap->cached_pools); + kfree(sys_heap->uncached_pools); + kfree(sys_heap->cached_pools); kfree(sys_heap); } @@ -338,7 +528,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, if (align > (PAGE_SIZE << order)) return -EINVAL; - page = alloc_pages(low_order_gfp_flags, order); + page = alloc_pages(low_order_gfp_flags | __GFP_ZERO, order); if (!page) return -ENOMEM; @@ -348,15 +538,15 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, for (i = len >> PAGE_SHIFT; i < (1 << order); i++) __free_page(page + i); - table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!table) { ret = -ENOMEM; - goto free_pages; + goto out; } ret = sg_alloc_table(table, 1, GFP_KERNEL); if (ret) - goto free_table; + goto out; sg_set_page(table->sgl, page, len, 0); @@ -366,16 +556,14 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, return 0; -free_table: - kfree(table); -free_pages: +out: for (i = 0; i < len >> PAGE_SHIFT; i++) __free_page(page + i); - + kfree(table); return ret; } -static void ion_system_contig_heap_free(struct ion_buffer *buffer) +void ion_system_contig_heap_free(struct ion_buffer *buffer) { struct sg_table *table = buffer->priv_virt; struct page *page = sg_page(table->sgl); @@ -399,14 +587,14 @@ static int ion_system_contig_heap_phys(struct ion_heap *heap, return 0; } -static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, 
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, struct ion_buffer *buffer) { return buffer->priv_virt; } -static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, - struct ion_buffer *buffer) +void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) { } diff --git a/drivers/staging/android/ion/msm/Makefile b/drivers/staging/android/ion/msm/Makefile new file mode 100644 index 000000000000..886727f55441 --- /dev/null +++ b/drivers/staging/android/ion/msm/Makefile @@ -0,0 +1,4 @@ +obj-y += msm_ion.o secure_buffer.o +ifdef CONFIG_COMPAT +obj-y += compat_msm_ion.o +endif diff --git a/drivers/staging/android/ion/msm/compat_msm_ion.c b/drivers/staging/android/ion/msm/compat_msm_ion.c new file mode 100644 index 000000000000..c34b3a7827d2 --- /dev/null +++ b/drivers/staging/android/ion/msm/compat_msm_ion.c @@ -0,0 +1,157 @@ +/* Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/compat.h> +#include <linux/ion.h> +#include <linux/msm_ion.h> +#include <linux/uaccess.h> +#include "../ion_priv.h" +#include "../compat_ion.h" + +struct compat_ion_flush_data { + compat_ion_user_handle_t handle; + compat_int_t fd; + compat_uptr_t vaddr; + compat_uint_t offset; + compat_uint_t length; +}; + +struct compat_ion_prefetch_data { + compat_int_t heap_id; + compat_ulong_t len; +}; + +#define COMPAT_ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \ + struct compat_ion_flush_data) +#define COMPAT_ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \ + struct compat_ion_flush_data) +#define COMPAT_ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \ + struct compat_ion_flush_data) +#define COMPAT_ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \ + struct compat_ion_prefetch_data) +#define COMPAT_ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \ + struct compat_ion_prefetch_data) + +static int compat_get_ion_flush_data( + struct compat_ion_flush_data __user *data32, + struct ion_flush_data __user *data) +{ + compat_ion_user_handle_t h; + compat_int_t i; + compat_uptr_t u; + compat_ulong_t l; + int err; + + err = get_user(h, &data32->handle); + err |= put_user(h, &data->handle); + err |= get_user(i, &data32->fd); + err |= put_user(i, &data->fd); + err |= get_user(u, &data32->vaddr); + /* upper bits won't get set, zero them */ + data->vaddr = NULL; + err |= put_user(u, (compat_uptr_t *)&data->vaddr); + err |= get_user(l, &data32->offset); + err |= put_user(l, &data->offset); + err |= get_user(l, &data32->length); + err |= put_user(l, &data->length); + + return err; +} + +static int compat_get_ion_prefetch_data( + struct compat_ion_prefetch_data __user *data32, + struct ion_prefetch_data __user *data) +{ + compat_int_t i; + compat_ulong_t l; + int err; + + err = get_user(i, &data32->heap_id); + err |= put_user(i, &data->heap_id); + err |= get_user(l, &data32->len); + err |= put_user(l, &data->len); + + return err; +} + + + +static unsigned int convert_cmd(unsigned int cmd) +{ + switch (cmd) { + case COMPAT_ION_IOC_CLEAN_CACHES: + return ION_IOC_CLEAN_CACHES; + case 
COMPAT_ION_IOC_INV_CACHES: + return ION_IOC_INV_CACHES; + case COMPAT_ION_IOC_CLEAN_INV_CACHES: + return ION_IOC_CLEAN_INV_CACHES; + case COMPAT_ION_IOC_PREFETCH: + return ION_IOC_PREFETCH; + case COMPAT_ION_IOC_DRAIN: + return ION_IOC_DRAIN; + default: + return cmd; + } +} + +long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case COMPAT_ION_IOC_CLEAN_CACHES: + case COMPAT_ION_IOC_INV_CACHES: + case COMPAT_ION_IOC_CLEAN_INV_CACHES: + { + struct compat_ion_flush_data __user *data32; + struct ion_flush_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_flush_data(data32, data); + if (err) + return err; + + return msm_ion_custom_ioctl(client, convert_cmd(cmd), + (unsigned long)data); + } + case COMPAT_ION_IOC_PREFETCH: + case COMPAT_ION_IOC_DRAIN: + { + struct compat_ion_prefetch_data __user *data32; + struct ion_prefetch_data __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_ion_prefetch_data(data32, data); + if (err) + return err; + + return msm_ion_custom_ioctl(client, convert_cmd(cmd), + (unsigned long)data); + + } + default: + if (is_compat_task()) + return -ENOIOCTLCMD; + else + return msm_ion_custom_ioctl(client, cmd, arg); + } +} diff --git a/drivers/staging/android/ion/msm/compat_msm_ion.h b/drivers/staging/android/ion/msm/compat_msm_ion.h new file mode 100644 index 000000000000..72083eb8108a --- /dev/null +++ b/drivers/staging/android/ion/msm/compat_msm_ion.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_COMPAT_ION_H +#define _LINUX_COMPAT_ION_H + +#include <linux/ion.h> + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd, + unsigned long arg); + +#define compat_ion_user_handle_t compat_int_t + +#else + +#define compat_msm_ion_ioctl msm_ion_custom_ioctl + +#endif +#endif diff --git a/drivers/staging/android/ion/msm/ion_cp_common.h b/drivers/staging/android/ion/msm/ion_cp_common.h new file mode 100644 index 000000000000..035ec6e7ea17 --- /dev/null +++ b/drivers/staging/android/ion/msm/ion_cp_common.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
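The compat shim above is needed because the ioctl numbers themselves differ between a 32-bit caller and a 64-bit kernel: _IOWR() folds the size of the argument struct into the command value, and the compat layout is smaller (vaddr shrinks from a real pointer to a compat_uptr_t). Assuming the native commands use the same magic and numbers (the native header is not part of this hunk), the pairs differ only in that size field, which is why convert_cmd() must translate before dispatching. The EXAMPLE_ names below are purely illustrative:

#include <linux/ioctl.h>

/* Native definition assumed for illustration -- not shown in this patch. */
#define EXAMPLE_NATIVE_CLEAN_CACHES					\
	_IOWR(ION_IOC_MSM_MAGIC, 0, struct ion_flush_data)
/* Compat definition from above: same magic and nr, different size field. */
#define EXAMPLE_COMPAT_CLEAN_CACHES					\
	_IOWR(ION_IOC_MSM_MAGIC, 0, struct compat_ion_flush_data)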
+ */ + +#ifndef ION_CP_COMMON_H +#define ION_CP_COMMON_H + +#include <asm-generic/errno-base.h> +#include <linux/msm_ion.h> + +#define ION_CP_V1 1 +#define ION_CP_V2 2 + +struct ion_cp_buffer { + phys_addr_t buffer; + atomic_t secure_cnt; + int is_secure; + int want_delayed_unsecure; + /* + * Currently all user/kernel mapping is protected by the heap lock. + * This is sufficient to protect the map count as well. The lock + * should be used to protect map_cnt if the whole heap lock is + * ever removed. + */ + atomic_t map_cnt; + /* + * protects secure_cnt for securing. + */ + struct mutex lock; + int version; + void *data; + /* + * secure is happening at allocation time, ignore version/data check + */ + bool ignore_check; +}; + +#if defined(CONFIG_ION_MSM) +/* + * ion_cp2_protect_mem - secures memory via trustzone + * + * @chunks - physical address of the array containing the chunks to + * be locked down + * @nchunks - number of entries in the array + * @chunk_size - size of each memory chunk + * @usage - usage hint + * @lock - 1 for lock, 0 for unlock + * + * return value is the result of the scm call + */ +int ion_cp_change_chunks_state(unsigned long chunks, unsigned int nchunks, + unsigned int chunk_size, enum cp_mem_usage usage, + int lock); + +int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type, int version, + void *data); + +int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type, int version, + void *data); + +int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, void *data, + int flags); + +int ion_cp_unsecure_buffer(struct ion_buffer *buffer, int force_unsecure); + +#else +static inline int ion_cp_change_chunks_state(unsigned long chunks, + unsigned int nchunks, unsigned int chunk_size, + enum cp_mem_usage usage, int lock) +{ + return -ENODEV; +} + +static inline int ion_cp_protect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type, int version, + void *data) +{ + return -ENODEV; +} + +static inline int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, + unsigned int permission_type, int version, + void *data) +{ + return -ENODEV; +} + +static inline int ion_cp_secure_buffer(struct ion_buffer *buffer, int version, + void *data, int flags) +{ + return -ENODEV; +} + +static inline int ion_cp_unsecure_buffer(struct ion_buffer *buffer, + int force_unsecure) +{ + return -ENODEV; +} +#endif + +#endif diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c new file mode 100644 index 000000000000..63fc82f113f1 --- /dev/null +++ b/drivers/staging/android/ion/msm/msm_ion.c @@ -0,0 +1,1088 @@ +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/msm_ion.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <linux/sched.h> +#include <linux/rwsem.h> +#include <linux/uaccess.h> +#include <linux/memblock.h> +#include <linux/dma-mapping.h> +#include <linux/dma-contiguous.h> +#include <linux/vmalloc.h> +#include <linux/highmem.h> +#include <linux/cma.h> +#include <linux/module.h> +#include <linux/show_mem_notifier.h> +#include <asm/cacheflush.h> +#include "../ion_priv.h" +#include "ion_cp_common.h" +#include "compat_msm_ion.h" + +#define ION_COMPAT_STR "qcom,msm-ion" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +struct ion_heap_desc { + unsigned int id; + enum ion_heap_type type; + const char *name; + unsigned int permission_type; +}; + + +#ifdef CONFIG_OF +static struct ion_heap_desc ion_heap_meta[] = { + { + .id = ION_SYSTEM_HEAP_ID, + .name = ION_SYSTEM_HEAP_NAME, + }, + { + .id = ION_SYSTEM_CONTIG_HEAP_ID, + .name = ION_KMALLOC_HEAP_NAME, + }, + { + .id = ION_CP_MM_HEAP_ID, + .name = ION_MM_HEAP_NAME, + .permission_type = IPT_TYPE_MM_CARVEOUT, + }, + { + .id = ION_MM_FIRMWARE_HEAP_ID, + .name = ION_MM_FIRMWARE_HEAP_NAME, + }, + { + .id = ION_CP_MFC_HEAP_ID, + .name = ION_MFC_HEAP_NAME, + .permission_type = IPT_TYPE_MFC_SHAREDMEM, + }, + { + .id = ION_SF_HEAP_ID, + .name = ION_SF_HEAP_NAME, + }, + { + .id = ION_QSECOM_HEAP_ID, + .name = ION_QSECOM_HEAP_NAME, + }, + { + .id = ION_AUDIO_HEAP_ID, + .name = ION_AUDIO_HEAP_NAME, + }, + { + .id = ION_PIL1_HEAP_ID, + .name = ION_PIL1_HEAP_NAME, + }, + { + .id = ION_PIL2_HEAP_ID, + .name = ION_PIL2_HEAP_NAME, + }, + { + .id = ION_CP_WB_HEAP_ID, + .name = ION_WB_HEAP_NAME, + }, + { + .id = ION_CAMERA_HEAP_ID, + .name = ION_CAMERA_HEAP_NAME, + }, + { + .id = ION_ADSP_HEAP_ID, + .name = ION_ADSP_HEAP_NAME, + } +}; +#endif + +struct ion_client *msm_ion_client_create(const char *name) +{ + /* + * The assumption is that if there is a NULL device, the ion + * driver has not yet probed. + */ + if (idev == NULL) + return ERR_PTR(-EPROBE_DEFER); + + if (IS_ERR(idev)) + return (struct ion_client *)idev; + + return ion_client_create(idev, name); +} +EXPORT_SYMBOL(msm_ion_client_create); + +int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *vaddr, unsigned long len, unsigned int cmd) +{ + return ion_do_cache_op(client, handle, vaddr, 0, len, cmd); +} +EXPORT_SYMBOL(msm_ion_do_cache_op); + +static int ion_no_pages_cache_ops(struct ion_client *client, + struct ion_handle *handle, + void *vaddr, + unsigned int offset, unsigned int length, + unsigned int cmd) +{ + unsigned long size_to_vmap, total_size; + int i, j, ret; + void *ptr = NULL; + ion_phys_addr_t buff_phys = 0; + ion_phys_addr_t buff_phys_start = 0; + size_t buf_length = 0; + + ret = ion_phys(client, handle, &buff_phys_start, &buf_length); + if (ret) + return -EINVAL; + + buff_phys = buff_phys_start; + + if (!vaddr) { + /* + * Split the vmalloc space into smaller regions in + * order to clean and/or invalidate the cache. 
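The comment above describes the strategy the following loop implements for buffers without struct pages (carveout/CP memory): map the physical range one window at a time, and when ioremap() cannot satisfy the requested window, halve it and retry a bounded number of times before giving up. Below is a standalone sketch of the same windowing logic with a stand-in for the mapping call (all names illustrative, not driver code):

#include <stddef.h>

static unsigned char scratch[1u << 20];	/* pretend 1 MiB is the mapping limit */

static void *try_map(size_t phys, size_t len)
{
	(void)phys;
	return len <= sizeof(scratch) ? scratch : NULL;
}

static void undo_map(void *va) { (void)va; }

static int walk_range(size_t phys, size_t total,
		      void (*op)(void *va, size_t len))
{
	size_t window = 8u << 20;	/* optimistic first window: 8 MiB */
	size_t done;
	int retry;

	for (done = 0; done < total; done += window) {
		void *va = NULL;

		if (window > total - done)
			window = total - done;
		/* Halve the window until mapping succeeds, up to a retry cap. */
		for (retry = 0; retry < 10 && window; retry++) {
			va = try_map(phys + done, window);
			if (va)
				break;
			window >>= 1;
		}
		if (!va)
			return -1;	/* could not map even a small window */
		op(va, window);		/* clean/invalidate this window */
		undo_map(va);
	}
	return 0;
}

msm_ion_heap_pages_zero() later in this patch uses the same halving pattern with vmap() instead of ioremap().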
+ */ + size_to_vmap = ((VMALLOC_END - VMALLOC_START)/8); + total_size = buf_length; + + for (i = 0; i < total_size; i += size_to_vmap) { + size_to_vmap = min(size_to_vmap, total_size - i); + for (j = 0; j < 10 && size_to_vmap; ++j) { + ptr = ioremap(buff_phys, size_to_vmap); + if (ptr) { + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(ptr, + ptr + size_to_vmap); + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(ptr, + ptr + size_to_vmap); + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(ptr, + ptr + size_to_vmap); + break; + default: + return -EINVAL; + } + buff_phys += size_to_vmap; + break; + } else { + size_to_vmap >>= 1; + } + } + if (!ptr) { + pr_err("Couldn't io-remap the memory\n"); + return -EINVAL; + } + iounmap(ptr); + } + } else { + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + dmac_clean_range(vaddr, vaddr + length); + break; + case ION_IOC_INV_CACHES: + dmac_inv_range(vaddr, vaddr + length); + break; + case ION_IOC_CLEAN_INV_CACHES: + dmac_flush_range(vaddr, vaddr + length); + break; + default: + return -EINVAL; + } + } + + return 0; +} + +static int ion_pages_cache_ops(struct ion_client *client, + struct ion_handle *handle, + void *vaddr, unsigned int offset, unsigned int length, + unsigned int cmd) +{ + struct sg_table *table = NULL; + + table = ion_sg_table(client, handle); + if (IS_ERR_OR_NULL(table)) + return PTR_ERR(table); + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + if (!vaddr) + dma_sync_sg_for_device(NULL, table->sgl, + table->nents, DMA_TO_DEVICE); + else + dmac_clean_range(vaddr, vaddr + length); + break; + case ION_IOC_INV_CACHES: + dma_sync_sg_for_cpu(NULL, table->sgl, + table->nents, DMA_FROM_DEVICE); + break; + case ION_IOC_CLEAN_INV_CACHES: + if (!vaddr) { + dma_sync_sg_for_device(NULL, table->sgl, + table->nents, DMA_TO_DEVICE); + dma_sync_sg_for_cpu(NULL, table->sgl, + table->nents, DMA_FROM_DEVICE); + } else { + dmac_flush_range(vaddr, vaddr + length); + } + break; + default: + return -EINVAL; + } + + return 0; +} + +int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *uaddr, unsigned long offset, unsigned long len, + unsigned int cmd) +{ + int ret = -EINVAL; + unsigned long flags; + struct sg_table *table; + struct page *page; + + ret = ion_handle_get_flags(client, handle, &flags); + if (ret) + return -EINVAL; + + if (!ION_IS_CACHED(flags)) + return 0; + + if (flags & ION_FLAG_SECURE) + return 0; + + table = ion_sg_table(client, handle); + + if (IS_ERR_OR_NULL(table)) + return PTR_ERR(table); + + page = sg_page(table->sgl); + + if (page) + ret = ion_pages_cache_ops(client, handle, uaddr, + offset, len, cmd); + else + ret = ion_no_pages_cache_ops(client, handle, uaddr, + offset, len, cmd); + + return ret; + +} + +static void msm_ion_allocate(struct ion_platform_heap *heap) +{ + + if (!heap->base && heap->extra_data) { + WARN(1, "Specifying carveout heaps without a base is deprecated. 
Convert to the DMA heap type instead"); + return; + } +} + +static int is_heap_overlapping(const struct ion_platform_heap *heap1, + const struct ion_platform_heap *heap2) +{ + ion_phys_addr_t heap1_base = heap1->base; + ion_phys_addr_t heap2_base = heap2->base; + ion_phys_addr_t heap1_end = heap1->base + heap1->size - 1; + ion_phys_addr_t heap2_end = heap2->base + heap2->size - 1; + + if (heap1_base == heap2_base) + return 1; + if (heap1_base < heap2_base && heap1_end >= heap2_base) + return 1; + if (heap2_base < heap1_base && heap2_end >= heap1_base) + return 1; + return 0; +} + +static void check_for_heap_overlap(const struct ion_platform_heap heap_list[], + unsigned long nheaps) +{ + unsigned long i; + unsigned long j; + + for (i = 0; i < nheaps; ++i) { + const struct ion_platform_heap *heap1 = &heap_list[i]; + if (!heap1->base) + continue; + for (j = i + 1; j < nheaps; ++j) { + const struct ion_platform_heap *heap2 = &heap_list[j]; + if (!heap2->base) + continue; + if (is_heap_overlapping(heap1, heap2)) { + panic("Memory in heap %s overlaps with heap %s\n", + heap1->name, heap2->name); + } + } + } +} + +#ifdef CONFIG_OF +static int msm_init_extra_data(struct device_node *node, + struct ion_platform_heap *heap, + const struct ion_heap_desc *heap_desc) +{ + int ret = 0; + + switch ((int) heap->type) { + case ION_HEAP_TYPE_CARVEOUT: + { + heap->extra_data = kzalloc(sizeof(struct ion_co_heap_pdata), + GFP_KERNEL); + if (!heap->extra_data) + ret = -ENOMEM; + break; + } + case ION_HEAP_TYPE_SECURE_DMA: + { + unsigned int val; + + ret = of_property_read_u32(node, + "qcom,default-prefetch-size", &val); + + if (!ret) { + heap->extra_data = kzalloc(sizeof(struct ion_cma_pdata), + GFP_KERNEL); + + if (!heap->extra_data) { + ret = -ENOMEM; + } else { + struct ion_cma_pdata *extra = heap->extra_data; + extra->default_prefetch_size = val; + } + } else { + ret = 0; + } + break; + } + default: + heap->extra_data = 0; + break; + } + return ret; +} + +#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \ + .heap_type = ION_HEAP_TYPE_##h, } + +static struct heap_types_info { + const char *name; + int heap_type; +} heap_types_info[] = { + MAKE_HEAP_TYPE_MAPPING(SYSTEM), + MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG), + MAKE_HEAP_TYPE_MAPPING(CARVEOUT), + MAKE_HEAP_TYPE_MAPPING(CHUNK), + MAKE_HEAP_TYPE_MAPPING(DMA), + MAKE_HEAP_TYPE_MAPPING(SECURE_DMA), +}; + +static int msm_ion_get_heap_type_from_dt_node(struct device_node *node, + int *heap_type) +{ + const char *name; + int i, ret = -EINVAL; + ret = of_property_read_string(node, "qcom,ion-heap-type", &name); + if (ret) + goto out; + for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) { + if (!strcmp(heap_types_info[i].name, name)) { + *heap_type = heap_types_info[i].heap_type; + ret = 0; + goto out; + } + } + WARN(1, "Unknown heap type: %s. 
You might need to update heap_types_info in %s", + name, __FILE__); +out: + return ret; +} + +static int msm_ion_populate_heap(struct device_node *node, + struct ion_platform_heap *heap) +{ + unsigned int i; + int ret = -EINVAL, heap_type = -1; + unsigned int len = ARRAY_SIZE(ion_heap_meta); + for (i = 0; i < len; ++i) { + if (ion_heap_meta[i].id == heap->id) { + heap->name = ion_heap_meta[i].name; + ret = msm_ion_get_heap_type_from_dt_node(node, + &heap_type); + if (ret) + break; + heap->type = heap_type; + ret = msm_init_extra_data(node, heap, + &ion_heap_meta[i]); + break; + } + } + if (ret) + pr_err("%s: Unable to populate heap, error: %d", __func__, ret); + return ret; +} + +static void free_pdata(const struct ion_platform_data *pdata) +{ + unsigned int i; + for (i = 0; i < pdata->nr; ++i) + kfree(pdata->heaps[i].extra_data); + kfree(pdata->heaps); + kfree(pdata); +} + +static void msm_ion_get_heap_align(struct device_node *node, + struct ion_platform_heap *heap) +{ + unsigned int val; + + int ret = of_property_read_u32(node, "qcom,heap-align", &val); + if (!ret) { + switch ((int) heap->type) { + case ION_HEAP_TYPE_CARVEOUT: + { + struct ion_co_heap_pdata *extra = + heap->extra_data; + extra->align = val; + break; + } + default: + pr_err("ION-heap %s: Cannot specify alignment for this type of heap\n", + heap->name); + break; + } + } +} + +static int msm_ion_get_heap_size(struct device_node *node, + struct ion_platform_heap *heap) +{ + unsigned int val; + int ret = 0; + u32 out_values[2]; + struct device_node *pnode; + + ret = of_property_read_u32(node, "qcom,memory-reservation-size", &val); + if (!ret) + heap->size = val; + + ret = of_property_read_u32_array(node, "qcom,memory-fixed", + out_values, 2); + if (!ret) { + heap->size = out_values[1]; + goto out; + } + + pnode = of_parse_phandle(node, "linux,contiguous-region", 0); + if (pnode != NULL) { + const u32 *addr; + u64 size; + + addr = of_get_address(pnode, 0, &size, NULL); + if (!addr) { + of_node_put(pnode); + ret = -EINVAL; + goto out; + } + heap->size = (u32) size; + ret = 0; + of_node_put(pnode); + } + + ret = 0; +out: + return ret; +} + +static void msm_ion_get_heap_base(struct device_node *node, + struct ion_platform_heap *heap) +{ + u32 out_values[2]; + int ret = 0; + struct device_node *pnode; + + ret = of_property_read_u32_array(node, "qcom,memory-fixed", + out_values, 2); + if (!ret) + heap->base = out_values[0]; + + pnode = of_parse_phandle(node, "linux,contiguous-region", 0); + if (pnode != NULL) { + heap->base = cma_get_base(heap->priv); + of_node_put(pnode); + } + + return; +} + +static void msm_ion_get_heap_adjacent(struct device_node *node, + struct ion_platform_heap *heap) +{ + unsigned int val; + int ret = of_property_read_u32(node, "qcom,heap-adjacent", &val); + if (!ret) { + switch (heap->type) { + case ION_HEAP_TYPE_CARVEOUT: + { + struct ion_co_heap_pdata *extra = heap->extra_data; + extra->adjacent_mem_id = val; + break; + } + default: + pr_err("ION-heap %s: Cannot specify adjcent mem id for this type of heap\n", + heap->name); + break; + } + } else { + switch (heap->type) { + case ION_HEAP_TYPE_CARVEOUT: + { + struct ion_co_heap_pdata *extra = heap->extra_data; + extra->adjacent_mem_id = INVALID_HEAP_ID; + break; + } + default: + break; + } + } +} + +static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = 0; + struct ion_platform_heap *heaps = NULL; + struct device_node *node; + struct platform_device *new_dev = NULL; + const struct device_node 
*dt_node = pdev->dev.of_node; + uint32_t val = 0; + int ret = 0; + uint32_t num_heaps = 0; + int idx = 0; + + for_each_available_child_of_node(dt_node, node) + num_heaps++; + + if (!num_heaps) + return ERR_PTR(-EINVAL); + + pdata = kzalloc(sizeof(struct ion_platform_data), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + heaps = kzalloc(sizeof(struct ion_platform_heap)*num_heaps, GFP_KERNEL); + if (!heaps) { + kfree(pdata); + return ERR_PTR(-ENOMEM); + } + + pdata->heaps = heaps; + pdata->nr = num_heaps; + + for_each_available_child_of_node(dt_node, node) { + new_dev = of_platform_device_create(node, NULL, &pdev->dev); + if (!new_dev) { + pr_err("Failed to create device %s\n", node->name); + goto free_heaps; + } + + pdata->heaps[idx].priv = &new_dev->dev; + /** + * TODO: Replace this with of_get_address() when this patch + * gets merged: http:// + * permalink.gmane.org/gmane.linux.drivers.devicetree/18614 + */ + ret = of_property_read_u32(node, "reg", &val); + if (ret) { + pr_err("%s: Unable to find reg key", __func__); + goto free_heaps; + } + pdata->heaps[idx].id = val; + + ret = msm_ion_populate_heap(node, &pdata->heaps[idx]); + if (ret) + goto free_heaps; + + msm_ion_get_heap_base(node, &pdata->heaps[idx]); + msm_ion_get_heap_align(node, &pdata->heaps[idx]); + + ret = msm_ion_get_heap_size(node, &pdata->heaps[idx]); + if (ret) + goto free_heaps; + + msm_ion_get_heap_adjacent(node, &pdata->heaps[idx]); + + ++idx; + } + return pdata; + +free_heaps: + free_pdata(pdata); + return ERR_PTR(ret); +} +#else +static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev) +{ + return NULL; +} + +static void free_pdata(const struct ion_platform_data *pdata) +{ + +} +#endif + +static int check_vaddr_bounds(unsigned long start, unsigned long end) +{ + struct mm_struct *mm = current->active_mm; + struct vm_area_struct *vma; + int ret = 1; + + if (end < start) + goto out; + + vma = find_vma(mm, start); + if (vma && vma->vm_start < end) { + if (start < vma->vm_start) + goto out; + if (end > vma->vm_end) + goto out; + ret = 0; + } + +out: + return ret; +} + +int ion_heap_allow_secure_allocation(enum ion_heap_type type) +{ + return type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA); +} + +int ion_heap_allow_handle_secure(enum ion_heap_type type) +{ + return type == ((enum ion_heap_type) ION_HEAP_TYPE_SECURE_DMA); +} + +int ion_heap_allow_heap_secure(enum ion_heap_type type) +{ + return false; +} + +/* fix up the cases where the ioctl direction bits are incorrect */ +static unsigned int msm_ion_ioctl_dir(unsigned int cmd) +{ + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + case ION_IOC_INV_CACHES: + case ION_IOC_CLEAN_INV_CACHES: + case ION_IOC_PREFETCH: + case ION_IOC_DRAIN: + return _IOC_WRITE; + default: + return _IOC_DIR(cmd); + } +} + +long msm_ion_custom_ioctl(struct ion_client *client, + unsigned int cmd, + unsigned long arg) +{ + unsigned int dir; + union { + struct ion_flush_data flush_data; + struct ion_prefetch_data prefetch_data; + } data; + + dir = msm_ion_ioctl_dir(cmd); + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (dir & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_CLEAN_CACHES: + case ION_IOC_INV_CACHES: + case ION_IOC_CLEAN_INV_CACHES: + { + unsigned long start, end; + struct ion_handle *handle = NULL; + int ret; + struct mm_struct *mm = current->active_mm; + + if (data.flush_data.handle > 0) { + handle = ion_handle_get_by_id(client, + 
(int)data.flush_data.handle); + if (IS_ERR(handle)) { + pr_info("%s: Could not find handle: %d\n", + __func__, (int)data.flush_data.handle); + return PTR_ERR(handle); + } + } else { + handle = ion_import_dma_buf(client, data.flush_data.fd); + if (IS_ERR(handle)) { + pr_info("%s: Could not import handle: %p\n", + __func__, handle); + return -EINVAL; + } + } + + down_read(&mm->mmap_sem); + + start = (unsigned long) data.flush_data.vaddr; + end = (unsigned long) data.flush_data.vaddr + + data.flush_data.length; + + if (start && check_vaddr_bounds(start, end)) { + pr_err("%s: virtual address %p is out of bounds\n", + __func__, data.flush_data.vaddr); + ret = -EINVAL; + } else { + ret = ion_do_cache_op( + client, handle, data.flush_data.vaddr, + data.flush_data.offset, + data.flush_data.length, cmd); + } + up_read(&mm->mmap_sem); + + ion_free(client, handle); + + if (ret < 0) + return ret; + break; + } + case ION_IOC_PREFETCH: + { + ion_walk_heaps(client, data.prefetch_data.heap_id, + (void *)data.prefetch_data.len, + ion_secure_cma_prefetch); + break; + } + case ION_IOC_DRAIN: + { + ion_walk_heaps(client, data.prefetch_data.heap_id, + (void *)data.prefetch_data.len, + ion_secure_cma_drain_pool); + break; + } + + default: + return -ENOTTY; + } + return 0; +} + +#define MAX_VMAP_RETRIES 10 + +/** + * An optimized page-zero'ing function. vmaps arrays of pages in large + * chunks to minimize the number of memsets and vmaps/vunmaps. + * + * Note that the `pages' array should be composed of all 4K pages. + * + * NOTE: This function does not guarantee synchronization of the caches + * and thus caller is responsible for handling any cache maintenance + * operations needed. + */ +int msm_ion_heap_pages_zero(struct page **pages, int num_pages) +{ + int i, j, npages_to_vmap; + void *ptr = NULL; + + /* + * As an optimization, we manually zero out all of the pages + * in one fell swoop here. To safeguard against insufficient + * vmalloc space, we only vmap `npages_to_vmap' at a time, + * starting with a conservative estimate of 1/8 of the total + * number of vmalloc pages available. + */ + npages_to_vmap = ((VMALLOC_END - VMALLOC_START)/8) + >> PAGE_SHIFT; + for (i = 0; i < num_pages; i += npages_to_vmap) { + npages_to_vmap = min(npages_to_vmap, num_pages - i); + for (j = 0; j < MAX_VMAP_RETRIES && npages_to_vmap; + ++j) { + ptr = vmap(&pages[i], npages_to_vmap, + VM_IOREMAP, PAGE_KERNEL); + if (ptr) + break; + else + npages_to_vmap >>= 1; + } + if (!ptr) + return -ENOMEM; + + memset(ptr, 0, npages_to_vmap * PAGE_SIZE); + vunmap(ptr); + } + + return 0; +} + +int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem) +{ + struct page **pages; + unsigned int page_tbl_size; + + pages_mem->free_fn = kfree; + page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT); + if (page_tbl_size > SZ_8K) { + /* + * Do fallback to ensure we have a balance between + * performance and availability. 
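msm_ion_custom_ioctl() above accepts the cache commands either by handle or by dma-buf fd. The following is a hedged userspace usage sketch, not part of this patch: it assumes the ion_flush_data layout implied by the compat shim earlier (handle, fd, vaddr, offset, length), that the uapi header is available as <linux/msm_ion.h>, and that commands the core ion ioctl handler does not recognise are routed to the custom handler installed by ion_device_create() further down.

#include <sys/ioctl.h>
#include <linux/msm_ion.h>

/* Clean the CPU caches for a mapped ION buffer identified by its fd. */
static int example_clean_caches(int ion_fd, int buf_fd,
				void *vaddr, unsigned int len)
{
	struct ion_flush_data flush = {
		.handle = 0,		/* 0: let the driver import buf_fd instead */
		.fd	= buf_fd,
		.vaddr	= vaddr,
		.offset	= 0,
		.length	= len,
	};

	return ioctl(ion_fd, ION_IOC_CLEAN_CACHES, &flush);
}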
+ */ + pages = kmalloc(page_tbl_size, + __GFP_COMP | __GFP_NORETRY | + __GFP_NOWARN); + if (!pages) { + pages = vmalloc(page_tbl_size); + pages_mem->free_fn = vfree; + } + } else { + pages = kmalloc(page_tbl_size, GFP_KERNEL); + } + + if (!pages) + return -ENOMEM; + + pages_mem->pages = pages; + return 0; +} + +void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem) +{ + pages_mem->free_fn(pages_mem->pages); +} + +int msm_ion_heap_high_order_page_zero(struct page *page, int order) +{ + int i, ret; + struct pages_mem pages_mem; + int npages = 1 << order; + pages_mem.size = npages * PAGE_SIZE; + + if (msm_ion_heap_alloc_pages_mem(&pages_mem)) + return -ENOMEM; + + for (i = 0; i < (1 << order); ++i) + pages_mem.pages[i] = page + i; + + ret = msm_ion_heap_pages_zero(pages_mem.pages, npages); + dma_sync_single_for_device(NULL, page_to_phys(page), pages_mem.size, + DMA_BIDIRECTIONAL); + msm_ion_heap_free_pages_mem(&pages_mem); + return ret; +} + +int msm_ion_heap_buffer_zero(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->sg_table; + struct scatterlist *sg; + int i, j, ret = 0, npages = 0; + struct pages_mem pages_mem; + + pages_mem.size = PAGE_ALIGN(buffer->size); + + if (msm_ion_heap_alloc_pages_mem(&pages_mem)) + return -ENOMEM; + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + unsigned long len = sg->length; + + for (j = 0; j < len / PAGE_SIZE; j++) + pages_mem.pages[npages++] = page + j; + } + + ret = msm_ion_heap_pages_zero(pages_mem.pages, npages); + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + msm_ion_heap_free_pages_mem(&pages_mem); + return ret; +} + +static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch ((int)heap_data->type) { +#ifdef CONFIG_CMA + case ION_HEAP_TYPE_SECURE_DMA: + heap = ion_secure_cma_heap_create(heap_data); + break; +#endif + default: + heap = ion_heap_create(heap_data); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %pa size %zu\n", + __func__, heap_data->name, heap_data->type, + &heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + heap->priv = heap_data->priv; + return heap; +} + +static void msm_ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch ((int)heap->type) { +#ifdef CONFIG_CMA + case ION_HEAP_TYPE_SECURE_DMA: + ion_secure_cma_heap_destroy(heap); + break; +#endif + default: + ion_heap_destroy(heap); + } +} + +static int msm_ion_probe(struct platform_device *pdev) +{ + static struct ion_device *new_dev; + struct ion_platform_data *pdata; + unsigned int pdata_needs_to_be_freed; + int err = -1; + int i; + if (pdev->dev.of_node) { + pdata = msm_ion_parse_dt(pdev); + if (IS_ERR(pdata)) { + err = PTR_ERR(pdata); + goto out; + } + pdata_needs_to_be_freed = 1; + } else { + pdata = pdev->dev.platform_data; + pdata_needs_to_be_freed = 0; + } + + num_heaps = pdata->nr; + + heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL); + + if (!heaps) { + err = -ENOMEM; + goto out; + } + + new_dev = ion_device_create(compat_msm_ion_ioctl); + if (IS_ERR_OR_NULL(new_dev)) { + /* + * set this to the ERR to indicate to the clients + * that Ion failed to probe. 
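The error handling described in the comment above, together with msm_ion_client_create() earlier in this file, gives the global idev pointer three distinct meanings, which is also why the success path publishes it only as the very last step of probe. A sketch of the resulting contract as a caller sees it (the helper name is illustrative; it simply restates what msm_ion_client_create() does):

static struct ion_client *example_get_client(const char *name)
{
	if (idev == NULL)			/* not probed yet */
		return ERR_PTR(-EPROBE_DEFER);
	if (IS_ERR(idev))			/* probed and failed */
		return ERR_CAST(idev);
	return ion_client_create(idev, name);	/* fully initialised */
}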
+ */ + idev = new_dev; + err = PTR_ERR(new_dev); + goto freeheaps; + } + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + msm_ion_allocate(heap_data); + + heap_data->has_outer_cache = pdata->has_outer_cache; + heaps[i] = msm_ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + heaps[i] = 0; + continue; + } else { + if (heap_data->size) + pr_info("ION heap %s created at %pa with size %zx\n", + heap_data->name, + &heap_data->base, + heap_data->size); + else + pr_info("ION heap %s created\n", + heap_data->name); + } + + ion_device_add_heap(new_dev, heaps[i]); + } + check_for_heap_overlap(pdata->heaps, num_heaps); + if (pdata_needs_to_be_freed) + free_pdata(pdata); + + platform_set_drvdata(pdev, new_dev); + /* + * intentionally set this at the very end to allow probes to be deferred + * completely until Ion is setup + */ + idev = new_dev; + return 0; + +freeheaps: + kfree(heaps); + if (pdata_needs_to_be_freed) + free_pdata(pdata); +out: + return err; +} + +static int msm_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < num_heaps; i++) + msm_ion_heap_destroy(heaps[i]); + + ion_device_destroy(idev); + kfree(heaps); + return 0; +} + +static struct of_device_id msm_ion_match_table[] = { + {.compatible = ION_COMPAT_STR}, + {}, +}; + +static struct platform_driver msm_ion_driver = { + .probe = msm_ion_probe, + .remove = msm_ion_remove, + .driver = { + .name = "ion-msm", + .of_match_table = msm_ion_match_table, + }, +}; + +static int __init msm_ion_init(void) +{ + return platform_driver_register(&msm_ion_driver); +} + +static void __exit msm_ion_exit(void) +{ + platform_driver_unregister(&msm_ion_driver); +} + +subsys_initcall(msm_ion_init); +module_exit(msm_ion_exit); diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h new file mode 100644 index 000000000000..3bb7bd074f6b --- /dev/null +++ b/drivers/staging/android/ion/msm/msm_ion.h @@ -0,0 +1,226 @@ +#ifndef _MSM_MSM_ION_H +#define _MSM_MSM_ION_H + +#include "../ion.h" +#include "../../uapi/msm_ion.h" + +enum ion_permission_type { + IPT_TYPE_MM_CARVEOUT = 0, + IPT_TYPE_MFC_SHAREDMEM = 1, + IPT_TYPE_MDP_WRITEBACK = 2, +}; + +/* + * This flag allows clients when mapping into the IOMMU to specify to + * defer un-mapping from the IOMMU until the buffer memory is freed. + */ +#define ION_IOMMU_UNMAP_DELAYED 1 + +/* + * This flag allows clients to defer unsecuring a buffer until the buffer + * is actually freed. + */ +#define ION_UNSECURE_DELAYED 1 + +/** + * struct ion_cp_heap_pdata - defines a content protection heap in the given + * platform + * @permission_type: Memory ID used to identify the memory to TZ + * @align: Alignment requirement for the memory + * @secure_base: Base address for securing the heap. + * Note: This might be different from actual base address + * of this heap in the case of a shared heap. + * @secure_size: Memory size for securing the heap. + * Note: This might be different from actual size + * of this heap in the case of a shared heap. + * @fixed_position If nonzero, position in the fixed area. + * @iommu_map_all: Indicates whether we should map whole heap into IOMMU. + * @iommu_2x_map_domain: Indicates the domain to use for overmapping. 
+ * @request_ion_region: function to be called when the number of allocations + * goes from 0 -> 1 + * @release_ion_region: function to be called when the number of allocations + * goes from 1 -> 0 + * @setup_ion_region: function to be called upon ion registration + * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For + * secure heaps, this flag must be set so allow non-secure + * allocations. For non-secure heaps, this flag is ignored. + * + */ +struct ion_cp_heap_pdata { + enum ion_permission_type permission_type; + unsigned int align; + ion_phys_addr_t secure_base; /* Base addr used when heap is shared */ + size_t secure_size; /* Size used for securing heap when heap is shared*/ + int is_cma; + enum ion_fixed_position fixed_position; + int iommu_map_all; + int iommu_2x_map_domain; + int (*request_ion_region)(void *); + int (*release_ion_region)(void *); + void *(*setup_ion_region)(void); + int allow_nonsecure_alloc; +}; + +/** + * struct ion_co_heap_pdata - defines a carveout heap in the given platform + * @adjacent_mem_id: Id of heap that this heap must be adjacent to. + * @align: Alignment requirement for the memory + * @fixed_position If nonzero, position in the fixed area. + * @request_ion_region: function to be called when the number of allocations + * goes from 0 -> 1 + * @release_ion_region: function to be called when the number of allocations + * goes from 1 -> 0 + * @setup_ion_region: function to be called upon ion registration + * @memory_type:Memory type used for the heap + * + */ +struct ion_co_heap_pdata { + int adjacent_mem_id; + unsigned int align; + enum ion_fixed_position fixed_position; + int (*request_ion_region)(void *); + int (*release_ion_region)(void *); + void *(*setup_ion_region)(void); +}; + +/** + * struct ion_cma_pdata - extra data for CMA regions + * @default_prefetch_size - default size to use for prefetching + */ +struct ion_cma_pdata { + unsigned long default_prefetch_size; +}; + +#ifdef CONFIG_ION +/** + * msm_ion_client_create - allocate a client using the ion_device specified in + * drivers/staging/android/ion/msm/msm_ion.c + * + * name is the same as ion_client_create, return values + * are the same as ion_client_create. + */ + +struct ion_client *msm_ion_client_create(const char *name); + +/** + * ion_handle_get_flags - get the flags for a given handle + * + * @client - client who allocated the handle + * @handle - handle to get the flags + * @flags - pointer to store the flags + * + * Gets the current flags for a handle. These flags indicate various options + * of the buffer (caching, security, etc.) + */ +int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle, + unsigned long *flags); + + + +/** + * ion_handle_get_size - get the allocated size of a given handle + * + * @client - client who allocated the handle + * @handle - handle to get the size + * @size - pointer to store the size + * + * gives the allocated size of a handle. returns 0 on success, negative + * value on error + * + * NOTE: This is intended to be used only to get a size to pass to map_iommu. + * You should *NOT* rely on this for any other usage. + */ + +int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle, + unsigned long *size); +/** + * msm_ion_do_cache_op - do cache operations. + * + * @client - pointer to ION client. + * @handle - pointer to buffer handle. + * @vaddr - virtual address to operate on. + * @len - Length of data to do cache operation on. 
+ * @cmd - Cache operation to perform: + * ION_IOC_CLEAN_CACHES + * ION_IOC_INV_CACHES + * ION_IOC_CLEAN_INV_CACHES + * + * Returns 0 on success + */ +int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle, + void *vaddr, unsigned long len, unsigned int cmd); + +/** + * msm_ion_secure_buffer - secure an individual buffer + * + * @client - client who has access to the buffer + * @handle - buffer to secure + * @usage - usage hint to TZ + * @flags - flags for the securing + */ +int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle, + enum cp_mem_usage usage, int flags); + +/** + * msm_ion_unsecure_buffer - unsecure an individual buffer + * + * @client - client who has access to the buffer + * @handle - buffer to secure + */ +int msm_ion_unsecure_buffer(struct ion_client *client, + struct ion_handle *handle); + + +int msm_ion_secure_table(struct sg_table *table, enum cp_mem_usage usage, + int flags); + +int msm_ion_unsecure_table(struct sg_table *table); +#else +static inline struct ion_client *msm_ion_client_create(const char *name) +{ + return ERR_PTR(-ENODEV); +} + +static inline int ion_handle_get_size(struct ion_client *client, + struct ion_handle *handle, unsigned long *size) +{ + return -ENODEV; +} + +static inline int msm_ion_do_cache_op(struct ion_client *client, + struct ion_handle *handle, void *vaddr, + unsigned long len, unsigned int cmd) +{ + return -ENODEV; +} + +static inline int msm_ion_secure_buffer(struct ion_client *client, + struct ion_handle *handle, + enum cp_mem_usage usage, + int flags) +{ + return -ENODEV; +} + +static inline int msm_ion_unsecure_buffer(struct ion_client *client, + struct ion_handle *handle) +{ + return -ENODEV; +} + +static inline int msm_ion_secure_table(struct sg_table *table, + enum cp_mem_usage usage, + int flags) +{ + return -ENODEV; +} + +static inline int msm_ion_unsecure_table(struct sg_table *table) +{ + return -ENODEV; +} + + +#endif /* CONFIG_ION */ + +#endif diff --git a/drivers/staging/android/ion/msm/secure_buffer.c b/drivers/staging/android/ion/msm/secure_buffer.c new file mode 100644 index 000000000000..6c76318d6fc3 --- /dev/null +++ b/drivers/staging/android/ion/msm/secure_buffer.c @@ -0,0 +1,309 @@ +/* + * Copyright (C) 2011 Google, Inc + * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/highmem.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/msm_ion.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <soc/qcom/scm.h> + + +static struct rb_root secure_root; +DEFINE_MUTEX(secure_buffer_mutex); + +struct secure_meta { + struct rb_node node; + struct sg_table *table; + struct kref ref; + enum cp_mem_usage usage; +}; + +struct cp2_mem_chunks { + u32 chunk_list; + u32 chunk_list_size; + u32 chunk_size; +} __attribute__ ((__packed__)); + +struct cp2_lock_req { + struct cp2_mem_chunks chunks; + u32 mem_usage; + u32 lock; +} __attribute__ ((__packed__)); + +#define MEM_PROTECT_LOCK_ID2 0x0A +#define V2_CHUNK_SIZE SZ_1M +#define FEATURE_ID_CP 12 + +static void secure_meta_add(struct secure_meta *meta) +{ + struct rb_root *root = &secure_root; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct secure_meta *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct secure_meta, node); + + if (meta->table < entry->table) { + p = &(*p)->rb_left; + } else if (meta->table > entry->table) { + p = &(*p)->rb_right; + } else { + pr_err("%s: table %p already exists\n", __func__, + entry->table); + BUG(); + } + } + + rb_link_node(&meta->node, parent, p); + rb_insert_color(&meta->node, root); +} + + +static struct secure_meta *secure_meta_lookup(struct sg_table *table) +{ + struct rb_root *root = &secure_root; + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct secure_meta *entry = NULL; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct secure_meta, node); + + if (table < entry->table) + p = &(*p)->rb_left; + else if (table > entry->table) + p = &(*p)->rb_right; + else + return entry; + } + + return NULL; +} + + +static int secure_buffer_change_chunk(u32 chunks, + u32 nchunks, + u32 chunk_size, + enum cp_mem_usage usage, + int lock) +{ + struct cp2_lock_req request; + u32 resp; + + request.mem_usage = usage; + request.lock = lock; + + request.chunks.chunk_list = chunks; + request.chunks.chunk_list_size = nchunks; + request.chunks.chunk_size = chunk_size; + + kmap_flush_unused(); + kmap_atomic_flush_unused(); + return scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2, + &request, sizeof(request), &resp, sizeof(resp)); + +} + + + +static int secure_buffer_change_table(struct sg_table *table, + enum cp_mem_usage usage, + int lock) +{ + int i, j; + int ret = -EINVAL; + u32 *chunk_list; + struct scatterlist *sg; + + for_each_sg(table->sgl, sg, table->nents, i) { + int nchunks; + int size = sg->length; + int chunk_list_len; + phys_addr_t chunk_list_phys; + + /* + * This should theoretically be a phys_addr_t but the protocol + * indicates this should be a u32. + */ + u32 base; + u64 tmp = sg_dma_address(sg); + WARN((tmp >> 32) & 0xffffffff, + "%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n", + __func__, sg, tmp); + if (unlikely(!size || (size % V2_CHUNK_SIZE))) { + WARN(1, + "%s: chunk %d has invalid size: 0x%x. 
Must be a multiple of 0x%x\n", + __func__, i, size, V2_CHUNK_SIZE); + return -EINVAL; + } + + base = (u32)tmp; + + nchunks = size / V2_CHUNK_SIZE; + chunk_list_len = sizeof(u32)*nchunks; + + chunk_list = kzalloc(chunk_list_len, GFP_KERNEL); + + if (!chunk_list) + return -ENOMEM; + + chunk_list_phys = virt_to_phys(chunk_list); + for (j = 0; j < nchunks; j++) + chunk_list[j] = base + j * V2_CHUNK_SIZE; + + /* + * Flush the chunk list before sending the memory to the + * secure environment to ensure the data is actually present + * in RAM + */ + dmac_flush_range(chunk_list, chunk_list + chunk_list_len); + + ret = secure_buffer_change_chunk(virt_to_phys(chunk_list), + nchunks, V2_CHUNK_SIZE, usage, lock); + + kfree(chunk_list); + } + + return ret; +} + +int msm_ion_secure_table(struct sg_table *table, enum cp_mem_usage usage, + int flags) +{ + struct secure_meta *meta; + int ret; + + mutex_lock(&secure_buffer_mutex); + meta = secure_meta_lookup(table); + + if (meta) { + kref_get(&meta->ref); + ret = 0; + } else { + meta = kzalloc(sizeof(*meta), GFP_KERNEL); + + if (!meta) { + ret = -ENOMEM; + goto out; + } + + meta->table = table; + meta->usage = usage; + kref_init(&meta->ref); + + ret = secure_buffer_change_table(table, usage, 1); + if (!ret) + secure_meta_add(meta); + else + kfree(meta); + } +out: + mutex_unlock(&secure_buffer_mutex); + + return ret; + +} + +int msm_ion_secure_buffer(struct ion_client *client, struct ion_handle *handle, + enum cp_mem_usage usage, int flags) +{ + struct sg_table *table; + int ret; + + table = ion_sg_table(client, handle); + + if (IS_ERR_OR_NULL(table)) { + ret = -EINVAL; + goto out; + } + + ret = msm_ion_secure_table(table, usage, flags); +out: + return ret; +} +EXPORT_SYMBOL(msm_ion_secure_buffer); + +static void msm_secure_buffer_release(struct kref *kref) +{ + struct secure_meta *meta = container_of(kref, struct secure_meta, + ref); + + rb_erase(&meta->node, &secure_root); + secure_buffer_change_table(meta->table, meta->usage, 0); + kfree(meta); +} + +int msm_ion_unsecure_table(struct sg_table *table) +{ + struct secure_meta *meta; + int ret = 0; + + mutex_lock(&secure_buffer_mutex); + meta = secure_meta_lookup(table); + + if (!meta) { + ret = -EINVAL; + goto out; + } + + kref_put(&meta->ref, msm_secure_buffer_release); + +out: + mutex_unlock(&secure_buffer_mutex); + return ret; + +} + +int msm_ion_unsecure_buffer(struct ion_client *client, + struct ion_handle *handle) +{ + struct sg_table *table; + int ret = 0; + + table = ion_sg_table(client, handle); + + if (IS_ERR_OR_NULL(table)) { + WARN(1, "Could not get table for handle %p to unsecure\n", + handle); + ret = -EINVAL; + goto out; + } + + msm_ion_unsecure_table(table); + +out: + return ret; +} +EXPORT_SYMBOL(msm_ion_unsecure_buffer); + +#define MAKE_CP_VERSION(major, minor, patch) \ + (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF)) + +bool msm_secure_v2_is_supported(void) +{ + int version = scm_get_feat_version(FEATURE_ID_CP); + + /* + * if the version is < 1.1.0 then dynamic buffer allocation is + * not supported + */ + return version >= MAKE_CP_VERSION(1, 1, 0); +} diff --git a/drivers/staging/android/ion/msm_ion_priv.h b/drivers/staging/android/ion/msm_ion_priv.h new file mode 100644 index 000000000000..a40f78023b3a --- /dev/null +++ b/drivers/staging/android/ion/msm_ion_priv.h @@ -0,0 +1,126 @@ +/* + * drivers/staging/android/ion/msm_ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_ION_PRIV_H
+#define _MSM_ION_PRIV_H
+
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/seq_file.h>
+
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node:		list node used to store in the list of mem_map_data
+ * @addr:		start address of memory region.
+ * @addr_end:		end address of memory region.
+ * @size:		size of memory region
+ * @client_name:	name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+	struct list_head node;
+	ion_phys_addr_t addr;
+	ion_phys_addr_t addr_end;
+	unsigned long size;
+	const char *client_name;
+};
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *);
+void ion_cp_heap_destroy(struct ion_heap *);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+			unsigned int cmd,
+			unsigned long arg);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_secure_cma_heap_create(struct ion_platform_heap *);
+void ion_secure_cma_heap_destroy(struct ion_heap *);
+
+int ion_secure_cma_prefetch(struct ion_heap *heap, void *data);
+
+int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused);
+
+#else
+static inline int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
+{
+	return -ENODEV;
+}
+
+static inline int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
+{
+	return -ENODEV;
+}
+
+#endif
+
+struct ion_heap *ion_removed_heap_create(struct ion_platform_heap *);
+void ion_removed_heap_destroy(struct ion_heap *);
+
+#define ION_CP_ALLOCATE_FAIL -1
+#define ION_RESERVED_ALLOCATE_FAIL -1
+
+/**
+ * ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @uaddr - virtual address to operate on.
+ * @offset - offset from physical address.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ *		ION_IOC_CLEAN_CACHES
+ *		ION_IOC_INV_CACHES
+ *		ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *uaddr, unsigned long offset, unsigned long len,
+			unsigned int cmd);
+
+void ion_cp_heap_get_base(struct ion_heap *heap, unsigned long *base,
+			unsigned long *size);
+
+void ion_mem_map_show(struct ion_heap *heap);
+
+int ion_heap_allow_secure_allocation(enum ion_heap_type type);
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type);
+
+int ion_heap_allow_handle_secure(enum ion_heap_type type);
+
+/**
+ * ion_create_chunked_sg_table - helper function to create sg table
+ * with specified chunk size
+ * @buffer_base:	The starting address used for the sg dma address
+ * @chunk_size:		The size of each entry in the sg table
+ * @total_size:		The total size of the sg table (i.e. the sum of the
+ *			entries). This will be rounded up to the nearest
+ *			multiple of `chunk_size'
+ */
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					size_t chunk_size, size_t total_size);
+#endif /* _MSM_ION_PRIV_H */
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
new file mode 100644
index 000000000000..3b56015252d8
--- /dev/null
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -0,0 +1,174 @@
+#ifndef _UAPI_MSM_ION_H
+#define _UAPI_MSM_ION_H
+
+#include "ion.h"
+
+enum msm_ion_heap_types {
+	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
+	ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
+	/*
+	 * if you add a heap type here you should also add it to
+	 * heap_types_info[] in msm_ion.c
+	 */
+};
+
+/**
+ * These are the only IDs that should be used for Ion heap IDs.
+ * The IDs listed are in the order in which allocation will be attempted,
+ * if specified. Don't swap the order of heap IDs unless you know what
+ * you are doing!
+ * IDs are spaced apart on purpose to allow new IDs to be inserted
+ * in between (for possible fallbacks).
+ */
+
+enum ion_heap_ids {
+	INVALID_HEAP_ID = -1,
+	ION_CP_MM_HEAP_ID = 8,
+	ION_CP_MFC_HEAP_ID = 12,
+	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+	ION_SYSTEM_CONTIG_HEAP_ID = 21,
+	ION_ADSP_HEAP_ID = 22,
+	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
+	ION_SF_HEAP_ID = 24,
+	ION_SYSTEM_HEAP_ID = 25,
+	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
+	ION_QSECOM_HEAP_ID = 27,
+	ION_AUDIO_HEAP_ID = 28,
+
+	ION_MM_FIRMWARE_HEAP_ID = 29,
+
+	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
+};
+
+/*
+ * The IOMMU heap is deprecated! Here are some aliases for backwards
+ * compatibility:
+ */
+#define ION_IOMMU_HEAP_ID ION_SYSTEM_HEAP_ID
+#define ION_HEAP_TYPE_IOMMU ION_HEAP_TYPE_SYSTEM
+
+enum ion_fixed_position {
+	NOT_FIXED,
+	FIXED_LOW,
+	FIXED_MIDDLE,
+	FIXED_HIGH,
+};
+
+enum cp_mem_usage {
+	VIDEO_BITSTREAM = 0x1,
+	VIDEO_PIXEL = 0x2,
+	VIDEO_NONPIXEL = 0x3,
+	DISPLAY_SECURE_CP_USAGE = 0x4,
+	CAMERA_SECURE_CP_USAGE = 0x5,
+	MAX_USAGE = 0x6,
+	UNKNOWN = 0x7FFFFFFF,
+};
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ */
+#define ION_FLAG_SECURE (1 << ION_HEAP_ID_RESERVED)
+
+/**
+ * Flag for clients to force contiguous memory allocation
+ *
+ * Use of this flag is carefully monitored!
+ */
+#define ION_FLAG_FORCE_CONTIGUOUS (1 << 30)
+
+/*
+ * Used in conjunction with heaps that pool memory to force an allocation
+ * to come from the page allocator directly instead of from the pool
+ */
+#define ION_FLAG_POOL_FORCE_ALLOC (1 << 16)
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
+
+/**
+ * This macro should be used with the ion_heap_ids defined above.
+ */ +#define ION_HEAP(bit) (1 << (bit)) + +#define ION_ADSP_HEAP_NAME "adsp" +#define ION_SYSTEM_HEAP_NAME "system" +#define ION_VMALLOC_HEAP_NAME ION_SYSTEM_HEAP_NAME +#define ION_KMALLOC_HEAP_NAME "kmalloc" +#define ION_AUDIO_HEAP_NAME "audio" +#define ION_SF_HEAP_NAME "sf" +#define ION_MM_HEAP_NAME "mm" +#define ION_CAMERA_HEAP_NAME "camera_preview" +#define ION_IOMMU_HEAP_NAME "iommu" +#define ION_MFC_HEAP_NAME "mfc" +#define ION_WB_HEAP_NAME "wb" +#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw" +#define ION_PIL1_HEAP_NAME "pil_1" +#define ION_PIL2_HEAP_NAME "pil_2" +#define ION_QSECOM_HEAP_NAME "qsecom" + +#define ION_SET_CACHED(__cache) (__cache | ION_FLAG_CACHED) +#define ION_SET_UNCACHED(__cache) (__cache & ~ION_FLAG_CACHED) + +#define ION_IS_CACHED(__flags) ((__flags) & ION_FLAG_CACHED) + +/* struct ion_flush_data - data passed to ion for flushing caches + * + * @handle: handle with data to flush + * @fd: fd to flush + * @vaddr: userspace virtual address mapped with mmap + * @offset: offset into the handle to flush + * @length: length of handle to flush + * + * Performs cache operations on the handle. If p is the start address + * of the handle, p + offset through p + offset + length will have + * the cache operations performed + */ +struct ion_flush_data { + ion_user_handle_t handle; + int fd; + void *vaddr; + unsigned int offset; + unsigned int length; +}; + + +struct ion_prefetch_data { + int heap_id; + unsigned long len; +}; + +#define ION_IOC_MSM_MAGIC 'M' + +/** + * DOC: ION_IOC_CLEAN_CACHES - clean the caches + * + * Clean the caches of the handle specified. + */ +#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MSM_MAGIC, 0, \ + struct ion_flush_data) +/** + * DOC: ION_IOC_INV_CACHES - invalidate the caches + * + * Invalidate the caches of the handle specified. + */ +#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 1, \ + struct ion_flush_data) +/** + * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches + * + * Clean and invalidate the caches of the handle specified. 
+ */ +#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MSM_MAGIC, 2, \ + struct ion_flush_data) + +#define ION_IOC_PREFETCH _IOWR(ION_IOC_MSM_MAGIC, 3, \ + struct ion_prefetch_data) + +#define ION_IOC_DRAIN _IOWR(ION_IOC_MSM_MAGIC, 4, \ + struct ion_prefetch_data) + +#endif diff --git a/include/linux/ion.h b/include/linux/ion.h new file mode 100644 index 000000000000..242fef6ba990 --- /dev/null +++ b/include/linux/ion.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_ION_H__ +#define __LINUX_ION_H__ + +#include "../../drivers/staging/android/ion/ion.h" + +#endif /* __LINUX_ION_H__ */ diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h new file mode 100644 index 000000000000..04afdf587421 --- /dev/null +++ b/include/linux/msm_ion.h @@ -0,0 +1,6 @@ +#ifndef __LINUX_MSM_ION_H__ +#define __LINUX_MSM_ION_H__ + +#include "../../drivers/staging/android/ion/msm/msm_ion.h" + +#endif /* __LINUX_MSM_ION_H__ */ diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f7554fd7fc62..ba8c415771b7 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -351,6 +351,550 @@ TRACE_EVENT(mm_page_alloc_extfrag, __entry->change_ownership) ); + +DECLARE_EVENT_CLASS(ion_alloc, + + TP_PROTO(const char *client_name, + const char *heap_name, + size_t len, + unsigned int mask, + unsigned int flags), + + TP_ARGS(client_name, heap_name, len, mask, flags), + + TP_STRUCT__entry( + __array(char, client_name, 64) + __field(const char *, heap_name) + __field(size_t, len) + __field(unsigned int, mask) + __field(unsigned int, flags) + ), + + TP_fast_assign( + strlcpy(__entry->client_name, client_name, 64); + __entry->heap_name = heap_name; + __entry->len = len; + __entry->mask = mask; + __entry->flags = flags; + ), + + TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x", + __entry->client_name, + __entry->heap_name, + __entry->len, + __entry->mask, + __entry->flags) +); + +DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start, + + TP_PROTO(const char *client_name, + const char *heap_name, + size_t len, + unsigned int mask, + unsigned int flags), + + TP_ARGS(client_name, heap_name, len, mask, flags) +); + +DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end, + + TP_PROTO(const char *client_name, + const char *heap_name, + size_t len, + unsigned int mask, + unsigned int flags), + + TP_ARGS(client_name, heap_name, len, mask, flags) +); + +DECLARE_EVENT_CLASS(ion_alloc_error, + + TP_PROTO(const char *client_name, + const char *heap_name, + size_t len, + unsigned int mask, + unsigned int flags, + long error), + + TP_ARGS(client_name, heap_name, len, mask, flags, error), + + TP_STRUCT__entry( + __field(const char *, client_name) + __field(const char *, heap_name) + __field(size_t, len) + __field(unsigned int, mask) + __field(unsigned int, flags) + __field(long, error) + ), + + TP_fast_assign( + __entry->client_name = client_name; + __entry->heap_name = heap_name; + __entry->len = len; + __entry->mask = mask; + __entry->flags = flags; + __entry->error = error; + ), + + TP_printk( + "client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld", + __entry->client_name, + __entry->heap_name, + __entry->len, + __entry->mask, + __entry->flags, + __entry->error) +); + + +DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback, + + TP_PROTO(const char *client_name, + const char *heap_name, + size_t len, + unsigned int mask, + unsigned int flags, + long error), + + TP_ARGS(client_name, heap_name, len, mask, flags, error) +); + +DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail, + + 
TP_PROTO(const char *client_name, + const char *heap_name, + size_t len, + unsigned int mask, + unsigned int flags, + long error), + + TP_ARGS(client_name, heap_name, len, mask, flags, error) +); + + +DECLARE_EVENT_CLASS(alloc_retry, + + TP_PROTO(int tries), + + TP_ARGS(tries), + + TP_STRUCT__entry( + __field(int, tries) + ), + + TP_fast_assign( + __entry->tries = tries; + ), + + TP_printk("tries=%d", + __entry->tries) +); + +DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry, + + TP_PROTO(int tries), + + TP_ARGS(tries) +); + +DEFINE_EVENT(alloc_retry, migrate_retry, + + TP_PROTO(int tries), + + TP_ARGS(tries) +); + +DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry, + + TP_PROTO(int tries), + + TP_ARGS(tries) +); + +DECLARE_EVENT_CLASS(migrate_pages, + + TP_PROTO(int mode), + + TP_ARGS(mode), + + TP_STRUCT__entry( + __field(int, mode) + ), + + TP_fast_assign( + __entry->mode = mode; + ), + + TP_printk("mode=%d", + __entry->mode) +); + +DEFINE_EVENT(migrate_pages, migrate_pages_start, + + TP_PROTO(int mode), + + TP_ARGS(mode) +); + +DEFINE_EVENT(migrate_pages, migrate_pages_end, + + TP_PROTO(int mode), + + TP_ARGS(mode) +); + +DECLARE_EVENT_CLASS(ion_alloc_pages, + + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order), + + TP_STRUCT__entry( + __field(gfp_t, gfp_flags) + __field(unsigned int, order) + ), + + TP_fast_assign( + __entry->gfp_flags = gfp_flags; + __entry->order = order; + ), + + TP_printk("gfp_flags=%s order=%d", + show_gfp_flags(__entry->gfp_flags), + __entry->order) + ); + +DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start, + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order) + ); + +DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end, + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order) + ); + +DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail, + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order) + ); + +DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start, + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order) + ); + +DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end, + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order) + ); + +DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail, + TP_PROTO(gfp_t gfp_flags, + unsigned int order), + + TP_ARGS(gfp_flags, order) + + ); + +DECLARE_EVENT_CLASS(smmu_map, + + TP_PROTO(unsigned long va, + phys_addr_t pa, + unsigned long chunk_size, + size_t len), + + TP_ARGS(va, pa, chunk_size, len), + + TP_STRUCT__entry( + __field(unsigned long, va) + __field(phys_addr_t, pa) + __field(unsigned long, chunk_size) + __field(size_t, len) + ), + + TP_fast_assign( + __entry->va = va; + __entry->pa = pa; + __entry->chunk_size = chunk_size; + __entry->len = len; + ), + + TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lu len=%zu", + (void *)__entry->va, + &__entry->pa, + __entry->chunk_size, + __entry->len) + ); + +DEFINE_EVENT(smmu_map, iommu_map_range, + TP_PROTO(unsigned long va, + phys_addr_t pa, + unsigned long chunk_size, + size_t len), + + TP_ARGS(va, pa, chunk_size, len) + ); + +DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool, + + TP_PROTO(unsigned long len, + int pool_total, + bool is_prefetch), + + TP_ARGS(len, pool_total, is_prefetch), + + TP_STRUCT__entry( + __field(unsigned long, len) + __field(int, pool_total) + __field(bool, is_prefetch) + ), + + TP_fast_assign( + __entry->len = len; + __entry->pool_total = pool_total; + __entry->is_prefetch = is_prefetch; + ), + + 
TP_printk("len %lx, pool total %x is_prefetch %d", + __entry->len, + __entry->pool_total, + __entry->is_prefetch) + ); + +DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start, + TP_PROTO(unsigned long len, + int pool_total, + bool is_prefetch), + + TP_ARGS(len, pool_total, is_prefetch) + ); + +DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end, + TP_PROTO(unsigned long len, + int pool_total, + bool is_prefetch), + + TP_ARGS(len, pool_total, is_prefetch) + ); + +DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool, + + TP_PROTO(unsigned long drained_size, + unsigned long skipped_size), + + TP_ARGS(drained_size, skipped_size), + + TP_STRUCT__entry( + __field(unsigned long, drained_size) + __field(unsigned long, skipped_size) + ), + + TP_fast_assign( + __entry->drained_size = drained_size; + __entry->skipped_size = skipped_size; + ), + + TP_printk("drained size %lx, skipped size %lx", + __entry->drained_size, + __entry->skipped_size) + ); + +DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start, + TP_PROTO(unsigned long drained_size, + unsigned long skipped_size), + + TP_ARGS(drained_size, skipped_size) + ); + +DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end, + TP_PROTO(unsigned long drained_size, + unsigned long skipped_size), + + TP_ARGS(drained_size, skipped_size) + ); + +TRACE_EVENT(ion_prefetching, + + TP_PROTO(unsigned long len), + + TP_ARGS(len), + + TP_STRUCT__entry( + __field(unsigned long, len) + ), + + TP_fast_assign( + __entry->len = len; + ), + + TP_printk("prefetch size %lx", + __entry->len) + ); + +DECLARE_EVENT_CLASS(ion_secure_cma_allocate, + + TP_PROTO(const char *heap_name, + unsigned long len, + unsigned long align, + unsigned long flags), + + TP_ARGS(heap_name, len, align, flags), + + TP_STRUCT__entry( + __field(const char *, heap_name) + __field(unsigned long, len) + __field(unsigned long, align) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->heap_name = heap_name; + __entry->len = len; + __entry->align = align; + __entry->flags = flags; + ), + + TP_printk("heap_name=%s len=%lx align=%lx flags=%lx", + __entry->heap_name, + __entry->len, + __entry->align, + __entry->flags) + ); + +DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start, + TP_PROTO(const char *heap_name, + unsigned long len, + unsigned long align, + unsigned long flags), + + TP_ARGS(heap_name, len, align, flags) + ); + +DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end, + TP_PROTO(const char *heap_name, + unsigned long len, + unsigned long align, + unsigned long flags), + + TP_ARGS(heap_name, len, align, flags) + ); + +DECLARE_EVENT_CLASS(ion_cp_secure_buffer, + + TP_PROTO(const char *heap_name, + unsigned long len, + unsigned long align, + unsigned long flags), + + TP_ARGS(heap_name, len, align, flags), + + TP_STRUCT__entry( + __field(const char *, heap_name) + __field(unsigned long, len) + __field(unsigned long, align) + __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->heap_name = heap_name; + __entry->len = len; + __entry->align = align; + __entry->flags = flags; + ), + + TP_printk("heap_name=%s len=%lx align=%lx flags=%lx", + __entry->heap_name, + __entry->len, + __entry->align, + __entry->flags) + ); + +DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start, + TP_PROTO(const char *heap_name, + unsigned long len, + unsigned long align, + unsigned long flags), + + TP_ARGS(heap_name, len, align, flags) + ); + +DEFINE_EVENT(ion_cp_secure_buffer, 
ion_cp_secure_buffer_end, + TP_PROTO(const char *heap_name, + unsigned long len, + unsigned long align, + unsigned long flags), + + TP_ARGS(heap_name, len, align, flags) + ); + +DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range, + + TP_PROTO(int sec_id, + int num, + unsigned long va, + unsigned int pa, + size_t len), + + TP_ARGS(sec_id, num, va, pa, len), + + TP_STRUCT__entry( + __field(int, sec_id) + __field(int, num) + __field(unsigned long, va) + __field(unsigned int, pa) + __field(size_t, len) + ), + + TP_fast_assign( + __entry->sec_id = sec_id; + __entry->num = num; + __entry->va = va; + __entry->pa = pa; + __entry->len = len; + ), + + TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu", + __entry->sec_id, + __entry->num, + __entry->va, + __entry->pa, + __entry->len) + ); + +DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start, + + TP_PROTO(int sec_id, + int num, + unsigned long va, + unsigned int pa, + size_t len), + + TP_ARGS(sec_id, num, va, pa, len) + ); + +DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end, + + TP_PROTO(int sec_id, + int num, + unsigned long va, + unsigned int pa, + size_t len), + + TP_ARGS(sec_id, num, va, pa, len) + ); #endif /* _TRACE_KMEM_H */ /* This part must be outside protection */ |
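
The in-kernel client interface this series adds (msm_ion_client_create(), msm_ion_do_cache_op()) combines with the existing ion_alloc()/ion_map_kernel()/ion_free() calls roughly as follows. This is a minimal illustrative sketch, not part of the patch: the function name msm_ion_sample() is hypothetical, the heap mask and sizes are arbitrary, and it assumes the ion_alloc() signature of this kernel generation (len, align, heap_id_mask, flags).

#include <linux/err.h>
#include <linux/msm_ion.h>
#include <linux/sizes.h>

/* Hypothetical sample client, for illustration only. */
static int msm_ion_sample(void)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	client = msm_ion_client_create("msm_ion_sample");
	if (IS_ERR_OR_NULL(client))
		return -ENODEV;

	/* Cached allocation from the system heap. */
	handle = ion_alloc(client, SZ_1M, SZ_4K,
			   ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -ENOMEM;
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR_OR_NULL(vaddr)) {
		ret = -ENOMEM;
		goto out_handle;
	}

	/* Clients are expected to do explicit cache maintenance. */
	ret = msm_ion_do_cache_op(client, handle, vaddr, SZ_1M,
				  ION_IOC_CLEAN_INV_CACHES);

	ion_unmap_kernel(client, handle);
out_handle:
	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}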
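
The per-buffer secure path (msm_ion_secure_buffer()/msm_ion_unsecure_buffer(), backed by the SCM chunk-list call in secure_buffer.c) could be driven along these lines. Again a hedged sketch rather than part of the patch: the helper name, the heap mask, and the VIDEO_PIXEL usage hint are illustrative, and it assumes the buffer comes from a physically contiguous heap whose scatterlist entries are 1 MB multiples, as secure_buffer_change_table() requires.

#include <linux/err.h>
#include <linux/msm_ion.h>
#include <linux/sizes.h>

/* Hypothetical helper: protect one buffer for a secure consumer. */
static int msm_ion_secure_sample(struct ion_client *client)
{
	struct ion_handle *handle;
	int ret;

	/*
	 * Contiguous, 1MB-aligned buffer so every sg chunk is a
	 * multiple of V2_CHUNK_SIZE.
	 */
	handle = ion_alloc(client, 4 * SZ_1M, SZ_1M,
			   ION_HEAP(ION_CP_MM_HEAP_ID),
			   ION_FLAG_FORCE_CONTIGUOUS);
	if (IS_ERR_OR_NULL(handle))
		return -ENOMEM;

	/*
	 * Hands the chunk list to TZ; the secure_meta is refcounted, so
	 * securing the same table again only takes a reference.
	 */
	ret = msm_ion_secure_buffer(client, handle, VIDEO_PIXEL, 0);
	if (ret)
		goto out_free;

	/* ... give the buffer to the secure domain, then undo ... */

	msm_ion_unsecure_buffer(client, handle);
out_free:
	ion_free(client, handle);
	return ret;
}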