author     Blagovest Kolenichev <bkolenichev@codeaurora.org>    2017-07-26 08:12:21 -0700
committer  Blagovest Kolenichev <bkolenichev@codeaurora.org>    2017-08-04 07:53:32 -0700
commit     b65ef4723782d7e5f75604ba17bf5dd6e41c1d57 (patch)
tree       d37206c8a823ffede15fe9482689ef24fe0cf34a /drivers/android/binder_alloc.c
parent     6ce32a69b7f19cb98df4b70f382d7b8bb3f142cd (diff)
parent     59ff2e15be118b70755d9709be67ed1b842cd5e6 (diff)
Merge android-4.4@59ff2e1 (v4.4.78) into msm-4.4
* refs/heads/tmp-59ff2e1 Linux 4.4.78 kvm: vmx: allow host to access guest MSR_IA32_BNDCFGS kvm: vmx: Check value written to IA32_BNDCFGS kvm: x86: Guest BNDCFGS requires guest MPX support kvm: vmx: Do not disable intercepts for BNDCFGS KVM: x86: disable MPX if host did not enable MPX XSAVE features tracing: Use SOFTIRQ_OFFSET for softirq dectection for more accurate results PM / QoS: return -EINVAL for bogus strings PM / wakeirq: Convert to SRCU sched/topology: Optimize build_group_mask() sched/topology: Fix overlapping sched_group_mask crypto: caam - fix signals handling crypto: sha1-ssse3 - Disable avx2 crypto: atmel - only treat EBUSY as transient if backlog crypto: talitos - Extend max key length for SHA384/512-HMAC and AEAD mm: fix overflow check in expand_upwards() tpm: Issue a TPM2_Shutdown for TPM2 devices. Add "shutdown" to "struct class". tpm: Provide strong locking for device removal tpm: Get rid of chip->pdev selftests/capabilities: Fix the test_execve test mnt: Make propagate_umount less slow for overlapping mount propagation trees mnt: In propgate_umount handle visiting mounts in any order mnt: In umount propagation reparent in a separate pass vt: fix unchecked __put_user() in tioclinux ioctls exec: Limit arg stack to at most 75% of _STK_LIM s390: reduce ELF_ET_DYN_BASE powerpc: move ELF_ET_DYN_BASE to 4GB / 4MB arm64: move ELF_ET_DYN_BASE to 4GB / 4MB arm: move ELF_ET_DYN_BASE to 4MB binfmt_elf: use ELF_ET_DYN_BASE only for PIE checkpatch: silence perl 5.26.0 unescaped left brace warnings fs/dcache.c: fix spin lockup issue on nlru->lock mm/list_lru.c: fix list_lru_count_node() to be race free kernel/extable.c: mark core_kernel_text notrace tools/lib/lockdep: Reduce MAX_LOCK_DEPTH to avoid overflowing lock_chain/: Depth parisc/mm: Ensure IRQs are off in switch_mm() parisc: DMA API: return error instead of BUG_ON for dma ops on non dma devs parisc: use compat_sys_keyctl() parisc: Report SIGSEGV instead of SIGBUS when running out of stack irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity cfg80211: Check if PMKID attribute is of expected size cfg80211: Validate frequencies nested in NL80211_ATTR_SCAN_FREQUENCIES cfg80211: Define nla_policy for NL80211_ATTR_LOCAL_MESH_POWER_MODE brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx() rds: tcp: use sock_create_lite() to create the accept socket vrf: fix bug_on triggered by rx when destroying a vrf net: ipv6: Compare lwstate in detecting duplicate nexthops ipv6: dad: don't remove dynamic addresses if link is down net: handle NAPI_GRO_FREE_STOLEN_HEAD case also in napi_frags_finish() bpf: prevent leaking pointer via xadd on unpriviledged net: prevent sign extension in dev_get_stats() tcp: reset sk_rx_dst in tcp_disconnect() net: dp83640: Avoid NULL pointer dereference. ipv6: avoid unregistering inet6_dev for loopback net/phy: micrel: configure intterupts after autoneg workaround net: sched: Fix one possible panic when no destroy callback net_sched: fix error recovery at qdisc creation ANDROID: android-verity: mark dev as rw for linear target ANDROID: sdcardfs: Remove unnecessary lock ANDROID: binder: don't check prio permissions on restore. 
Add BINDER_GET_NODE_DEBUG_INFO ioctl UPSTREAM: cpufreq: schedutil: Trace frequency only if it has changed UPSTREAM: cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely UPSTREAM: cpufreq: schedutil: Refactor sugov_next_freq_shared() UPSTREAM: cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start() UPSTREAM: cpufreq: schedutil: Pass sg_policy to get_next_freq() UPSTREAM: cpufreq: schedutil: move cached_raw_freq to struct sugov_policy UPSTREAM: cpufreq: schedutil: Rectify comment in sugov_irq_work() function UPSTREAM: cpufreq: schedutil: irq-work and mutex are only used in slow path UPSTREAM: cpufreq: schedutil: enable fast switch earlier UPSTREAM: cpufreq: schedutil: Avoid indented labels Linux 4.4.77 saa7134: fix warm Medion 7134 EEPROM read x86/mm/pat: Don't report PAT on CPUs that don't support it ext4: check return value of kstrtoull correctly in reserved_clusters_store staging: comedi: fix clean-up of comedi_class in comedi_init() staging: vt6556: vnt_start Fix missing call to vnt_key_init_table. tcp: fix tcp_mark_head_lost to check skb len before fragmenting md: fix super_offset endianness in super_1_rdev_size_change md: fix incorrect use of lexx_to_cpu in does_sb_need_changing perf tools: Use readdir() instead of deprecated readdir_r() again perf tests: Remove wrong semicolon in while loop in CQM test perf trace: Do not process PERF_RECORD_LOST twice perf dwarf: Guard !x86_64 definitions under #ifdef else clause perf pmu: Fix misleadingly indented assignment (whitespace) perf annotate browser: Fix behaviour of Shift-Tab with nothing focussed perf tools: Remove duplicate const qualifier perf script: Use readdir() instead of deprecated readdir_r() perf thread_map: Use readdir() instead of deprecated readdir_r() perf tools: Use readdir() instead of deprecated readdir_r() perf bench numa: Avoid possible truncation when using snprintf() perf tests: Avoid possible truncation with dirent->d_name + snprintf perf scripting perl: Fix compile error with some perl5 versions perf thread_map: Correctly size buffer used with dirent->dt_name perf intel-pt: Use __fallthrough perf top: Use __fallthrough tools strfilter: Use __fallthrough tools string: Use __fallthrough in perf_atoll() tools include: Add a __fallthrough statement mqueue: fix a use-after-free in sys_mq_notify() RDMA/uverbs: Check port number supplied by user verbs cmds KEYS: Fix an error code in request_master_key() ath10k: override CE5 config for QCA9377 x86/uaccess: Optimize copy_user_enhanced_fast_string() for short strings x86/tools: Fix gcc-7 warning in relocs.c gfs2: Fix glock rhashtable rcu bug USB: serial: qcserial: new Sierra Wireless EM7305 device ID USB: serial: option: add two Longcheer device ids pinctrl: sh-pfc: Update info pointer after SoC-specific init pinctrl: mxs: atomically switch mux and drive strength config pinctrl: sunxi: Fix SPDIF function name for A83T pinctrl: meson: meson8b: fix the NAND DQS pins pinctrl: sh-pfc: r8a7791: Fix SCIF2 pinmux data sysctl: report EINVAL if value is larger than UINT_MAX for proc_douintvec sysctl: don't print negative flag for proc_douintvec mac80211_hwsim: Replace bogus hrtimer clockid usb: Fix typo in the definition of Endpoint[out]Request usb: usbip: set buffer pointers to NULL after free Add USB quirk for HVR-950q to avoid intermittent device resets USB: serial: cp210x: add ID for CEL EM3588 USB ZigBee stick usb: dwc3: replace %p with %pK drm/virtio: don't leak bo on drm_gem_object_init failure tracing/kprobes: Allow to create probe with a 
module name starting with a digit mm: fix classzone_idx underflow in shrink_zones() bgmac: reset & enable Ethernet core before using it driver core: platform: fix race condition with driver_override fs: completely ignore unknown open flags fs: add a VALID_OPEN_FLAGS ANDROID: binder: add RT inheritance flag to node. ANDROID: binder: improve priority inheritance. ANDROID: binder: add min sched_policy to node. ANDROID: binder: add support for RT prio inheritance. ANDROID: binder: push new transactions to waiting threads. ANDROID: binder: remove proc waitqueue FROMLIST: binder: remove global binder lock FROMLIST: binder: fix death race conditions FROMLIST: binder: protect against stale pointers in print_binder_transaction FROMLIST: binder: protect binder_ref with outer lock FROMLIST: binder: use inner lock to protect thread accounting FROMLIST: binder: protect transaction_stack with inner lock. FROMLIST: binder: protect proc->threads with inner_lock FROMLIST: binder: protect proc->nodes with inner lock FROMLIST: binder: add spinlock to protect binder_node FROMLIST: binder: add spinlocks to protect todo lists FROMLIST: binder: use inner lock to sync work dq and node counts FROMLIST: binder: introduce locking helper functions FROMLIST: binder: use node->tmp_refs to ensure node safety FROMLIST: binder: refactor binder ref inc/dec for thread safety FROMLIST: binder: make sure accesses to proc/thread are safe FROMLIST: binder: make sure target_node has strong ref FROMLIST: binder: guarantee txn complete / errors delivered in-order FROMLIST: binder: refactor binder_pop_transaction FROMLIST: binder: use atomic for transaction_log index FROMLIST: binder: add more debug info when allocation fails. FROMLIST: binder: protect against two threads freeing buffer FROMLIST: binder: remove dead code in binder_get_ref_for_node FROMLIST: binder: don't modify thread->looper from other threads FROMLIST: binder: avoid race conditions when enqueuing txn FROMLIST: binder: refactor queue management in binder_thread_read FROMLIST: binder: add log information for binder transaction failures FROMLIST: binder: make binder_last_id an atomic FROMLIST: binder: change binder_stats to atomics FROMLIST: binder: add protection for non-perf cases FROMLIST: binder: remove binder_debug_no_lock mechanism FROMLIST: binder: move binder_alloc to separate file FROMLIST: binder: separate out binder_alloc functions FROMLIST: binder: remove unneeded cleanup code FROMLIST: binder: separate binder allocator structure from binder proc FROMLIST: binder: Use wake up hint for synchronous transactions. Revert "android: binder: move global binder state into context struct." sched: walt: fix window misalignment when HZ=300 ANDROID: android-base.cfg: remove CONFIG_CGROUP_DEBUG ANDROID: sdcardfs: use mount_nodev and fix a issue in sdcardfs_kill_sb Conflicts: drivers/android/binder.c drivers/net/wireless/ath/ath10k/pci.c Change-Id: Ic6f82c2ec9929733a16a03bb3b745187e002f4f6 Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Diffstat (limited to 'drivers/android/binder_alloc.c')
-rw-r--r--    drivers/android/binder_alloc.c    802
1 file changed, 802 insertions, 0 deletions
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 000000000000..aabfebac6e57
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,802 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, S_IWUSR | S_IRUGO);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return alloc->buffer +
+ alloc->buffer_size - (void *)buffer->data;
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
+ - offsetof(struct binder_buffer, data));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for buffer that matches user data
+ * pointer.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
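+
+/*
+ * Illustrative note on the pointer translation above (a sketch, not a
+ * statement of the wider driver's behaviour): the address handed to
+ * userspace for a buffer is, in effect,
+ *
+ *   user_ptr = (uintptr_t)buffer->data + alloc->user_buffer_offset
+ *
+ * so the locked lookup undoes that translation with
+ *
+ *   kern_ptr = (struct binder_buffer *)(user_ptr
+ *              - alloc->user_buffer_offset
+ *              - offsetof(struct binder_buffer, data));
+ *
+ * and then confirms the result really is an allocated buffer by walking
+ * the rb tree. A repeated free of the same user_ptr returns NULL because
+ * the first call already set buffer->free_in_progress.
+ */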
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (*page == NULL) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL, page);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ alloc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return: The allocated buffer or an ERR_PTR-encoded error code on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
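+
+/*
+ * Worked size example (illustrative only): for data_size = 100,
+ * offsets_size = 16 and extra_buffers_size = 0 on a 64-bit kernel,
+ * the request rounds up to
+ *
+ *   ALIGN(100, 8) + ALIGN(16, 8) + ALIGN(0, 8) = 104 + 16 + 0 = 120
+ *
+ * bytes of payload, plus sizeof(struct binder_buffer) of metadata when
+ * the chosen free chunk is split. A caller should check the result with
+ * IS_ERR() rather than comparing against NULL, since failures such as
+ * -ENOSPC and -ESRCH are returned as error pointers.
+ */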
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+ alloc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(alloc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < alloc->buffer);
+ BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buf()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
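+
+/*
+ * Minimal usage sketch (hypothetical caller, assuming a valid alloc and
+ * a user pointer taken from a BC_FREE_BUFFER command; the real driver
+ * does further transaction bookkeeping before freeing):
+ */
+static void __maybe_unused binder_alloc_free_user_buf_example(
+ struct binder_alloc *alloc, uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ buffer = binder_alloc_prepare_to_free(alloc, user_ptr);
+ if (buffer == NULL)
+ return; /* unknown pointer or second free attempt */
+ binder_alloc_free_buf(alloc, buffer);
+}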
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
+ alloc->pid, vma->vm_start, vma->vm_end,
+ alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ if (binder_update_page_range(alloc, 1, alloc->buffer,
+ alloc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = alloc->buffer;
+ INIT_LIST_HEAD(&alloc->buffers);
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
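+
+/*
+ * Sketch of a hypothetical ->mmap() caller (the real binder_mmap() in
+ * binder.c also validates the vma size and flags first); shown only to
+ * illustrate the documented return codes:
+ */
+static int __maybe_unused binder_mmap_example(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret = binder_alloc_mmap_handler(alloc, vma);
+
+ if (ret == -EBUSY)
+ pr_err("%d: buffer space already mapped\n", alloc->pid);
+ return ret; /* 0 on success, -ENOMEM if the mapping failed */
+}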
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ struct binder_buffer *buffer;
+
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+
+ if (!alloc->pages[i])
+ continue;
+
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK not freed\n",
+ __func__, alloc->pid, i, page_addr);
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i]);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
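+
+/*
+ * Illustrative seq_file user (hypothetical helper; binder's real debugfs
+ * output lives in binder.c): dump the allocated-buffer count and then
+ * each buffer for one proc.
+ */
+static int __maybe_unused binder_alloc_show_example(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ seq_printf(m, "pid %d: %d allocated buffers\n", alloc->pid,
+ binder_alloc_get_allocated_count(alloc));
+ binder_alloc_print_allocated(m, alloc);
+ return 0;
+}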
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+}
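+
+/*
+ * Lifecycle sketch (hypothetical glue, assuming the fops wiring in
+ * binder.c): binder_alloc_init() runs at open() time, the mapping is set
+ * up later via binder_alloc_mmap_handler(), binder_alloc_vma_close()
+ * runs when the vma goes away, and binder_alloc_deferred_release() frees
+ * any remaining buffers and pages when the proc is finally torn down.
+ */
+static void __maybe_unused binder_alloc_lifecycle_example(struct binder_alloc *alloc)
+{
+ binder_alloc_init(alloc); /* at open() */
+ /* ... mmap() -> binder_alloc_mmap_handler(alloc, vma) ... */
+ binder_alloc_vma_close(alloc); /* vma torn down */
+ binder_alloc_deferred_release(alloc); /* final release */
+}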
+