| author | Sunil Khatri <sunilkh@codeaurora.org> | 2017-09-04 14:05:03 +0530 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2017-09-07 05:18:38 -0700 |
| commit | 8b6bbeab08fc2aec26387806fe4b27e95bb27cbc (patch) | |
| tree | 8dc8bb09e12cc0880870eb550bacc2249c7dfb7a /drivers/gpu | |
| parent | b864286bdb88dda9170c9d088bc9bee1eb0cfd34 (diff) | |
msm: kgsl: Protect the bind object against bad dereference
Sparse bind objects are unbound by bind id. This can be
exploited by a malicious application calling unbind with the
same bind id again and again, leading to a race condition
between two threads where one frees the bind object and the
second then uses the freed object, causing a bad dereference.
Change-Id: I542a008fc647489560667fb5016453a0c3448f7a
Signed-off-by: Sunil Khatri <sunilkh@codeaurora.org>
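The bug class here is a classic lookup-then-free race. The sketch below is a minimal user-space illustration of that shape, using hypothetical names (`bind_obj`, `unbind_racy`, `unbind_safe`) rather than the kgsl code itself: two threads resolve the same id to the same object, and in the racy variant the loser dereferences and double-frees memory the winner has already released.

```c
/*
 * Minimal user-space sketch of the race described above.
 * Hypothetical names and types; not the kgsl code, just the
 * shape of the bug and of the fix.
 *
 * Build: cc -pthread race.c -o race
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bind_obj {
        int id;
        size_t size;
};

static struct bind_obj *table[16];      /* bind id -> object */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Racy: the lookup and the free/use are separate critical sections. */
static void unbind_racy(int id)
{
        struct bind_obj *obj;

        pthread_mutex_lock(&table_lock);
        obj = table[id];                /* both threads may fetch the same pointer */
        pthread_mutex_unlock(&table_lock);

        if (obj == NULL)
                return;

        /* the second thread can reach here after the first freed obj */
        printf("unbinding %zu bytes\n", obj->size);     /* use after free */
        free(obj);                                      /* double free */
        table[id] = NULL;
}

/* Fixed: lookup, use, and unpublish happen under one lock. */
static void unbind_safe(int id)
{
        struct bind_obj *obj;

        pthread_mutex_lock(&table_lock);
        obj = table[id];
        if (obj != NULL) {
                printf("unbinding %zu bytes\n", obj->size);
                table[id] = NULL;       /* unpublish before the lock drops */
                free(obj);
        }
        pthread_mutex_unlock(&table_lock);
}

static void *worker(void *arg)
{
        unbind_safe((int)(long)arg);    /* swap in unbind_racy to see the bug */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        table[3] = calloc(1, sizeof(*table[3]));
        if (table[3] == NULL)
                return 1;
        table[3]->id = 3;
        table[3]->size = 4096;

        /* two threads unbind the same id, as the commit message describes */
        pthread_create(&a, NULL, worker, (void *)3L);
        pthread_create(&b, NULL, worker, (void *)3L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}
```

In the racy variant, both threads can observe the same non-NULL pointer before either frees it; the safe variant makes lookup, use, and removal one critical section, which is the property the patch establishes for the bind tree by holding entry->bind_lock across _find_containing_bind_obj() and _sparse_unbind().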
Diffstat (limited to 'drivers/gpu')
| -rw-r--r-- | drivers/gpu/msm/kgsl.c | 58 |
1 file changed, 32 insertions(+), 26 deletions(-)
```diff
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index afb489f10172..4666d5e41d6c 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3451,7 +3451,7 @@ static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
         struct sparse_bind_object *new;
         struct rb_node **node, *parent = NULL;
 
-        new = kzalloc(sizeof(*new), GFP_KERNEL);
+        new = kzalloc(sizeof(*new), GFP_ATOMIC);
         if (new == NULL)
                 return -ENOMEM;
 
@@ -3485,7 +3485,6 @@ static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
                 struct sparse_bind_object *obj,
                 uint64_t v_offset, uint64_t size)
 {
-        spin_lock(&entry->bind_lock);
         if (v_offset == obj->v_off && size >= obj->size) {
                 /*
                  * We are all encompassing, remove the entry and free
@@ -3518,7 +3517,6 @@ static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
 
                 obj->size = v_offset - obj->v_off;
 
-                spin_unlock(&entry->bind_lock);
                 ret = _sparse_add_to_bind_tree(entry, v_offset + size,
                                 obj->p_memdesc,
                                 obj->p_off + (v_offset - obj->v_off) + size,
@@ -3528,11 +3526,10 @@ static int _sparse_rm_from_bind_tree(struct kgsl_mem_entry *entry,
                 return ret;
         }
 
-        spin_unlock(&entry->bind_lock);
-
         return 0;
 }
 
+/* entry->bind_lock must be held by the caller */
 static struct sparse_bind_object *_find_containing_bind_obj(
                 struct kgsl_mem_entry *entry,
                 uint64_t offset, uint64_t size)
@@ -3540,8 +3537,6 @@ static struct sparse_bind_object *_find_containing_bind_obj(
         struct sparse_bind_object *obj = NULL;
         struct rb_node *node = entry->bind_tree.rb_node;
 
-        spin_lock(&entry->bind_lock);
-
         while (node != NULL) {
                 obj = rb_entry(node, struct sparse_bind_object, node);
 
@@ -3560,33 +3555,16 @@ static struct sparse_bind_object *_find_containing_bind_obj(
                 }
         }
 
-        spin_unlock(&entry->bind_lock);
-
         return obj;
 }
 
+/* entry->bind_lock must be held by the caller */
 static int _sparse_unbind(struct kgsl_mem_entry *entry,
                 struct sparse_bind_object *bind_obj,
                 uint64_t offset, uint64_t size)
 {
-        struct kgsl_memdesc *memdesc = bind_obj->p_memdesc;
-        struct kgsl_pagetable *pt = memdesc->pagetable;
         int ret;
 
-        if (memdesc->cur_bindings < (size / PAGE_SIZE))
-                return -EINVAL;
-
-        memdesc->cur_bindings -= size / PAGE_SIZE;
-
-        ret = kgsl_mmu_unmap_offset(pt, memdesc,
-                        entry->memdesc.gpuaddr, offset, size);
-        if (ret)
-                return ret;
-
-        ret = kgsl_mmu_sparse_dummy_map(pt, &entry->memdesc, offset, size);
-        if (ret)
-                return ret;
-
         ret = _sparse_rm_from_bind_tree(entry, bind_obj, offset, size);
         if (ret == 0) {
                 atomic_long_sub(size, &kgsl_driver.stats.mapped);
@@ -3600,6 +3578,8 @@ static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
         struct kgsl_mem_entry *virt_entry)
 {
         struct sparse_bind_object *bind_obj;
+        struct kgsl_memdesc *memdesc;
+        struct kgsl_pagetable *pt;
         int ret = 0;
         uint64_t size = obj->size;
         uint64_t tmp_size = obj->size;
@@ -3607,9 +3587,14 @@ static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
 
         while (size > 0 && ret == 0) {
                 tmp_size = size;
+
+                spin_lock(&virt_entry->bind_lock);
                 bind_obj = _find_containing_bind_obj(virt_entry, offset, size);
-                if (bind_obj == NULL)
+
+                if (bind_obj == NULL) {
+                        spin_unlock(&virt_entry->bind_lock);
                         return 0;
+                }
 
                 if (bind_obj->v_off > offset) {
                         tmp_size = size - bind_obj->v_off - offset;
@@ -3626,7 +3611,28 @@ static long sparse_unbind_range(struct kgsl_sparse_binding_object *obj,
                         tmp_size = bind_obj->size;
                 }
 
+                memdesc = bind_obj->p_memdesc;
+                pt = memdesc->pagetable;
+
+                if (memdesc->cur_bindings < (tmp_size / PAGE_SIZE)) {
+                        spin_unlock(&virt_entry->bind_lock);
+                        return -EINVAL;
+                }
+
+                memdesc->cur_bindings -= tmp_size / PAGE_SIZE;
+
                 ret = _sparse_unbind(virt_entry, bind_obj, offset, tmp_size);
+                spin_unlock(&virt_entry->bind_lock);
+
+                ret = kgsl_mmu_unmap_offset(pt, memdesc,
+                                virt_entry->memdesc.gpuaddr, offset, tmp_size);
+                if (ret)
+                        return ret;
+
+                ret = kgsl_mmu_sparse_dummy_map(pt, memdesc, offset, tmp_size);
+                if (ret)
+                        return ret;
+
                 if (ret == 0) {
                         offset += tmp_size;
                         size -= tmp_size;
```
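A note on the design choices visible in the diff: once virt_entry->bind_lock is held across the whole lookup/validate/remove sequence in sparse_unbind_range(), the other changes follow. _sparse_add_to_bind_tree() can now be reached (via _sparse_rm_from_bind_tree()) with the spinlock held, so its allocation switches from GFP_KERNEL to GFP_ATOMIC, since sleeping is not allowed under a spinlock. _find_containing_bind_obj() and _sparse_unbind() shed their internal locking and are instead annotated "entry->bind_lock must be held by the caller". And the kgsl_mmu_unmap_offset()/kgsl_mmu_sparse_dummy_map() calls move out of _sparse_unbind() into sparse_unbind_range(), where they run only after spin_unlock(), presumably because the MMU operations may block.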
