Diffstat (limited to 'drivers'): 197 files changed, 11246 insertions, 3145 deletions
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index a82fc022d34b..4d4cdc1a6e25 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC config ANDROID_BINDER_DEVICES string "Android Binder devices" depends on ANDROID_BINDER_IPC - default "binder" + default "binder,hwbinder,vndbinder" ---help--- Default value for the binder.devices parameter. diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d0334e50f2f9..d1490be45c67 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -18,6 +18,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <asm/cacheflush.h> +#include <linux/atomic.h> #include <linux/fdtable.h> #include <linux/file.h> #include <linux/freezer.h> @@ -46,19 +47,11 @@ #include <uapi/linux/android/binder.h> #include "binder_trace.h" -static DEFINE_MUTEX(binder_main_lock); -static DEFINE_MUTEX(binder_deferred_lock); -static DEFINE_MUTEX(binder_mmap_lock); - static HLIST_HEAD(binder_devices); -static HLIST_HEAD(binder_procs); -static HLIST_HEAD(binder_deferred_list); -static HLIST_HEAD(binder_dead_nodes); static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; -static int binder_last_id; -static struct workqueue_struct *binder_deferred_workqueue; +atomic_t binder_last_id; #define BINDER_DEBUG_ENTRY(name) \ static int binder_##name##_open(struct inode *inode, struct file *file) \ @@ -173,20 +166,24 @@ enum binder_stat_types { struct binder_stats { int br[_IOC_NR(BR_FAILED_REPLY) + 1]; int bc[_IOC_NR(BC_REPLY_SG) + 1]; - int obj_created[BINDER_STAT_COUNT]; - int obj_deleted[BINDER_STAT_COUNT]; }; -static struct binder_stats binder_stats; +/* These are still global, since it's not always easy to get the context */ +struct binder_obj_stats { + atomic_t obj_created[BINDER_STAT_COUNT]; + atomic_t obj_deleted[BINDER_STAT_COUNT]; +}; + +static struct binder_obj_stats binder_obj_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { - binder_stats.obj_deleted[type]++; + atomic_inc(&binder_obj_stats.obj_deleted[type]); } static inline void binder_stats_created(enum binder_stat_types type) { - binder_stats.obj_created[type]++; + atomic_inc(&binder_obj_stats.obj_created[type]); } struct binder_transaction_log_entry { @@ -207,8 +204,6 @@ struct binder_transaction_log { int full; struct binder_transaction_log_entry entry[32]; }; -static struct binder_transaction_log binder_transaction_log; -static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) @@ -229,6 +224,21 @@ struct binder_context { struct binder_node *binder_context_mgr_node; kuid_t binder_context_mgr_uid; const char *name; + + struct mutex binder_main_lock; + struct mutex binder_deferred_lock; + struct mutex binder_mmap_lock; + + struct hlist_head binder_procs; + struct hlist_head binder_dead_nodes; + struct hlist_head binder_deferred_list; + + struct work_struct deferred_work; + struct workqueue_struct *binder_deferred_workqueue; + struct binder_transaction_log transaction_log; + struct binder_transaction_log transaction_log_failed; + + struct binder_stats binder_stats; }; struct binder_device { @@ -459,18 +469,19 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) return retval; } -static inline void binder_lock(const char *tag) +static inline void binder_lock(struct binder_context *context, const char *tag) { 
trace_binder_lock(tag); - mutex_lock(&binder_main_lock); + mutex_lock(&context->binder_main_lock); preempt_disable(); trace_binder_locked(tag); } -static inline void binder_unlock(const char *tag) +static inline void binder_unlock(struct binder_context *context, + const char *tag) { trace_binder_unlock(tag); - mutex_unlock(&binder_main_lock); + mutex_unlock(&context->binder_main_lock); preempt_enable(); } @@ -1017,7 +1028,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc, binder_stats_created(BINDER_STAT_NODE); rb_link_node(&node->rb_node, parent, p); rb_insert_color(&node->rb_node, &proc->nodes); - node->debug_id = ++binder_last_id; + node->debug_id = atomic_inc_return(&binder_last_id); node->proc = proc; node->ptr = ptr; node->cookie = cookie; @@ -1159,7 +1170,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, if (new_ref == NULL) return NULL; binder_stats_created(BINDER_STAT_REF); - new_ref->debug_id = ++binder_last_id; + new_ref->debug_id = atomic_inc_return(&binder_last_id); new_ref->proc = proc; new_ref->node = node; rb_link_node(&new_ref->rb_node_node, parent, p); @@ -1920,7 +1931,7 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; - e = binder_transaction_log_add(&binder_transaction_log); + e = binder_transaction_log_add(&context->transaction_log); e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; @@ -2042,7 +2053,7 @@ static void binder_transaction(struct binder_proc *proc, } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); - t->debug_id = ++binder_last_id; + t->debug_id = atomic_inc_return(&binder_last_id); e->debug_id = t->debug_id; if (reply) @@ -2315,7 +2326,8 @@ err_no_context_mgr_node: { struct binder_transaction_log_entry *fe; - fe = binder_transaction_log_add(&binder_transaction_log_failed); + fe = binder_transaction_log_add( + &context->transaction_log_failed); *fe = *e; } @@ -2343,8 +2355,8 @@ static int binder_thread_write(struct binder_proc *proc, return -EFAULT; ptr += sizeof(uint32_t); trace_binder_command(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { - binder_stats.bc[_IOC_NR(cmd)]++; + if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) { + context->binder_stats.bc[_IOC_NR(cmd)]++; proc->stats.bc[_IOC_NR(cmd)]++; thread->stats.bc[_IOC_NR(cmd)]++; } @@ -2710,8 +2722,8 @@ static void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) { trace_binder_return(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { - binder_stats.br[_IOC_NR(cmd)]++; + if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) { + proc->context->binder_stats.br[_IOC_NR(cmd)]++; proc->stats.br[_IOC_NR(cmd)]++; thread->stats.br[_IOC_NR(cmd)]++; } @@ -2775,7 +2787,7 @@ retry: if (wait_for_proc_work) proc->ready_threads++; - binder_unlock(__func__); + binder_unlock(proc->context, __func__); trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, @@ -2802,7 +2814,7 @@ retry: ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); } - binder_lock(__func__); + binder_lock(proc->context, __func__); if (wait_for_proc_work) proc->ready_threads--; @@ -3188,14 +3200,14 @@ static unsigned int binder_poll(struct file *filp, struct binder_thread *thread = NULL; int wait_for_proc_work; - binder_lock(__func__); + binder_lock(proc->context, __func__); thread = binder_get_thread(proc); wait_for_proc_work = 
thread->transaction_stack == NULL && list_empty(&thread->todo) && thread->return_error == BR_OK; - binder_unlock(__func__); + binder_unlock(proc->context, __func__); if (wait_for_proc_work) { if (binder_has_proc_work(proc, thread)) @@ -3322,6 +3334,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; + struct binder_context *context = proc->context; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; @@ -3335,7 +3348,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (ret) goto err_unlocked; - binder_lock(__func__); + binder_lock(context, __func__); thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; @@ -3386,7 +3399,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) err: if (thread) thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; - binder_unlock(__func__); + binder_unlock(context, __func__); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); @@ -3440,7 +3453,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) const char *failure_string; struct binder_buffer *buffer; - if (proc->tsk != current) + if (proc->tsk != current->group_leader) return -EINVAL; if ((vma->vm_end - vma->vm_start) > SZ_4M) @@ -3459,7 +3472,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; - mutex_lock(&binder_mmap_lock); + mutex_lock(&proc->context->binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; @@ -3474,7 +3487,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) } proc->buffer = area->addr; proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; - mutex_unlock(&binder_mmap_lock); + mutex_unlock(&proc->context->binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { @@ -3523,12 +3536,12 @@ err_alloc_small_buf_failed: kfree(proc->pages); proc->pages = NULL; err_alloc_pages_failed: - mutex_lock(&binder_mmap_lock); + mutex_lock(&proc->context->binder_mmap_lock); vfree(proc->buffer); proc->buffer = NULL; err_get_vm_area_failed: err_already_mapped: - mutex_unlock(&binder_mmap_lock); + mutex_unlock(&proc->context->binder_mmap_lock); err_bad_arg: pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); @@ -3546,8 +3559,8 @@ static int binder_open(struct inode *nodp, struct file *filp) proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; - get_task_struct(current); - proc->tsk = current; + get_task_struct(current->group_leader); + proc->tsk = current->group_leader; INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); @@ -3555,15 +3568,15 @@ static int binder_open(struct inode *nodp, struct file *filp) miscdev); proc->context = &binder_dev->context; - binder_lock(__func__); + binder_lock(proc->context, __func__); binder_stats_created(BINDER_STAT_PROC); - hlist_add_head(&proc->proc_node, &binder_procs); + hlist_add_head(&proc->proc_node, &proc->context->binder_procs); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); filp->private_data = proc; - binder_unlock(__func__); + 
binder_unlock(proc->context, __func__); if (binder_debugfs_dir_entry_proc) { char strbuf[11]; @@ -3628,6 +3641,7 @@ static int binder_release(struct inode *nodp, struct file *filp) static int binder_node_release(struct binder_node *node, int refs) { struct binder_ref *ref; + struct binder_context *context = node->proc->context; int death = 0; list_del_init(&node->work.entry); @@ -3643,7 +3657,7 @@ static int binder_node_release(struct binder_node *node, int refs) node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; - hlist_add_head(&node->dead_node, &binder_dead_nodes); + hlist_add_head(&node->dead_node, &context->binder_dead_nodes); hlist_for_each_entry(ref, &node->refs, node_entry) { refs++; @@ -3708,7 +3722,8 @@ static void binder_deferred_release(struct binder_proc *proc) node = rb_entry(n, struct binder_node, rb_node); nodes++; rb_erase(&node->rb_node, &proc->nodes); - incoming_refs = binder_node_release(node, incoming_refs); + incoming_refs = binder_node_release(node, + incoming_refs); } outgoing_refs = 0; @@ -3780,18 +3795,20 @@ static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; + struct binder_context *context = + container_of(work, struct binder_context, deferred_work); int defer; do { trace_binder_lock(__func__); - mutex_lock(&binder_main_lock); + mutex_lock(&context->binder_main_lock); trace_binder_locked(__func__); - mutex_lock(&binder_deferred_lock); + mutex_lock(&context->binder_deferred_lock); preempt_disable(); - if (!hlist_empty(&binder_deferred_list)) { - proc = hlist_entry(binder_deferred_list.first, + if (!hlist_empty(&context->binder_deferred_list)) { + proc = hlist_entry(context->binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; @@ -3800,7 +3817,7 @@ static void binder_deferred_func(struct work_struct *work) proc = NULL; defer = 0; } - mutex_unlock(&binder_deferred_lock); + mutex_unlock(&context->binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { @@ -3816,25 +3833,25 @@ static void binder_deferred_func(struct work_struct *work) binder_deferred_release(proc); /* frees proc */ trace_binder_unlock(__func__); - mutex_unlock(&binder_main_lock); + mutex_unlock(&context->binder_main_lock); preempt_enable_no_resched(); if (files) put_files_struct(files); } while (proc); } -static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { - mutex_lock(&binder_deferred_lock); + mutex_lock(&proc->context->binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, - &binder_deferred_list); - queue_work(binder_deferred_workqueue, &binder_deferred_work); + &proc->context->binder_deferred_list); + queue_work(proc->context->binder_deferred_workqueue, + &proc->context->deferred_work); } - mutex_unlock(&binder_deferred_lock); + mutex_unlock(&proc->context->binder_deferred_lock); } static void print_binder_transaction(struct seq_file *m, const char *prefix, @@ -4065,8 +4082,20 @@ static const char * const binder_objstat_strings[] = { "transaction_complete" }; +static void add_binder_stats(struct binder_stats *from, struct binder_stats *to) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(to->bc); i++) + to->bc[i] += from->bc[i]; + + for (i = 0; i < ARRAY_SIZE(to->br); i++) + to->br[i] += from->br[i]; +} + static void 
print_binder_stats(struct seq_file *m, const char *prefix, - struct binder_stats *stats) + struct binder_stats *stats, + struct binder_obj_stats *obj_stats) { int i; @@ -4086,16 +4115,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix, binder_return_strings[i], stats->br[i]); } - BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + if (!obj_stats) + return; + + BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); - BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != - ARRAY_SIZE(stats->obj_deleted)); - for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { - if (stats->obj_created[i] || stats->obj_deleted[i]) + BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) != + ARRAY_SIZE(obj_stats->obj_deleted)); + for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) { + int obj_created = atomic_read(&obj_stats->obj_created[i]); + int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]); + + if (obj_created || obj_deleted) seq_printf(m, "%s%s: active %d total %d\n", prefix, - binder_objstat_strings[i], - stats->obj_created[i] - stats->obj_deleted[i], - stats->obj_created[i]); + binder_objstat_strings[i], + obj_created - obj_deleted, obj_created); } } @@ -4150,85 +4184,131 @@ static void print_binder_proc_stats(struct seq_file *m, } seq_printf(m, " pending transactions: %d\n", count); - print_binder_stats(m, " ", &proc->stats); + print_binder_stats(m, " ", &proc->stats, NULL); } static int binder_state_show(struct seq_file *m, void *unused) { + struct binder_device *device; + struct binder_context *context; struct binder_proc *proc; struct binder_node *node; int do_lock = !binder_debug_no_lock; - - if (do_lock) - binder_lock(__func__); + bool wrote_dead_nodes_header = false; seq_puts(m, "binder state:\n"); - if (!hlist_empty(&binder_dead_nodes)) - seq_puts(m, "dead nodes:\n"); - hlist_for_each_entry(node, &binder_dead_nodes, dead_node) - print_binder_node(m, node); + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + if (do_lock) + binder_lock(context, __func__); + if (!wrote_dead_nodes_header && + !hlist_empty(&context->binder_dead_nodes)) { + seq_puts(m, "dead nodes:\n"); + wrote_dead_nodes_header = true; + } + hlist_for_each_entry(node, &context->binder_dead_nodes, + dead_node) + print_binder_node(m, node); + + if (do_lock) + binder_unlock(context, __func__); + } - hlist_for_each_entry(proc, &binder_procs, proc_node) - print_binder_proc(m, proc, 1); - if (do_lock) - binder_unlock(__func__); + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + if (do_lock) + binder_lock(context, __func__); + + hlist_for_each_entry(proc, &context->binder_procs, proc_node) + print_binder_proc(m, proc, 1); + if (do_lock) + binder_unlock(context, __func__); + } return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { + struct binder_device *device; + struct binder_context *context; struct binder_proc *proc; + struct binder_stats total_binder_stats; int do_lock = !binder_debug_no_lock; - if (do_lock) - binder_lock(__func__); + memset(&total_binder_stats, 0, sizeof(struct binder_stats)); + + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + if (do_lock) + binder_lock(context, __func__); + + add_binder_stats(&context->binder_stats, &total_binder_stats); + + if (do_lock) + binder_unlock(context, __func__); + } seq_puts(m, "binder stats:\n"); + print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats); - print_binder_stats(m, "", 
&binder_stats); + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + if (do_lock) + binder_lock(context, __func__); - hlist_for_each_entry(proc, &binder_procs, proc_node) - print_binder_proc_stats(m, proc); - if (do_lock) - binder_unlock(__func__); + hlist_for_each_entry(proc, &context->binder_procs, proc_node) + print_binder_proc_stats(m, proc); + if (do_lock) + binder_unlock(context, __func__); + } return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { + struct binder_device *device; + struct binder_context *context; struct binder_proc *proc; int do_lock = !binder_debug_no_lock; - if (do_lock) - binder_lock(__func__); - seq_puts(m, "binder transactions:\n"); - hlist_for_each_entry(proc, &binder_procs, proc_node) - print_binder_proc(m, proc, 0); - if (do_lock) - binder_unlock(__func__); + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + if (do_lock) + binder_lock(context, __func__); + + hlist_for_each_entry(proc, &context->binder_procs, proc_node) + print_binder_proc(m, proc, 0); + if (do_lock) + binder_unlock(context, __func__); + } return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { + struct binder_device *device; + struct binder_context *context; struct binder_proc *itr; int pid = (unsigned long)m->private; int do_lock = !binder_debug_no_lock; - if (do_lock) - binder_lock(__func__); + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + if (do_lock) + binder_lock(context, __func__); - hlist_for_each_entry(itr, &binder_procs, proc_node) { - if (itr->pid == pid) { - seq_puts(m, "binder proc state:\n"); - print_binder_proc(m, itr, 1); + hlist_for_each_entry(itr, &context->binder_procs, proc_node) { + if (itr->pid == pid) { + seq_puts(m, "binder proc state:\n"); + print_binder_proc(m, itr, 1); + } } + if (do_lock) + binder_unlock(context, __func__); } - if (do_lock) - binder_unlock(__func__); return 0; } @@ -4243,11 +4323,10 @@ static void print_binder_transaction_log_entry(struct seq_file *m, e->to_node, e->target_handle, e->data_size, e->offsets_size); } -static int binder_transaction_log_show(struct seq_file *m, void *unused) +static int print_binder_transaction_log(struct seq_file *m, + struct binder_transaction_log *log) { - struct binder_transaction_log *log = m->private; int i; - if (log->full) { for (i = log->next; i < ARRAY_SIZE(log->entry); i++) print_binder_transaction_log_entry(m, &log->entry[i]); @@ -4257,6 +4336,31 @@ static int binder_transaction_log_show(struct seq_file *m, void *unused) return 0; } +static int binder_transaction_log_show(struct seq_file *m, void *unused) +{ + struct binder_device *device; + struct binder_context *context; + + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + print_binder_transaction_log(m, &context->transaction_log); + } + return 0; +} + +static int binder_failed_transaction_log_show(struct seq_file *m, void *unused) +{ + struct binder_device *device; + struct binder_context *context; + + hlist_for_each_entry(device, &binder_devices, hlist) { + context = &device->context; + print_binder_transaction_log(m, + &context->transaction_log_failed); + } + return 0; +} + static const struct file_operations binder_fops = { .owner = THIS_MODULE, .poll = binder_poll, @@ -4272,11 +4376,20 @@ BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); +BINDER_DEBUG_ENTRY(failed_transaction_log); 
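The binder_*_show() debugfs routines above no longer take one global lock; they walk the list of binder devices and take each device's own context lock while dumping it. A minimal sketch of that pattern, assuming it sits inside binder.c next to the static binder_devices hlist (the example_ name is illustrative and not part of the patch):

/*
 * Illustrative only: per-device dump pattern used once binder_main_lock
 * moves into struct binder_context.  Relies on the static binder_devices
 * hlist and the struct members added by this patch.
 */
static int example_dump_all_contexts(struct seq_file *m)
{
	struct binder_device *device;
	struct binder_context *context;
	struct binder_proc *proc;

	hlist_for_each_entry(device, &binder_devices, hlist) {
		context = &device->context;
		mutex_lock(&context->binder_main_lock);
		seq_printf(m, "context %s:\n", context->name);
		hlist_for_each_entry(proc, &context->binder_procs, proc_node)
			seq_printf(m, "  proc %d\n", proc->pid);
		mutex_unlock(&context->binder_main_lock);
	}
	return 0;
}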
+ +static void __init free_binder_device(struct binder_device *device) +{ + if (device->context.binder_deferred_workqueue) + destroy_workqueue(device->context.binder_deferred_workqueue); + kfree(device); +} static int __init init_binder_device(const char *name) { int ret; struct binder_device *binder_device; + struct binder_context *context; binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); if (!binder_device) @@ -4286,31 +4399,65 @@ static int __init init_binder_device(const char *name) binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; - binder_device->context.binder_context_mgr_uid = INVALID_UID; - binder_device->context.name = name; + context = &binder_device->context; + context->binder_context_mgr_uid = INVALID_UID; + context->name = name; + + mutex_init(&context->binder_main_lock); + mutex_init(&context->binder_deferred_lock); + mutex_init(&context->binder_mmap_lock); + + context->binder_deferred_workqueue = + create_singlethread_workqueue(name); + + if (!context->binder_deferred_workqueue) { + ret = -ENOMEM; + goto err_create_singlethread_workqueue_failed; + } + + INIT_HLIST_HEAD(&context->binder_procs); + INIT_HLIST_HEAD(&context->binder_dead_nodes); + INIT_HLIST_HEAD(&context->binder_deferred_list); + INIT_WORK(&context->deferred_work, binder_deferred_func); ret = misc_register(&binder_device->miscdev); if (ret < 0) { - kfree(binder_device); - return ret; + goto err_misc_register_failed; } hlist_add_head(&binder_device->hlist, &binder_devices); + return ret; + +err_create_singlethread_workqueue_failed: +err_misc_register_failed: + free_binder_device(binder_device); return ret; } static int __init binder_init(void) { - int ret; + int ret = 0; char *device_name, *device_names; struct binder_device *device; struct hlist_node *tmp; - binder_deferred_workqueue = create_singlethread_workqueue("binder"); - if (!binder_deferred_workqueue) + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); + if (!device_names) return -ENOMEM; + strcpy(device_names, binder_devices_param); + + while ((device_name = strsep(&device_names, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } + binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", @@ -4335,30 +4482,13 @@ static int __init binder_init(void) debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, - &binder_transaction_log, + NULL, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, - &binder_transaction_log_failed, - &binder_transaction_log_fops); - } - - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. 
- */ - device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } - strcpy(device_names, binder_devices_param); - - while ((device_name = strsep(&device_names, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; + NULL, + &binder_failed_transaction_log_fops); } return ret; @@ -4367,12 +4497,8 @@ err_init_binder_device_failed: hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { misc_deregister(&device->miscdev); hlist_del(&device->hlist); - kfree(device); + free_binder_device(device); } -err_alloc_device_names_failed: - debugfs_remove_recursive(binder_debugfs_dir_entry_root); - - destroy_workqueue(binder_deferred_workqueue); return ret; } diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ebbe31fee7ae..4bbe4e5f9a6d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -592,6 +592,16 @@ config DEVPORT source "drivers/s390/char/Kconfig" +config MSM_SMD_PKT + bool "Enable device interface for some SMD packet ports" + default n + depends on MSM_SMD + help + smd_pkt driver provides the interface for the userspace clients + to communicate over smd via device nodes. This enable the + usersapce clients to read and write to some smd packets channel + for MSM chipset. + config TILE_SROM bool "Character-device access via hypervisor to the Tilera SPI ROM" depends on TILE @@ -607,7 +617,7 @@ source "drivers/char/xillybus/Kconfig" config MSM_ADSPRPC tristate "QTI ADSP RPC driver" - depends on MSM_GLINK + depends on MSM_SMD help Provides a communication mechanism that allows for clients to make remote method invocations across processor boundary to diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 7b0bd5408324..77697b8c42c0 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o obj-$(CONFIG_RAW_DRIVER) += raw.o obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o +obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o obj-$(CONFIG_MSPEC) += mspec.o obj-$(CONFIG_MMTIMER) += mmtimer.o obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index e0106a7e31fa..10c4d8ce2410 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -25,6 +25,7 @@ #include <linux/hash.h> #include <linux/msm_ion.h> #include <soc/qcom/secure_buffer.h> +#include <soc/qcom/smd.h> #include <soc/qcom/glink.h> #include <soc/qcom/subsystem_notif.h> #include <soc/qcom/subsystem_restart.h> @@ -213,6 +214,7 @@ struct fastrpc_channel_ctx { struct completion work; struct notifier_block nb; struct kref kref; + int channel; int sesscount; int ssrcount; void *handle; @@ -238,6 +240,7 @@ struct fastrpc_apps { spinlock_t hlock; struct ion_client *client; struct device *dev; + bool glink; }; struct fastrpc_mmap { @@ -298,18 +301,21 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = { { .name = "adsprpc-smd", .subsys = "adsp", + .channel = SMD_APPS_QDSP, .link.link_info.edge = "lpass", .link.link_info.transport = "smem", }, { .name = "mdsprpc-smd", .subsys = "modem", + .channel = SMD_APPS_MODEM, .link.link_info.edge = "mpss", .link.link_info.transport = "smem", }, { .name = "sdsprpc-smd", .subsys = "slpi", + .channel = SMD_APPS_DSPS, .link.link_info.edge = "dsps", .link.link_info.transport = "smem", .vmid = VMID_SSC_Q6, @@ -553,7 +559,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map) if 
(!IS_ERR_OR_NULL(map->handle)) ion_free(fl->apps->client, map->handle); - if (sess->smmu.enabled) { + if (sess && sess->smmu.enabled) { if (map->size || map->phys) msm_dma_unmap_sg(sess->smmu.dev, map->table->sgl, @@ -645,6 +651,9 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr, else sess = fl->sctx; + VERIFY(err, !IS_ERR_OR_NULL(sess)); + if (err) + goto bail; VERIFY(err, !IS_ERR_OR_NULL(map->buf = dma_buf_get(fd))); if (err) goto bail; @@ -1379,7 +1388,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, struct smq_msg *msg = &ctx->msg; struct fastrpc_file *fl = ctx->fl; struct fastrpc_channel_ctx *channel_ctx = &fl->apps->channel[fl->cid]; - int err = 0; + int err = 0, len; VERIFY(err, 0 != channel_ctx->chan); if (err) @@ -1394,21 +1403,64 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx, msg->invoke.page.addr = ctx->buf ? ctx->buf->phys : 0; msg->invoke.page.size = buf_page_size(ctx->used); - if (fl->ssrcount != channel_ctx->ssrcount) { - err = -ECONNRESET; - goto bail; + if (fl->apps->glink) { + if (fl->ssrcount != channel_ctx->ssrcount) { + err = -ECONNRESET; + goto bail; + } + VERIFY(err, channel_ctx->link.port_state == + FASTRPC_LINK_CONNECTED); + if (err) + goto bail; + err = glink_tx(channel_ctx->chan, + (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg), + GLINK_TX_REQ_INTENT); + } else { + spin_lock(&fl->apps->hlock); + len = smd_write((smd_channel_t *) + channel_ctx->chan, + msg, sizeof(*msg)); + spin_unlock(&fl->apps->hlock); + VERIFY(err, len == sizeof(*msg)); } - VERIFY(err, channel_ctx->link.port_state == - FASTRPC_LINK_CONNECTED); - if (err) - goto bail; - err = glink_tx(channel_ctx->chan, - (void *)&fl->apps->channel[fl->cid], msg, sizeof(*msg), - GLINK_TX_REQ_INTENT); bail: return err; } +static void fastrpc_smd_read_handler(int cid) +{ + struct fastrpc_apps *me = &gfa; + struct smq_invoke_rsp rsp = {0}; + int ret = 0; + + do { + ret = smd_read_from_cb(me->channel[cid].chan, &rsp, + sizeof(rsp)); + if (ret != sizeof(rsp)) + break; + rsp.ctx = rsp.ctx & ~1; + context_notify_user(uint64_to_ptr(rsp.ctx), rsp.retval); + } while (ret == sizeof(rsp)); +} + +static void smd_event_handler(void *priv, unsigned event) +{ + struct fastrpc_apps *me = &gfa; + int cid = (int)(uintptr_t)priv; + + switch (event) { + case SMD_EVENT_OPEN: + complete(&me->channel[cid].work); + break; + case SMD_EVENT_CLOSE: + fastrpc_notify_drivers(me, cid); + break; + case SMD_EVENT_DATA: + fastrpc_smd_read_handler(cid); + break; + } +} + static void fastrpc_init(struct fastrpc_apps *me) { int i; @@ -1984,10 +2036,12 @@ static void fastrpc_channel_close(struct kref *kref) ctx = container_of(kref, struct fastrpc_channel_ctx, kref); cid = ctx - &gcinfo[0]; - fastrpc_glink_close(ctx->chan, cid); + if (!me->glink) + smd_close(ctx->chan); + else + fastrpc_glink_close(ctx->chan, cid); + ctx->chan = 0; - glink_unregister_link_state_cb(ctx->link.link_notify_handle); - ctx->link.link_notify_handle = 0; mutex_unlock(&me->smd_mutex); pr_info("'closed /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); @@ -2402,8 +2456,16 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) fl->ssrcount = me->channel[cid].ssrcount; if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) || (me->channel[cid].chan == 0)) { - fastrpc_glink_register(cid, me); - VERIFY(err, 0 == fastrpc_glink_open(cid)); + if (me->glink) { + fastrpc_glink_register(cid, me); + VERIFY(err, 0 == fastrpc_glink_open(cid)); + } else { + VERIFY(err, 
!smd_named_open_on_edge(FASTRPC_SMD_GUID, + gcinfo[cid].channel, + (smd_channel_t **)&me->channel[cid].chan, + (void *)(uintptr_t)cid, + smd_event_handler)); + } if (err) goto bail; @@ -2416,7 +2478,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) kref_init(&me->channel[cid].kref); pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); - if (me->channel[cid].ssrcount != + if (cid == 0 && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { if (fastrpc_mmap_remove_ssr(fl)) pr_err("ADSPRPC: SSR: Failed to unmap remote heap\n"); @@ -2632,7 +2694,11 @@ static int fastrpc_restart_notifier_cb(struct notifier_block *nb, ctx->ssrcount++; ctx->issubsystemup = 0; if (ctx->chan) { - fastrpc_glink_close(ctx->chan, cid); + if (me->glink) + fastrpc_glink_close(ctx->chan, cid); + else + smd_close(ctx->chan); + ctx->chan = 0; pr_info("'restart notifier: closed /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); @@ -2856,7 +2922,7 @@ static int fastrpc_probe(struct platform_device *pdev) } return 0; } - + me->glink = of_property_read_bool(dev->of_node, "qcom,fastrpc-glink"); VERIFY(err, !of_platform_populate(pdev->dev.of_node, fastrpc_match_table, NULL, &pdev->dev)); diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 0c958d855f94..3c10462c2274 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -456,8 +456,15 @@ static void diag_send_feature_mask_update(uint8_t peripheral) DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT); if (driver->supports_apps_hdlc_encoding) DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE); - if (driver->supports_apps_header_untagging) - DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG); + if (driver->supports_apps_header_untagging) { + if (peripheral == PERIPHERAL_MODEM || + peripheral == PERIPHERAL_LPASS || + peripheral == PERIPHERAL_CDSP) { + DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG); + driver->peripheral_untag[peripheral] = + ENABLE_PKT_HEADER_UNTAGGING; + } + } DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION); if (driver->supports_sockets) DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED); diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index dc3029cc459d..bd34e6cceec0 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -129,6 +129,37 @@ void diag_md_close_all() diag_ws_reset(DIAG_WS_MUX); } +static int diag_md_get_peripheral(int ctxt) +{ + int peripheral; + + if (driver->num_pd_session) { + peripheral = GET_PD_CTXT(ctxt); + switch (peripheral) { + case UPD_WLAN: + case UPD_AUDIO: + case UPD_SENSORS: + break; + case DIAG_ID_MPSS: + case DIAG_ID_LPASS: + case DIAG_ID_CDSP: + default: + peripheral = + GET_BUF_PERIPHERAL(ctxt); + if (peripheral > NUM_PERIPHERALS) + peripheral = -EINVAL; + break; + } + } else { + /* Account for Apps data as well */ + peripheral = GET_BUF_PERIPHERAL(ctxt); + if (peripheral > NUM_PERIPHERALS) + peripheral = -EINVAL; + } + + return peripheral; +} + int diag_md_write(int id, unsigned char *buf, int len, int ctx) { int i; @@ -144,26 +175,13 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx) if (!buf || len < 0) return -EINVAL; - if (driver->pd_logging_mode) { - peripheral = GET_PD_CTXT(ctx); - switch (peripheral) { - case UPD_WLAN: - break; - case DIAG_ID_MPSS: - default: - peripheral = GET_BUF_PERIPHERAL(ctx); - if (peripheral > NUM_PERIPHERALS) - return -EINVAL; - break; - } - } else { - /* Account for Apps data as well */ - peripheral 
= GET_BUF_PERIPHERAL(ctx); - if (peripheral > NUM_PERIPHERALS) - return -EINVAL; - } + peripheral = + diag_md_get_peripheral(ctx); + if (peripheral < 0) + return -EINVAL; - session_info = diag_md_session_get_peripheral(peripheral); + session_info = + diag_md_session_get_peripheral(peripheral); if (!session_info) return -EIO; @@ -243,31 +261,15 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size, entry = &ch->tbl[j]; if (entry->len <= 0) continue; - if (driver->pd_logging_mode) { - peripheral = GET_PD_CTXT(entry->ctx); - switch (peripheral) { - case UPD_WLAN: - break; - case DIAG_ID_MPSS: - default: - peripheral = - GET_BUF_PERIPHERAL(entry->ctx); - if (peripheral > NUM_PERIPHERALS) - goto drop_data; - break; - } - } else { - /* Account for Apps data as well */ - peripheral = GET_BUF_PERIPHERAL(entry->ctx); - if (peripheral > NUM_PERIPHERALS) - goto drop_data; - } + + peripheral = diag_md_get_peripheral(entry->ctx); + if (peripheral < 0) + goto drop_data; session_info = diag_md_session_get_peripheral(peripheral); if (!session_info) { - mutex_unlock(&driver->diagfwd_untag_mutex); - return -EIO; + goto drop_data; } if (session_info && info && @@ -363,9 +365,15 @@ int diag_md_close_peripheral(int id, uint8_t peripheral) spin_lock_irqsave(&ch->lock, flags); for (i = 0; i < ch->num_tbl_entries && !found; i++) { entry = &ch->tbl[i]; - if ((GET_BUF_PERIPHERAL(entry->ctx) != peripheral) || - (GET_PD_CTXT(entry->ctx) != peripheral)) - continue; + + if (peripheral > NUM_PERIPHERALS) { + if (GET_PD_CTXT(entry->ctx) != peripheral) + continue; + } else { + if (GET_BUF_PERIPHERAL(entry->ctx) != + peripheral) + continue; + } found = 1; if (ch->ops && ch->ops->write_done) { ch->ops->write_done(entry->buf, entry->len, diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c index 55c5de1ea9fc..39f4b08d9b0a 100644 --- a/drivers/char/diag/diag_mux.c +++ b/drivers/char/diag/diag_mux.c @@ -27,7 +27,7 @@ #include "diag_mux.h" #include "diag_usb.h" #include "diag_memorydevice.h" - +#include "diag_ipc_logging.h" struct diag_mux_state_t *diag_mux; static struct diag_logger_t usb_logger; @@ -146,7 +146,15 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx) case DIAG_ID_MPSS: upd = PERIPHERAL_MODEM; break; + case DIAG_ID_LPASS: + upd = PERIPHERAL_LPASS; + break; + case DIAG_ID_CDSP: + upd = PERIPHERAL_CDSP; + break; case UPD_WLAN: + case UPD_AUDIO: + case UPD_SENSORS: break; default: pr_err("diag: invalid pd ctxt= %d\n", upd); diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h index 511b019e33ec..b17538a10ea9 100644 --- a/drivers/char/diag/diagchar.h +++ b/drivers/char/diag/diagchar.h @@ -76,7 +76,9 @@ | DIAG_CON_LPASS | DIAG_CON_WCNSS \ | DIAG_CON_SENSORS | DIAG_CON_WDSP \ | DIAG_CON_CDSP) -#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN) +#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN \ + | DIAG_CON_UPD_AUDIO \ + | DIAG_CON_UPD_SENSORS) #define DIAG_STM_MODEM 0x01 #define DIAG_STM_LPASS 0x02 @@ -222,6 +224,10 @@ #define DIAG_ID_APPS 1 #define DIAG_ID_MPSS 2 #define DIAG_ID_WLAN 3 +#define DIAG_ID_LPASS 4 +#define DIAG_ID_CDSP 5 +#define DIAG_ID_AUDIO 6 +#define DIAG_ID_SENSORS 7 /* Number of sessions possible in Memory Device Mode. 
+1 for Apps data */ #define NUM_MD_SESSIONS (NUM_PERIPHERALS \ @@ -503,6 +509,7 @@ struct diagchar_dev { int supports_separate_cmdrsp; int supports_apps_hdlc_encoding; int supports_apps_header_untagging; + int peripheral_untag[NUM_PERIPHERALS]; int supports_sockets; /* The state requested in the STM command */ int stm_state_requested[NUM_STM_PROCESSORS]; @@ -597,10 +604,15 @@ struct diagchar_dev { int in_busy_dcipktdata; int logging_mode; int logging_mask; - int pd_logging_mode; + int pd_logging_mode[NUM_UPD]; + int pd_session_clear[NUM_UPD]; int num_pd_session; - int cpd_len_1; - int cpd_len_2; + int cpd_len_1[NUM_PERIPHERALS]; + int cpd_len_2[NUM_PERIPHERALS]; + int upd_len_1_a[NUM_PERIPHERALS]; + int upd_len_1_b[NUM_PERIPHERALS]; + int upd_len_2_a; + int upd_len_2_b; int mask_check; uint32_t md_session_mask; uint8_t md_session_mode; diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 4f56696f52e9..574a13de6a0d 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -397,6 +397,10 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask) ret |= DIAG_CON_CDSP; if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN)) ret |= DIAG_CON_UPD_WLAN; + if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO)) + ret |= DIAG_CON_UPD_AUDIO; + if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_SENSORS)) + ret |= DIAG_CON_UPD_SENSORS; return ret; } int diag_mask_param(void) @@ -426,8 +430,8 @@ void diag_clear_masks(struct diag_md_session_t *info) static void diag_close_logging_process(const int pid) { - int i; - int session_peripheral_mask; + int i, j; + int session_mask; struct diag_md_session_t *session_info = NULL; struct diag_logging_mode_param_t params; @@ -443,27 +447,34 @@ static void diag_close_logging_process(const int pid) mutex_unlock(&driver->diag_maskclear_mutex); mutex_lock(&driver->diagchar_mutex); - session_peripheral_mask = session_info->peripheral_mask; + + session_mask = session_info->peripheral_mask; diag_md_session_close(session_info); - mutex_unlock(&driver->diagchar_mutex); + for (i = 0; i < NUM_MD_SESSIONS; i++) - if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask) + if (MD_PERIPHERAL_MASK(i) & session_mask) diag_mux_close_peripheral(DIAG_LOCAL_PROC, i); params.req_mode = USB_MODE; params.mode_param = 0; params.peripheral_mask = - diag_translate_kernel_to_user_mask(session_peripheral_mask); - if (driver->pd_logging_mode) - params.pd_mask = - diag_translate_kernel_to_user_mask(session_peripheral_mask); - - if (session_peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN)) { - driver->pd_logging_mode--; - driver->num_pd_session--; + diag_translate_kernel_to_user_mask(session_mask); + + for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) { + if (session_mask & + MD_PERIPHERAL_MASK(i)) { + j = i - UPD_WLAN; + driver->pd_session_clear[j] = 1; + driver->pd_logging_mode[j] = 0; + driver->num_pd_session -= 1; + params.pd_mask = + diag_translate_kernel_to_user_mask(session_mask); + } else + params.pd_mask = 0; } - mutex_lock(&driver->diagchar_mutex); + diag_switch_logging(¶ms); + mutex_unlock(&driver->diagchar_mutex); } @@ -1562,17 +1573,22 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask) ret |= (1 << PERIPHERAL_CDSP); if (peripheral_mask & DIAG_CON_UPD_WLAN) ret |= (1 << UPD_WLAN); + if (peripheral_mask & DIAG_CON_UPD_AUDIO) + ret |= (1 << UPD_AUDIO); + if (peripheral_mask & DIAG_CON_UPD_SENSORS) + ret |= (1 << UPD_SENSORS); return ret; } static int diag_switch_logging(struct diag_logging_mode_param_t 
*param) { - int new_mode; + int new_mode, i; int curr_mode; int err = 0; uint8_t do_switch = 1; uint32_t peripheral_mask = 0; + uint8_t peripheral, upd; if (!param) return -EINVAL; @@ -1583,10 +1599,28 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) return -EINVAL; } - switch (param->pd_mask) { - case DIAG_CON_UPD_WLAN: - if (driver->md_session_map[PERIPHERAL_MODEM] && - (MD_PERIPHERAL_MASK(PERIPHERAL_MODEM) & + if (param->pd_mask) { + switch (param->pd_mask) { + case DIAG_CON_UPD_WLAN: + peripheral = PERIPHERAL_MODEM; + upd = UPD_WLAN; + break; + case DIAG_CON_UPD_AUDIO: + peripheral = PERIPHERAL_LPASS; + upd = UPD_AUDIO; + break; + case DIAG_CON_UPD_SENSORS: + peripheral = PERIPHERAL_LPASS; + upd = UPD_SENSORS; + break; + default: + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "asking for mode switch with no pd mask set\n"); + return -EINVAL; + } + + if (driver->md_session_map[peripheral] && + (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)) { DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag_fr: User PD is already logging onto active peripheral logging\n"); @@ -1595,15 +1629,16 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param) peripheral_mask = diag_translate_mask(param->pd_mask); param->peripheral_mask = peripheral_mask; - driver->pd_logging_mode++; - driver->num_pd_session++; - break; - - default: + i = upd - UPD_WLAN; + if (!driver->pd_session_clear[i]) { + driver->pd_logging_mode[i] = 1; + driver->num_pd_session += 1; + driver->pd_session_clear[i] = 0; + } + } else { peripheral_mask = diag_translate_mask(param->peripheral_mask); param->peripheral_mask = peripheral_mask; - break; } switch (param->req_mode) { @@ -1945,9 +1980,36 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg) return 0; } -static int diag_ioctl_query_pd_logging(unsigned long ioarg) +static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param) { int ret = -EINVAL; + int peripheral; + char *p_str = NULL; + + if (!param) + return -EINVAL; + + if (!param->pd_mask) { + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "query with no pd mask set, returning error\n"); + return -EINVAL; + } + + switch (param->pd_mask) { + case DIAG_CON_UPD_WLAN: + peripheral = PERIPHERAL_MODEM; + p_str = "MODEM"; + break; + case DIAG_CON_UPD_AUDIO: + case DIAG_CON_UPD_SENSORS: + peripheral = PERIPHERAL_LPASS; + p_str = "LPASS"; + break; + default: + DIAG_LOG(DIAG_DEBUG_USERSPACE, + "Invalid pd mask, returning EINVAL\n"); + return -EINVAL; + } DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s: Untagging support on APPS is %s\n", __func__, @@ -1955,12 +2017,13 @@ static int diag_ioctl_query_pd_logging(unsigned long ioarg) "present" : "absent")); DIAG_LOG(DIAG_DEBUG_USERSPACE, - "diag: %s: Tagging support on MODEM is %s\n", __func__, - (driver->feature[PERIPHERAL_MODEM].untag_header ? + "diag: %s: Tagging support on %s is %s\n", + __func__, p_str, + (driver->feature[peripheral].untag_header ? 
"present" : "absent")); if (driver->supports_apps_header_untagging && - driver->feature[PERIPHERAL_MODEM].untag_header) + driver->feature[peripheral].untag_header) ret = 0; return ret; @@ -2206,7 +2269,10 @@ long diagchar_compat_ioctl(struct file *filp, result = diag_ioctl_hdlc_toggle(ioarg); break; case DIAG_IOCTL_QUERY_PD_LOGGING: - result = diag_ioctl_query_pd_logging(ioarg); + if (copy_from_user((void *)&mode_param, (void __user *)ioarg, + sizeof(mode_param))) + return -EFAULT; + result = diag_ioctl_query_pd_logging(&mode_param); break; } return result; @@ -2332,7 +2398,10 @@ long diagchar_ioctl(struct file *filp, result = diag_ioctl_hdlc_toggle(ioarg); break; case DIAG_IOCTL_QUERY_PD_LOGGING: - result = diag_ioctl_query_pd_logging(ioarg); + if (copy_from_user((void *)&mode_param, (void __user *)ioarg, + sizeof(mode_param))) + return -EFAULT; + result = diag_ioctl_query_pd_logging(&mode_param); break; } return result; @@ -3474,7 +3543,10 @@ static int __init diagchar_init(void) poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6)); driver->num_clients = max_clients; driver->logging_mode = DIAG_USB_MODE; - driver->pd_logging_mode = 0; + for (i = 0; i < NUM_UPD; i++) { + driver->pd_logging_mode[i] = 0; + driver->pd_session_clear[i] = 0; + } driver->num_pd_session = 0; driver->mask_check = 0; driver->in_busy_pktdata = 0; diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c index 532d2b149317..8fb724305c03 100644 --- a/drivers/char/diag/diagfwd.c +++ b/drivers/char/diag/diagfwd.c @@ -38,6 +38,7 @@ #include "diag_masks.h" #include "diag_usb.h" #include "diag_mux.h" +#include "diag_ipc_logging.h" #define STM_CMD_VERSION_OFFSET 4 #define STM_CMD_MASK_OFFSET 5 @@ -259,11 +260,17 @@ static void pack_rsp_and_send(unsigned char *buf, int len, } if (info && info->peripheral_mask) { - for (i = 0; i <= NUM_PERIPHERALS; i++) { - if (info->peripheral_mask & (1 << i)) - break; + if (info->peripheral_mask == DIAG_CON_ALL || + (info->peripheral_mask & (1 << APPS_DATA)) || + (info->peripheral_mask & (1 << PERIPHERAL_MODEM))) { + rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1); + } else { + for (i = 0; i <= NUM_PERIPHERALS; i++) { + if (info->peripheral_mask & (1 << i)) + break; + } + rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1); } - rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1); } else rsp_ctxt = driver->rsp_buf_ctxt; @@ -337,11 +344,17 @@ static void encode_rsp_and_send(unsigned char *buf, int len, } if (info && info->peripheral_mask) { - for (i = 0; i <= NUM_PERIPHERALS; i++) { - if (info->peripheral_mask & (1 << i)) - break; + if (info->peripheral_mask == DIAG_CON_ALL || + (info->peripheral_mask & (1 << APPS_DATA)) || + (info->peripheral_mask & (1 << PERIPHERAL_MODEM))) { + rsp_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, 1); + } else { + for (i = 0; i <= NUM_PERIPHERALS; i++) { + if (info->peripheral_mask & (1 << i)) + break; + } + rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1); } - rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1); } else rsp_ctxt = driver->rsp_buf_ctxt; @@ -1588,6 +1601,8 @@ int diagfwd_init(void) driver->supports_separate_cmdrsp = 1; driver->supports_apps_hdlc_encoding = 1; driver->supports_apps_header_untagging = 1; + for (i = 0; i < NUM_PERIPHERALS; i++) + driver->peripheral_untag[i] = 0; mutex_init(&driver->diag_hdlc_mutex); mutex_init(&driver->diag_cntl_mutex); mutex_init(&driver->mode_lock); diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c index ae749725f6db..82a67f1f6f47 100644 --- a/drivers/char/diag/diagfwd_cntl.c +++ b/drivers/char/diag/diagfwd_cntl.c @@ -110,6 
+110,8 @@ void diag_notify_md_client(uint8_t peripheral, int data) { int stat = 0; struct siginfo info; + struct pid *pid_struct; + struct task_struct *result; if (peripheral > NUM_PERIPHERALS) return; @@ -122,20 +124,38 @@ void diag_notify_md_client(uint8_t peripheral, int data) info.si_code = SI_QUEUE; info.si_int = (PERIPHERAL_MASK(peripheral) | data); info.si_signo = SIGCONT; - if (driver->md_session_map[peripheral] && - driver->md_session_map[peripheral]->task) { - if (driver->md_session_map[peripheral]-> - md_client_thread_info->task != NULL - && driver->md_session_map[peripheral]->pid == - driver->md_session_map[peripheral]->task->tgid) { + + if (!driver->md_session_map[peripheral] || + driver->md_session_map[peripheral]->pid <= 0) { + pr_err("diag: md_session_map[%d] is invalid\n", peripheral); + mutex_unlock(&driver->md_session_lock); + return; + } + + pid_struct = find_get_pid( + driver->md_session_map[peripheral]->pid); + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "md_session_map[%d] pid = %d task = %pK\n", + peripheral, + driver->md_session_map[peripheral]->pid, + driver->md_session_map[peripheral]->task); + + if (pid_struct) { + result = get_pid_task(pid_struct, PIDTYPE_PID); + + if (!result) { DIAG_LOG(DIAG_DEBUG_PERIPHERALS, - "md_session %d pid = %d, md_session %d task tgid = %d\n", - peripheral, - driver->md_session_map[peripheral]->pid, + "diag: md_session_map[%d] with pid = %d Exited..\n", peripheral, - driver->md_session_map[peripheral]->task->tgid); - stat = send_sig_info(info.si_signo, &info, - driver->md_session_map[peripheral]->task); + driver->md_session_map[peripheral]->pid); + mutex_unlock(&driver->md_session_lock); + return; + } + + if (driver->md_session_map[peripheral] && + driver->md_session_map[peripheral]->task == result) { + stat = send_sig_info(info.si_signo, + &info, result); if (stat) pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n", info.si_int, stat); diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c index 37f3bd2626c8..2784cf71cc2b 100644 --- a/drivers/char/diag/diagfwd_glink.c +++ b/drivers/char/diag/diagfwd_glink.c @@ -468,7 +468,7 @@ static void diag_glink_connect_work_fn(struct work_struct *work) struct diag_glink_info *glink_info = container_of(work, struct diag_glink_info, connect_work); - if (!glink_info || glink_info->hdl) + if (!glink_info || !glink_info->hdl) return; atomic_set(&glink_info->opened, 1); diagfwd_channel_open(glink_info->fwd_ctxt); @@ -480,7 +480,7 @@ static void diag_glink_remote_disconnect_work_fn(struct work_struct *work) struct diag_glink_info *glink_info = container_of(work, struct diag_glink_info, remote_disconnect_work); - if (!glink_info || glink_info->hdl) + if (!glink_info || !glink_info->hdl) return; atomic_set(&glink_info->opened, 0); diagfwd_channel_close(glink_info->fwd_ctxt); diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 55d36abe4679..e86dc8292bf0 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -244,9 +244,14 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, mutex_lock(&driver->hdlc_disable_mutex); mutex_lock(&fwd_info->data_mutex); + peripheral = GET_PD_CTXT(buf->ctxt); if (peripheral == DIAG_ID_MPSS) peripheral = PERIPHERAL_MODEM; + if (peripheral == DIAG_ID_LPASS) + peripheral = PERIPHERAL_LPASS; + if (peripheral == DIAG_ID_CDSP) + peripheral = PERIPHERAL_CDSP; session_info = diag_md_session_get_peripheral(peripheral); 
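diag_notify_md_client() above stops dereferencing a cached task_struct and instead re-resolves the client's pid with find_get_pid()/get_pid_task() before signalling it. A hedged sketch of that lookup-then-signal pattern (the example_ helper and the explicit put_pid()/put_task_struct() releases are illustrative additions, not copied from the patch):

#include <linux/errno.h>
#include <linux/pid.h>
#include <linux/sched.h>

/*
 * Illustrative only: signal the memory-device client only if its pid
 * still resolves to a live task.  The put_pid()/put_task_struct()
 * calls drop the references taken by the lookups.
 */
static int example_signal_md_client(pid_t client_pid, struct siginfo *info)
{
	struct pid *pid_struct;
	struct task_struct *task;
	int ret;

	pid_struct = find_get_pid(client_pid);
	if (!pid_struct)
		return -ESRCH;		/* client already exited */

	task = get_pid_task(pid_struct, PIDTYPE_PID);
	if (!task) {
		put_pid(pid_struct);
		return -ESRCH;
	}

	ret = send_sig_info(info->si_signo, info, task);

	put_task_struct(task);
	put_pid(pid_struct);
	return ret;
}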
@@ -323,15 +328,19 @@ end: static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, unsigned char *buf, int len) { - int len_cpd = 0, len_upd_1 = 0; - int ctxt_cpd = 0, ctxt_upd_1 = 0; + int len_cpd = 0; + int len_upd_1 = 0, len_upd_2 = 0; + int ctxt_cpd = 0; + int ctxt_upd_1 = 0, ctxt_upd_2 = 0; int buf_len = 0, processed = 0; unsigned char *temp_buf_main = NULL; unsigned char *temp_buf_cpd = NULL; unsigned char *temp_buf_upd_1 = NULL; + unsigned char *temp_buf_upd_2 = NULL; struct diagfwd_buf_t *temp_ptr_upd = NULL; struct diagfwd_buf_t *temp_ptr_cpd = NULL; int flag_buf_1 = 0, flag_buf_2 = 0; + uint8_t peripheral; if (!fwd_info || !buf || len <= 0) { diag_ws_release(); @@ -349,23 +358,42 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, diag_ws_release(); return; } + peripheral = fwd_info->peripheral; - if (driver->feature[fwd_info->peripheral].encode_hdlc && - driver->feature[fwd_info->peripheral].untag_header) { + if (driver->feature[peripheral].encode_hdlc && + driver->feature[peripheral].untag_header && + driver->peripheral_untag[peripheral]) { mutex_lock(&driver->diagfwd_untag_mutex); temp_buf_cpd = buf; temp_buf_main = buf; if (fwd_info->buf_1 && fwd_info->buf_1->data_raw == buf) { flag_buf_1 = 1; - if (fwd_info->type == TYPE_DATA) + temp_ptr_cpd = fwd_info->buf_1; + if (fwd_info->type == TYPE_DATA) { temp_buf_upd_1 = fwd_info->buf_upd_1_a->data_raw; - } else { + if (peripheral == + PERIPHERAL_LPASS) + temp_buf_upd_2 = + fwd_info->buf_upd_2_a->data_raw; + } + } else if (fwd_info->buf_2 && + fwd_info->buf_2->data_raw == buf) { flag_buf_2 = 1; + temp_ptr_cpd = fwd_info->buf_2; if (fwd_info->type == TYPE_DATA) temp_buf_upd_1 = fwd_info->buf_upd_1_b->data_raw; + if (peripheral == + PERIPHERAL_LPASS) + temp_buf_upd_2 = + fwd_info->buf_upd_2_b->data_raw; + } else { + pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n", + __func__, buf, peripheral, + fwd_info->type); + goto end; } while (processed < len) { buf_len = @@ -389,31 +417,97 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, temp_buf_upd_1 += buf_len; } break; + case DIAG_ID_LPASS: + ctxt_cpd = DIAG_ID_LPASS; + len_cpd += buf_len; + if (temp_buf_cpd) { + memcpy(temp_buf_cpd, + (temp_buf_main + 4), buf_len); + temp_buf_cpd += buf_len; + } + break; + case DIAG_ID_AUDIO: + ctxt_upd_1 = UPD_AUDIO; + len_upd_1 += buf_len; + if (temp_buf_upd_1) { + memcpy(temp_buf_upd_1, + (temp_buf_main + 4), buf_len); + temp_buf_upd_1 += buf_len; + } + break; + case DIAG_ID_SENSORS: + ctxt_upd_2 = UPD_SENSORS; + len_upd_2 += buf_len; + if (temp_buf_upd_2) { + memcpy(temp_buf_upd_2, + (temp_buf_main + 4), buf_len); + temp_buf_upd_2 += buf_len; + } + break; + case DIAG_ID_CDSP: + ctxt_cpd = DIAG_ID_CDSP; + len_cpd += buf_len; + if (temp_buf_cpd) { + memcpy(temp_buf_cpd, + (temp_buf_main + 4), buf_len); + temp_buf_cpd += buf_len; + } + break; + default: + goto end; } len = len - 4; temp_buf_main += (buf_len + 4); processed += buf_len; } - if (fwd_info->type == TYPE_DATA && len_upd_1) { + if (peripheral == PERIPHERAL_LPASS && + fwd_info->type == TYPE_DATA && len_upd_2) { + if (flag_buf_1) { + driver->upd_len_2_a = len_upd_2; + temp_ptr_upd = fwd_info->buf_upd_2_a; + } else { + driver->upd_len_2_b = len_upd_2; + temp_ptr_upd = fwd_info->buf_upd_2_b; + } + temp_ptr_upd->ctxt &= 0x00FFFFFF; + temp_ptr_upd->ctxt |= + (SET_PD_CTXT(ctxt_upd_2)); + atomic_set(&temp_ptr_upd->in_busy, 1); + diagfwd_data_process_done(fwd_info, + temp_ptr_upd, len_upd_2); + } else { if 
(flag_buf_1) + driver->upd_len_2_a = 0; + if (flag_buf_2) + driver->upd_len_2_b = 0; + } + if (fwd_info->type == TYPE_DATA && len_upd_1) { + if (flag_buf_1) { + driver->upd_len_1_a[peripheral] = + len_upd_1; temp_ptr_upd = fwd_info->buf_upd_1_a; - else + } else { + driver->upd_len_1_b[peripheral] = + len_upd_1; temp_ptr_upd = fwd_info->buf_upd_1_b; + } temp_ptr_upd->ctxt &= 0x00FFFFFF; temp_ptr_upd->ctxt |= (SET_PD_CTXT(ctxt_upd_1)); atomic_set(&temp_ptr_upd->in_busy, 1); diagfwd_data_process_done(fwd_info, temp_ptr_upd, len_upd_1); + } else { + if (flag_buf_1) + driver->upd_len_1_a[peripheral] = 0; + if (flag_buf_2) + driver->upd_len_1_b[peripheral] = 0; } if (len_cpd) { - if (flag_buf_1) { - driver->cpd_len_1 = len_cpd; - temp_ptr_cpd = fwd_info->buf_1; - } else { - driver->cpd_len_2 = len_cpd; - temp_ptr_cpd = fwd_info->buf_2; - } + if (flag_buf_1) + driver->cpd_len_1[peripheral] = len_cpd; + else + driver->cpd_len_2[peripheral] = len_cpd; temp_ptr_cpd->ctxt &= 0x00FFFFFF; temp_ptr_cpd->ctxt |= (SET_PD_CTXT(ctxt_cpd)); @@ -421,14 +515,24 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info, temp_ptr_cpd, len_cpd); } else { if (flag_buf_1) - driver->cpd_len_1 = 0; + driver->cpd_len_1[peripheral] = 0; if (flag_buf_2) - driver->cpd_len_2 = 0; + driver->cpd_len_2[peripheral] = 0; } mutex_unlock(&driver->diagfwd_untag_mutex); + return; } else { diagfwd_data_read_done(fwd_info, buf, len); + return; + } +end: + diag_ws_release(); + mutex_unlock(&driver->diagfwd_untag_mutex); + if (temp_ptr_cpd) { + diagfwd_write_done(fwd_info->peripheral, fwd_info->type, + GET_BUF_NUM(temp_ptr_cpd->ctxt)); } + diagfwd_queue_read(fwd_info); } static void diagfwd_data_read_done(struct diagfwd_info *fwd_info, @@ -990,10 +1094,20 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info) if (!fwd_info->inited) return; - if (fwd_info->buf_1) - atomic_set(&fwd_info->buf_1->in_busy, 0); - if (fwd_info->buf_2) - atomic_set(&fwd_info->buf_2->in_busy, 0); + /* + * Logging mode here is reflecting previous mode + * status and will be updated to new mode later. + * + * Keeping the buffers busy for Memory Device Mode. 
+ */ + + if ((driver->logging_mode != DIAG_USB_MODE) || + driver->usb_connected) { + if (fwd_info->buf_1) + atomic_set(&fwd_info->buf_1->in_busy, 0); + if (fwd_info->buf_2) + atomic_set(&fwd_info->buf_2->in_busy, 0); + } if (fwd_info->p_ops && fwd_info->p_ops->open) fwd_info->p_ops->open(fwd_info->ctxt); @@ -1155,18 +1269,78 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt) return; fwd_info = &peripheral_info[type][peripheral]; - if (ctxt == 1 && fwd_info->buf_1) + + if (ctxt == 1 && fwd_info->buf_1) { + /* Buffer 1 for core PD is freed */ atomic_set(&fwd_info->buf_1->in_busy, 0); - else if (ctxt == 2 && fwd_info->buf_2) + driver->cpd_len_1[peripheral] = 0; + } else if (ctxt == 2 && fwd_info->buf_2) { + /* Buffer 2 for core PD is freed */ atomic_set(&fwd_info->buf_2->in_busy, 0); - else if (ctxt == 3 && fwd_info->buf_upd_1_a) { + driver->cpd_len_2[peripheral] = 0; + } else if (ctxt == 3 && fwd_info->buf_upd_1_a) { + /* Buffer 1 for user pd 1 is freed */ atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0); - if (driver->cpd_len_1 == 0) - atomic_set(&fwd_info->buf_1->in_busy, 0); + + if (peripheral == PERIPHERAL_LPASS) { + /* if not data in cpd and other user pd + * free the core pd buffer for LPASS + */ + if (!driver->cpd_len_1[PERIPHERAL_LPASS] && + !driver->upd_len_2_a) + atomic_set(&fwd_info->buf_1->in_busy, 0); + } else { + /* if not data in cpd + * free the core pd buffer for MPSS + */ + if (!driver->cpd_len_1[PERIPHERAL_MODEM]) + atomic_set(&fwd_info->buf_1->in_busy, 0); + } + driver->upd_len_1_a[peripheral] = 0; + } else if (ctxt == 4 && fwd_info->buf_upd_1_b) { + /* Buffer 2 for user pd 1 is freed */ atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0); - if (driver->cpd_len_2 == 0) + if (peripheral == PERIPHERAL_LPASS) { + /* if not data in cpd and other user pd + * free the core pd buffer for LPASS + */ + if (!driver->cpd_len_2[peripheral] && + !driver->upd_len_2_b) + atomic_set(&fwd_info->buf_2->in_busy, 0); + } else { + /* if not data in cpd + * free the core pd buffer for MPSS + */ + if (!driver->cpd_len_2[PERIPHERAL_MODEM]) + atomic_set(&fwd_info->buf_2->in_busy, 0); + } + driver->upd_len_1_b[peripheral] = 0; + + } else if (ctxt == 5 && fwd_info->buf_upd_2_a) { + /* Buffer 1 for user pd 2 is freed */ + atomic_set(&fwd_info->buf_upd_2_a->in_busy, 0); + /* if not data in cpd and other user pd + * free the core pd buffer for LPASS + */ + if (!driver->cpd_len_1[PERIPHERAL_LPASS] && + !driver->upd_len_1_a[PERIPHERAL_LPASS]) + atomic_set(&fwd_info->buf_1->in_busy, 0); + + driver->upd_len_2_a = 0; + + } else if (ctxt == 6 && fwd_info->buf_upd_2_b) { + /* Buffer 2 for user pd 2 is freed */ + atomic_set(&fwd_info->buf_upd_2_b->in_busy, 0); + /* if not data in cpd and other user pd + * free the core pd buffer for LPASS + */ + if (!driver->cpd_len_2[PERIPHERAL_LPASS] && + !driver->upd_len_1_b[PERIPHERAL_LPASS]) atomic_set(&fwd_info->buf_2->in_busy, 0); + + driver->upd_len_2_b = 0; + } else pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt); @@ -1299,7 +1473,8 @@ static void diagfwd_queue_read(struct diagfwd_info *fwd_info) void diagfwd_buffers_init(struct diagfwd_info *fwd_info) { - unsigned char *temp_buf; + struct diagfwd_buf_t *temp_fwd_buf; + unsigned char *temp_char_buf; if (!fwd_info) return; @@ -1311,18 +1486,20 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info) } mutex_lock(&fwd_info->buf_mutex); + if (!fwd_info->buf_1) { fwd_info->buf_1 = kzalloc(sizeof(struct diagfwd_buf_t), GFP_KERNEL); - if (!fwd_info->buf_1) + if (ZERO_OR_NULL_PTR(fwd_info->buf_1)) 
goto err; kmemleak_not_leak(fwd_info->buf_1); } + if (!fwd_info->buf_1->data) { fwd_info->buf_1->data = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_1->data) + if (ZERO_OR_NULL_PTR(fwd_info->buf_1->data)) goto err; fwd_info->buf_1->len = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_1->data); @@ -1334,7 +1511,7 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info) if (!fwd_info->buf_2) { fwd_info->buf_2 = kzalloc(sizeof(struct diagfwd_buf_t), GFP_KERNEL); - if (!fwd_info->buf_2) + if (ZERO_OR_NULL_PTR(fwd_info->buf_2)) goto err; kmemleak_not_leak(fwd_info->buf_2); } @@ -1343,7 +1520,7 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info) fwd_info->buf_2->data = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_2->data) + if (ZERO_OR_NULL_PTR(fwd_info->buf_2->data)) goto err; fwd_info->buf_2->len = PERIPHERAL_BUF_SZ; kmemleak_not_leak(fwd_info->buf_2->data); @@ -1352,52 +1529,117 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info) fwd_info->type, 2); } - if (driver->feature[fwd_info->peripheral].untag_header) { + if (driver->feature[fwd_info->peripheral].untag_header) { if (!fwd_info->buf_upd_1_a) { fwd_info->buf_upd_1_a = kzalloc(sizeof(struct diagfwd_buf_t), GFP_KERNEL); - if (!fwd_info->buf_upd_1_a) + if (ZERO_OR_NULL_PTR(fwd_info->buf_upd_1_a)) goto err; kmemleak_not_leak(fwd_info->buf_upd_1_a); } - if (!fwd_info->buf_upd_1_a->data) { + if (fwd_info->buf_upd_1_a && + !fwd_info->buf_upd_1_a->data) { fwd_info->buf_upd_1_a->data = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_upd_1_a->data) + temp_char_buf = fwd_info->buf_upd_1_a->data; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_upd_1_a->len = PERIPHERAL_BUF_SZ; - kmemleak_not_leak(fwd_info->buf_upd_1_a->data); + kmemleak_not_leak(temp_char_buf); fwd_info->buf_upd_1_a->ctxt = SET_BUF_CTXT( fwd_info->peripheral, fwd_info->type, 3); } + if (!fwd_info->buf_upd_1_b) { - fwd_info->buf_upd_1_b = + fwd_info->buf_upd_1_b = kzalloc(sizeof(struct diagfwd_buf_t), GFP_KERNEL); - if (!fwd_info->buf_upd_1_b) - goto err; - kmemleak_not_leak(fwd_info->buf_upd_1_b); + if (ZERO_OR_NULL_PTR(fwd_info->buf_upd_1_b)) + goto err; + kmemleak_not_leak(fwd_info->buf_upd_1_b); } - if (!fwd_info->buf_upd_1_b->data) { + if (fwd_info->buf_upd_1_b && + !fwd_info->buf_upd_1_b->data) { fwd_info->buf_upd_1_b->data = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_upd_1_b->data) + temp_char_buf = + fwd_info->buf_upd_1_b->data; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_upd_1_b->len = PERIPHERAL_BUF_SZ; - kmemleak_not_leak(fwd_info->buf_upd_1_b->data); + kmemleak_not_leak(temp_char_buf); fwd_info->buf_upd_1_b->ctxt = SET_BUF_CTXT( fwd_info->peripheral, fwd_info->type, 4); } + if (fwd_info->peripheral == + PERIPHERAL_LPASS) { + if (!fwd_info->buf_upd_2_a) { + fwd_info->buf_upd_2_a = + kzalloc(sizeof(struct diagfwd_buf_t), + GFP_KERNEL); + temp_fwd_buf = + fwd_info->buf_upd_2_a; + if (ZERO_OR_NULL_PTR(temp_fwd_buf)) + goto err; + kmemleak_not_leak(temp_fwd_buf); + } + + if (!fwd_info->buf_upd_2_a->data) { + fwd_info->buf_upd_2_a->data = + kzalloc(PERIPHERAL_BUF_SZ + + APF_DIAG_PADDING, + GFP_KERNEL); + temp_char_buf = + fwd_info->buf_upd_2_a->data; + if (ZERO_OR_NULL_PTR(temp_char_buf)) + goto err; + fwd_info->buf_upd_2_a->len = + PERIPHERAL_BUF_SZ; + kmemleak_not_leak(temp_char_buf); + fwd_info->buf_upd_2_a->ctxt = + SET_BUF_CTXT( + fwd_info->peripheral, + fwd_info->type, 5); + } + 
if (!fwd_info->buf_upd_2_b) { + fwd_info->buf_upd_2_b = + kzalloc(sizeof(struct diagfwd_buf_t), + GFP_KERNEL); + temp_fwd_buf = + fwd_info->buf_upd_2_b; + if (ZERO_OR_NULL_PTR(temp_fwd_buf)) + goto err; + kmemleak_not_leak(temp_fwd_buf); + } + + if (!fwd_info->buf_upd_2_b->data) { + fwd_info->buf_upd_2_b->data = + kzalloc(PERIPHERAL_BUF_SZ + + APF_DIAG_PADDING, + GFP_KERNEL); + temp_char_buf = + fwd_info->buf_upd_2_b->data; + if (ZERO_OR_NULL_PTR(temp_char_buf)) + goto err; + fwd_info->buf_upd_2_b->len = + PERIPHERAL_BUF_SZ; + kmemleak_not_leak(temp_char_buf); + fwd_info->buf_upd_2_b->ctxt = + SET_BUF_CTXT( + fwd_info->peripheral, + fwd_info->type, 6); + } + } } if (driver->supports_apps_hdlc_encoding) { @@ -1407,51 +1649,87 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info) kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_1->data_raw) + temp_char_buf = + fwd_info->buf_1->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ; - kmemleak_not_leak(fwd_info->buf_1->data_raw); + kmemleak_not_leak(temp_char_buf); } + if (!fwd_info->buf_2->data_raw) { fwd_info->buf_2->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_2->data_raw) + temp_char_buf = + fwd_info->buf_2->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ; - kmemleak_not_leak(fwd_info->buf_2->data_raw); + kmemleak_not_leak(temp_char_buf); } if (driver->feature[fwd_info->peripheral]. - untag_header) { - if (!fwd_info->buf_upd_1_a->data_raw) { + untag_header) { + if (fwd_info->buf_upd_1_a && + !fwd_info->buf_upd_1_a->data_raw) { fwd_info->buf_upd_1_a->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_upd_1_a->data_raw) + temp_char_buf = + fwd_info->buf_upd_1_a->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_upd_1_a->len_raw = PERIPHERAL_BUF_SZ; - temp_buf = - fwd_info->buf_upd_1_a->data_raw; - kmemleak_not_leak(temp_buf); + kmemleak_not_leak(temp_char_buf); } - if (!fwd_info->buf_upd_1_b->data_raw) { + + if (fwd_info->buf_upd_1_b && + !fwd_info->buf_upd_1_b->data_raw) { fwd_info->buf_upd_1_b->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, GFP_KERNEL); - if (!fwd_info->buf_upd_1_b->data_raw) + temp_char_buf = + fwd_info->buf_upd_1_b->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_upd_1_b->len_raw = PERIPHERAL_BUF_SZ; - temp_buf = - fwd_info->buf_upd_1_b->data_raw; - kmemleak_not_leak(temp_buf); + kmemleak_not_leak(temp_char_buf); + } + if (fwd_info->peripheral == PERIPHERAL_LPASS + && !fwd_info->buf_upd_2_a->data_raw) { + fwd_info->buf_upd_2_a->data_raw = + kzalloc(PERIPHERAL_BUF_SZ + + APF_DIAG_PADDING, + GFP_KERNEL); + temp_char_buf = + fwd_info->buf_upd_2_a->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) + goto err; + fwd_info->buf_upd_2_a->len_raw = + PERIPHERAL_BUF_SZ; + kmemleak_not_leak(temp_char_buf); + } + if (fwd_info->peripheral == PERIPHERAL_LPASS + && !fwd_info->buf_upd_2_b->data_raw) { + fwd_info->buf_upd_2_b->data_raw = + kzalloc(PERIPHERAL_BUF_SZ + + APF_DIAG_PADDING, + GFP_KERNEL); + temp_char_buf = + fwd_info->buf_upd_2_b->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) + goto err; + fwd_info->buf_upd_2_b->len_raw = + PERIPHERAL_BUF_SZ; + kmemleak_not_leak(temp_char_buf); } } } @@ -1464,10 +1742,12 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info) fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ + APF_DIAG_PADDING, 
GFP_KERNEL); - if (!fwd_info->buf_1->data_raw) + temp_char_buf = + fwd_info->buf_1->data_raw; + if (ZERO_OR_NULL_PTR(temp_char_buf)) goto err; fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ; - kmemleak_not_leak(fwd_info->buf_1->data_raw); + kmemleak_not_leak(temp_char_buf); } } @@ -1504,6 +1784,38 @@ static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info) kfree(fwd_info->buf_2); fwd_info->buf_2 = NULL; } + if (fwd_info->buf_upd_1_a) { + kfree(fwd_info->buf_upd_1_a->data); + fwd_info->buf_upd_1_a->data = NULL; + kfree(fwd_info->buf_upd_1_a->data_raw); + fwd_info->buf_upd_1_a->data_raw = NULL; + kfree(fwd_info->buf_upd_1_a); + fwd_info->buf_upd_1_a = NULL; + } + if (fwd_info->buf_upd_1_b) { + kfree(fwd_info->buf_upd_1_b->data); + fwd_info->buf_upd_1_b->data = NULL; + kfree(fwd_info->buf_upd_1_b->data_raw); + fwd_info->buf_upd_1_b->data_raw = NULL; + kfree(fwd_info->buf_upd_1_b); + fwd_info->buf_upd_1_b = NULL; + } + if (fwd_info->buf_upd_2_a) { + kfree(fwd_info->buf_upd_2_a->data); + fwd_info->buf_upd_2_a->data = NULL; + kfree(fwd_info->buf_upd_2_a->data_raw); + fwd_info->buf_upd_2_a->data_raw = NULL; + kfree(fwd_info->buf_upd_2_a); + fwd_info->buf_upd_2_a = NULL; + } + if (fwd_info->buf_upd_2_b) { + kfree(fwd_info->buf_upd_2_b->data); + fwd_info->buf_upd_2_b->data = NULL; + kfree(fwd_info->buf_upd_2_b->data_raw); + fwd_info->buf_upd_2_b->data_raw = NULL; + kfree(fwd_info->buf_upd_2_b); + fwd_info->buf_upd_2_b = NULL; + } mutex_unlock(&fwd_info->buf_mutex); } diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h index f483da81cc96..760f139ff428 100644 --- a/drivers/char/diag/diagfwd_peripheral.h +++ b/drivers/char/diag/diagfwd_peripheral.h @@ -80,6 +80,8 @@ struct diagfwd_info { struct diagfwd_buf_t *buf_2; struct diagfwd_buf_t *buf_upd_1_a; struct diagfwd_buf_t *buf_upd_1_b; + struct diagfwd_buf_t *buf_upd_2_a; + struct diagfwd_buf_t *buf_upd_2_b; struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS]; struct diag_peripheral_ops *p_ops; struct diag_channel_ops *c_ops; diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c new file mode 100644 index 000000000000..a61d273bfb65 --- /dev/null +++ b/drivers/char/msm_smd_pkt.c @@ -0,0 +1,1397 @@ +/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +/* + * SMD Packet Driver -- Provides a binary SMD non-muxed packet port + * interface. 
+ */ + +#include <linux/slab.h> +#include <linux/cdev.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/mutex.h> +#include <linux/delay.h> +#include <linux/uaccess.h> +#include <linux/workqueue.h> +#include <linux/platform_device.h> +#include <linux/completion.h> +#include <linux/msm_smd_pkt.h> +#include <linux/poll.h> +#include <soc/qcom/smd.h> +#include <soc/qcom/smsm.h> +#include <soc/qcom/subsystem_restart.h> +#include <asm/ioctls.h> +#include <linux/pm.h> +#include <linux/of.h> +#include <linux/ipc_logging.h> + +#define MODULE_NAME "msm_smdpkt" +#define DEVICE_NAME "smdpkt" +#define WAKEUPSOURCE_TIMEOUT (2000) /* two seconds */ + +struct smd_pkt_dev { + struct list_head dev_list; + char dev_name[SMD_MAX_CH_NAME_LEN]; + char ch_name[SMD_MAX_CH_NAME_LEN]; + uint32_t edge; + + struct cdev cdev; + struct device *devicep; + void *pil; + + struct smd_channel *ch; + struct mutex ch_lock; + struct mutex rx_lock; + struct mutex tx_lock; + wait_queue_head_t ch_read_wait_queue; + wait_queue_head_t ch_write_wait_queue; + wait_queue_head_t ch_opened_wait_queue; + + int i; + int ref_cnt; + + int blocking_write; + int is_open; + int poll_mode; + unsigned ch_size; + uint open_modem_wait; + + int has_reset; + int do_reset_notification; + struct completion ch_allocated; + struct wakeup_source pa_ws; /* Packet Arrival Wakeup Source */ + struct work_struct packet_arrival_work; + spinlock_t pa_spinlock; + int ws_locked; +}; + + +struct smd_pkt_driver { + struct list_head list; + int ref_cnt; + char pdriver_name[SMD_MAX_CH_NAME_LEN]; + struct platform_driver driver; +}; + +static DEFINE_MUTEX(smd_pkt_driver_lock_lha1); +static LIST_HEAD(smd_pkt_driver_list); + +struct class *smd_pkt_classp; +static dev_t smd_pkt_number; +static struct delayed_work loopback_work; +static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp); +static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp); +static uint32_t is_modem_smsm_inited(void); + +static DEFINE_MUTEX(smd_pkt_dev_lock_lha1); +static LIST_HEAD(smd_pkt_dev_list); +static int num_smd_pkt_ports; + +#define SMD_PKT_IPC_LOG_PAGE_CNT 2 +static void *smd_pkt_ilctxt; + +static int msm_smd_pkt_debug_mask; +module_param_named(debug_mask, msm_smd_pkt_debug_mask, + int, S_IRUGO | S_IWUSR | S_IWGRP); + +enum { + SMD_PKT_STATUS = 1U << 0, + SMD_PKT_READ = 1U << 1, + SMD_PKT_WRITE = 1U << 2, + SMD_PKT_POLL = 1U << 5, +}; + +#define DEBUG + +#ifdef DEBUG + +#define SMD_PKT_LOG_STRING(x...) \ +do { \ + if (smd_pkt_ilctxt) \ + ipc_log_string(smd_pkt_ilctxt, "<SMD_PKT>: "x); \ +} while (0) + +#define D_STATUS(x...) \ +do { \ + if (msm_smd_pkt_debug_mask & SMD_PKT_STATUS) \ + pr_info("Status: "x); \ + SMD_PKT_LOG_STRING(x); \ +} while (0) + +#define D_READ(x...) \ +do { \ + if (msm_smd_pkt_debug_mask & SMD_PKT_READ) \ + pr_info("Read: "x); \ + SMD_PKT_LOG_STRING(x); \ +} while (0) + +#define D_WRITE(x...) \ +do { \ + if (msm_smd_pkt_debug_mask & SMD_PKT_WRITE) \ + pr_info("Write: "x); \ + SMD_PKT_LOG_STRING(x); \ +} while (0) + +#define D_POLL(x...) \ +do { \ + if (msm_smd_pkt_debug_mask & SMD_PKT_POLL) \ + pr_info("Poll: "x); \ + SMD_PKT_LOG_STRING(x); \ +} while (0) + +#define E_SMD_PKT_SSR(x) \ +do { \ + if (x->do_reset_notification) \ + pr_err("%s notifying reset for smd_pkt_dev id:%d\n", \ + __func__, x->i); \ +} while (0) +#else +#define D_STATUS(x...) do {} while (0) +#define D_READ(x...) do {} while (0) +#define D_WRITE(x...) 
do {} while (0) +#define D_POLL(x...) do {} while (0) +#define E_SMD_PKT_SSR(x) do {} while (0) +#endif + +static ssize_t open_timeout_store(struct device *d, + struct device_attribute *attr, + const char *buf, + size_t n) +{ + struct smd_pkt_dev *smd_pkt_devp; + unsigned long tmp; + + mutex_lock(&smd_pkt_dev_lock_lha1); + list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) { + if (smd_pkt_devp->devicep == d) { + if (!kstrtoul(buf, 10, &tmp)) { + smd_pkt_devp->open_modem_wait = tmp; + mutex_unlock(&smd_pkt_dev_lock_lha1); + return n; + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + pr_err("%s: unable to convert: %s to an int\n", + __func__, buf); + return -EINVAL; + } + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + + pr_err("%s: unable to match device to valid smd_pkt port\n", __func__); + return -EINVAL; +} + +static ssize_t open_timeout_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct smd_pkt_dev *smd_pkt_devp; + + mutex_lock(&smd_pkt_dev_lock_lha1); + list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) { + if (smd_pkt_devp->devicep == d) { + mutex_unlock(&smd_pkt_dev_lock_lha1); + return snprintf(buf, PAGE_SIZE, "%d\n", + smd_pkt_devp->open_modem_wait); + } + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + pr_err("%s: unable to match device to valid smd_pkt port\n", __func__); + return -EINVAL; + +} + +static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store); + +/** + * loopback_edge_store() - Set the edge type for loopback device + * @d: Linux device structure + * @attr: Device attribute structure + * @buf: Input string + * @n: Length of the input string + * + * This function is used to set the loopback device edge runtime + * by writing to the loopback_edge node. + */ +static ssize_t loopback_edge_store(struct device *d, + struct device_attribute *attr, + const char *buf, + size_t n) +{ + struct smd_pkt_dev *smd_pkt_devp; + unsigned long tmp; + + mutex_lock(&smd_pkt_dev_lock_lha1); + list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) { + if (smd_pkt_devp->devicep == d) { + if (!kstrtoul(buf, 10, &tmp)) { + smd_pkt_devp->edge = tmp; + mutex_unlock(&smd_pkt_dev_lock_lha1); + return n; + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + pr_err("%s: unable to convert: %s to an int\n", + __func__, buf); + return -EINVAL; + } + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + pr_err("%s: unable to match device to valid smd_pkt port\n", __func__); + return -EINVAL; +} + +/** + * loopback_edge_show() - Get the edge type for loopback device + * @d: Linux device structure + * @attr: Device attribute structure + * @buf: Output buffer + * + * This function is used to get the loopback device edge runtime + * by reading the loopback_edge node. 
+ */ +static ssize_t loopback_edge_show(struct device *d, + struct device_attribute *attr, + char *buf) +{ + struct smd_pkt_dev *smd_pkt_devp; + + mutex_lock(&smd_pkt_dev_lock_lha1); + list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) { + if (smd_pkt_devp->devicep == d) { + mutex_unlock(&smd_pkt_dev_lock_lha1); + return snprintf(buf, PAGE_SIZE, "%d\n", + smd_pkt_devp->edge); + } + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + pr_err("%s: unable to match device to valid smd_pkt port\n", __func__); + return -EINVAL; + +} + +static DEVICE_ATTR(loopback_edge, 0664, loopback_edge_show, + loopback_edge_store); + +static int notify_reset(struct smd_pkt_dev *smd_pkt_devp) +{ + smd_pkt_devp->do_reset_notification = 0; + + return -ENETRESET; +} + +static void clean_and_signal(struct smd_pkt_dev *smd_pkt_devp) +{ + smd_pkt_devp->do_reset_notification = 1; + smd_pkt_devp->has_reset = 1; + + smd_pkt_devp->is_open = 0; + + wake_up(&smd_pkt_devp->ch_read_wait_queue); + wake_up(&smd_pkt_devp->ch_write_wait_queue); + wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue); + D_STATUS("%s smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i); +} + +static void loopback_probe_worker(struct work_struct *work) +{ + + /* Wait for the modem SMSM to be inited for the SMD + ** Loopback channel to be allocated at the modem. Since + ** the wait need to be done atmost once, using msleep + ** doesn't degrade the performance. + */ + if (!is_modem_smsm_inited()) + schedule_delayed_work(&loopback_work, msecs_to_jiffies(1000)); + else + smsm_change_state(SMSM_APPS_STATE, + 0, SMSM_SMD_LOOPBACK); + +} + +static void packet_arrival_worker(struct work_struct *work) +{ + struct smd_pkt_dev *smd_pkt_devp; + unsigned long flags; + + smd_pkt_devp = container_of(work, struct smd_pkt_dev, + packet_arrival_work); + mutex_lock(&smd_pkt_devp->ch_lock); + spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags); + if (smd_pkt_devp->ch && smd_pkt_devp->ws_locked) { + D_READ("%s locking smd_pkt_dev id:%d wakeup source\n", + __func__, smd_pkt_devp->i); + /* + * Keep system awake long enough to allow userspace client + * to process the packet. 
+ */ + __pm_wakeup_event(&smd_pkt_devp->pa_ws, WAKEUPSOURCE_TIMEOUT); + } + spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags); + mutex_unlock(&smd_pkt_devp->ch_lock); +} + +static long smd_pkt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct smd_pkt_dev *smd_pkt_devp; + uint32_t val; + + smd_pkt_devp = file->private_data; + if (!smd_pkt_devp) + return -EINVAL; + + mutex_lock(&smd_pkt_devp->ch_lock); + switch (cmd) { + case TIOCMGET: + D_STATUS("%s TIOCMGET command on smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + ret = smd_tiocmget(smd_pkt_devp->ch); + break; + case TIOCMSET: + ret = get_user(val, (uint32_t *)arg); + if (ret) { + pr_err("Error getting TIOCMSET value\n"); + mutex_unlock(&smd_pkt_devp->ch_lock); + return ret; + } + D_STATUS("%s TIOCSET command on smd_pkt_dev id:%d arg[0x%x]\n", + __func__, smd_pkt_devp->i, val); + ret = smd_tiocmset(smd_pkt_devp->ch, val, ~val); + break; + case SMD_PKT_IOCTL_BLOCKING_WRITE: + ret = get_user(smd_pkt_devp->blocking_write, (int *)arg); + break; + default: + pr_err_ratelimited("%s: Unrecognized ioctl command %d\n", + __func__, cmd); + ret = -ENOIOCTLCMD; + } + mutex_unlock(&smd_pkt_devp->ch_lock); + + return ret; +} + +ssize_t smd_pkt_read(struct file *file, + char __user *_buf, + size_t count, + loff_t *ppos) +{ + int r; + int bytes_read; + int pkt_size; + struct smd_pkt_dev *smd_pkt_devp; + unsigned long flags; + void *buf; + + smd_pkt_devp = file->private_data; + + if (!smd_pkt_devp) { + pr_err_ratelimited("%s on NULL smd_pkt_dev\n", __func__); + return -EINVAL; + } + + if (!smd_pkt_devp->ch) { + pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + return -EINVAL; + } + + if (smd_pkt_devp->do_reset_notification) { + /* notify client that a reset occurred */ + E_SMD_PKT_SSR(smd_pkt_devp); + return notify_reset(smd_pkt_devp); + } + D_READ("Begin %s on smd_pkt_dev id:%d buffer_size %zu\n", + __func__, smd_pkt_devp->i, count); + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + +wait_for_packet: + r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue, + !smd_pkt_devp->ch || + (smd_cur_packet_size(smd_pkt_devp->ch) > 0 + && smd_read_avail(smd_pkt_devp->ch)) || + smd_pkt_devp->has_reset); + + mutex_lock(&smd_pkt_devp->rx_lock); + if (smd_pkt_devp->has_reset) { + mutex_unlock(&smd_pkt_devp->rx_lock); + E_SMD_PKT_SSR(smd_pkt_devp); + kfree(buf); + return notify_reset(smd_pkt_devp); + } + + if (!smd_pkt_devp->ch) { + mutex_unlock(&smd_pkt_devp->rx_lock); + pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + kfree(buf); + return -EINVAL; + } + + if (r < 0) { + mutex_unlock(&smd_pkt_devp->rx_lock); + /* qualify error message */ + if (r != -ERESTARTSYS) { + /* we get this anytime a signal comes in */ + pr_err_ratelimited("%s: wait_event_interruptible on smd_pkt_dev id:%d ret %i\n", + __func__, smd_pkt_devp->i, r); + } + kfree(buf); + return r; + } + + /* Here we have a whole packet waiting for us */ + pkt_size = smd_cur_packet_size(smd_pkt_devp->ch); + + if (!pkt_size) { + pr_err_ratelimited("%s: No data on smd_pkt_dev id:%d, False wakeup\n", + __func__, smd_pkt_devp->i); + mutex_unlock(&smd_pkt_devp->rx_lock); + goto wait_for_packet; + } + + if (pkt_size < 0) { + pr_err_ratelimited("%s: Error %d obtaining packet size for Channel %s", + __func__, pkt_size, smd_pkt_devp->ch_name); + kfree(buf); + return pkt_size; + } + + if ((uint32_t)pkt_size > count) { + pr_err_ratelimited("%s: failure on smd_pkt_dev id: 
%d - packet size %d > buffer size %zu,", + __func__, smd_pkt_devp->i, + pkt_size, count); + mutex_unlock(&smd_pkt_devp->rx_lock); + kfree(buf); + return -ETOOSMALL; + } + + bytes_read = 0; + do { + r = smd_read(smd_pkt_devp->ch, + (buf + bytes_read), + (pkt_size - bytes_read)); + if (r < 0) { + mutex_unlock(&smd_pkt_devp->rx_lock); + if (smd_pkt_devp->has_reset) { + E_SMD_PKT_SSR(smd_pkt_devp); + return notify_reset(smd_pkt_devp); + } + pr_err_ratelimited("%s Error while reading %d\n", + __func__, r); + kfree(buf); + return r; + } + bytes_read += r; + if (pkt_size != bytes_read) + wait_event(smd_pkt_devp->ch_read_wait_queue, + smd_read_avail(smd_pkt_devp->ch) || + smd_pkt_devp->has_reset); + if (smd_pkt_devp->has_reset) { + mutex_unlock(&smd_pkt_devp->rx_lock); + E_SMD_PKT_SSR(smd_pkt_devp); + kfree(buf); + return notify_reset(smd_pkt_devp); + } + } while (pkt_size != bytes_read); + mutex_unlock(&smd_pkt_devp->rx_lock); + + mutex_lock(&smd_pkt_devp->ch_lock); + spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags); + if (smd_pkt_devp->poll_mode && + !smd_cur_packet_size(smd_pkt_devp->ch)) { + __pm_relax(&smd_pkt_devp->pa_ws); + smd_pkt_devp->ws_locked = 0; + smd_pkt_devp->poll_mode = 0; + D_READ("%s unlocked smd_pkt_dev id:%d wakeup_source\n", + __func__, smd_pkt_devp->i); + } + spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags); + mutex_unlock(&smd_pkt_devp->ch_lock); + + r = copy_to_user(_buf, buf, bytes_read); + if (r) { + kfree(buf); + return -EFAULT; + } + D_READ("Finished %s on smd_pkt_dev id:%d %d bytes\n", + __func__, smd_pkt_devp->i, bytes_read); + kfree(buf); + + /* check and wakeup read threads waiting on this device */ + check_and_wakeup_reader(smd_pkt_devp); + + return bytes_read; +} + +ssize_t smd_pkt_write(struct file *file, + const char __user *_buf, + size_t count, + loff_t *ppos) +{ + int r = 0, bytes_written; + struct smd_pkt_dev *smd_pkt_devp; + DEFINE_WAIT(write_wait); + void *buf; + + smd_pkt_devp = file->private_data; + + if (!smd_pkt_devp) { + pr_err_ratelimited("%s on NULL smd_pkt_dev\n", __func__); + return -EINVAL; + } + + if (!smd_pkt_devp->ch) { + pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + return -EINVAL; + } + + if (smd_pkt_devp->do_reset_notification || smd_pkt_devp->has_reset) { + E_SMD_PKT_SSR(smd_pkt_devp); + /* notify client that a reset occurred */ + return notify_reset(smd_pkt_devp); + } + D_WRITE("Begin %s on smd_pkt_dev id:%d data_size %zu\n", + __func__, smd_pkt_devp->i, count); + + buf = kmalloc(count, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + r = copy_from_user(buf, _buf, count); + if (r) { + kfree(buf); + return -EFAULT; + } + + mutex_lock(&smd_pkt_devp->tx_lock); + if (!smd_pkt_devp->blocking_write) { + if (smd_write_avail(smd_pkt_devp->ch) < count) { + pr_err_ratelimited("%s: Not enough space in smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + mutex_unlock(&smd_pkt_devp->tx_lock); + kfree(buf); + return -ENOMEM; + } + } + + r = smd_write_start(smd_pkt_devp->ch, count); + if (r < 0) { + mutex_unlock(&smd_pkt_devp->tx_lock); + pr_err_ratelimited("%s: Error:%d in smd_pkt_dev id:%d @ smd_write_start\n", + __func__, r, smd_pkt_devp->i); + kfree(buf); + return r; + } + + bytes_written = 0; + do { + prepare_to_wait(&smd_pkt_devp->ch_write_wait_queue, + &write_wait, TASK_UNINTERRUPTIBLE); + if (!smd_write_segment_avail(smd_pkt_devp->ch) && + !smd_pkt_devp->has_reset) { + smd_enable_read_intr(smd_pkt_devp->ch); + schedule(); + } + finish_wait(&smd_pkt_devp->ch_write_wait_queue, &write_wait); + 
smd_disable_read_intr(smd_pkt_devp->ch); + + if (smd_pkt_devp->has_reset) { + mutex_unlock(&smd_pkt_devp->tx_lock); + E_SMD_PKT_SSR(smd_pkt_devp); + kfree(buf); + return notify_reset(smd_pkt_devp); + } + r = smd_write_segment(smd_pkt_devp->ch, + (void *)(buf + bytes_written), + (count - bytes_written)); + if (r < 0) { + mutex_unlock(&smd_pkt_devp->tx_lock); + if (smd_pkt_devp->has_reset) { + E_SMD_PKT_SSR(smd_pkt_devp); + return notify_reset(smd_pkt_devp); + } + pr_err_ratelimited("%s on smd_pkt_dev id:%d failed r:%d\n", + __func__, smd_pkt_devp->i, r); + kfree(buf); + return r; + } + bytes_written += r; + } while (bytes_written != count); + smd_write_end(smd_pkt_devp->ch); + mutex_unlock(&smd_pkt_devp->tx_lock); + D_WRITE("Finished %s on smd_pkt_dev id:%d %zu bytes\n", + __func__, smd_pkt_devp->i, count); + + kfree(buf); + return count; +} + +static unsigned int smd_pkt_poll(struct file *file, poll_table *wait) +{ + struct smd_pkt_dev *smd_pkt_devp; + unsigned int mask = 0; + + smd_pkt_devp = file->private_data; + if (!smd_pkt_devp) { + pr_err_ratelimited("%s on a NULL device\n", __func__); + return POLLERR; + } + + smd_pkt_devp->poll_mode = 1; + poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait); + mutex_lock(&smd_pkt_devp->ch_lock); + if (smd_pkt_devp->has_reset || !smd_pkt_devp->ch) { + mutex_unlock(&smd_pkt_devp->ch_lock); + return POLLERR; + } + + if (smd_read_avail(smd_pkt_devp->ch)) { + mask |= POLLIN | POLLRDNORM; + D_POLL("%s sets POLLIN for smd_pkt_dev id: %d\n", + __func__, smd_pkt_devp->i); + } + mutex_unlock(&smd_pkt_devp->ch_lock); + + return mask; +} + +static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp) +{ + int sz; + unsigned long flags; + + if (!smd_pkt_devp) { + pr_err("%s on a NULL device\n", __func__); + return; + } + + if (!smd_pkt_devp->ch) { + pr_err("%s on a closed smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + return; + } + + sz = smd_cur_packet_size(smd_pkt_devp->ch); + if (sz == 0) { + D_READ("%s: No packet in smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + return; + } + if (!smd_read_avail(smd_pkt_devp->ch)) { + D_READ( + "%s: packet size is %d in smd_pkt_dev id:%d - but the data isn't here\n", + __func__, sz, smd_pkt_devp->i); + return; + } + + /* here we have a packet of size sz ready */ + spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags); + __pm_stay_awake(&smd_pkt_devp->pa_ws); + smd_pkt_devp->ws_locked = 1; + spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags); + wake_up(&smd_pkt_devp->ch_read_wait_queue); + schedule_work(&smd_pkt_devp->packet_arrival_work); + D_READ("%s: wake_up smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i); +} + +static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp) +{ + int sz; + + if (!smd_pkt_devp) { + pr_err("%s on a NULL device\n", __func__); + return; + } + + if (!smd_pkt_devp->ch) { + pr_err("%s on a closed smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + return; + } + + sz = smd_write_segment_avail(smd_pkt_devp->ch); + if (sz) { + D_WRITE("%s: %d bytes write space in smd_pkt_dev id:%d\n", + __func__, sz, smd_pkt_devp->i); + smd_disable_read_intr(smd_pkt_devp->ch); + wake_up(&smd_pkt_devp->ch_write_wait_queue); + } +} + +static void ch_notify(void *priv, unsigned event) +{ + struct smd_pkt_dev *smd_pkt_devp = priv; + + if (smd_pkt_devp->ch == 0) { + if (event != SMD_EVENT_CLOSE) + pr_err("%s on a closed smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + return; + } + + switch (event) { + case SMD_EVENT_DATA: { + D_STATUS("%s: DATA event in smd_pkt_dev 
id:%d\n", + __func__, smd_pkt_devp->i); + check_and_wakeup_reader(smd_pkt_devp); + if (smd_pkt_devp->blocking_write) + check_and_wakeup_writer(smd_pkt_devp); + break; + } + case SMD_EVENT_OPEN: + D_STATUS("%s: OPEN event in smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + smd_pkt_devp->has_reset = 0; + smd_pkt_devp->is_open = 1; + wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue); + break; + case SMD_EVENT_CLOSE: + D_STATUS("%s: CLOSE event in smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + smd_pkt_devp->is_open = 0; + /* put port into reset state */ + clean_and_signal(smd_pkt_devp); + if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) + schedule_delayed_work(&loopback_work, + msecs_to_jiffies(1000)); + break; + } +} + +static int smd_pkt_dummy_probe(struct platform_device *pdev) +{ + struct smd_pkt_dev *smd_pkt_devp; + + mutex_lock(&smd_pkt_dev_lock_lha1); + list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) { + if (smd_pkt_devp->edge == pdev->id + && !strcmp(pdev->name, smd_pkt_devp->ch_name)) { + complete_all(&smd_pkt_devp->ch_allocated); + D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + break; + } + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + return 0; +} + +static uint32_t is_modem_smsm_inited(void) +{ + uint32_t modem_state; + uint32_t ready_state = (SMSM_INIT | SMSM_SMDINIT); + + modem_state = smsm_get_state(SMSM_MODEM_STATE); + return (modem_state & ready_state) == ready_state; +} + +/** + * smd_pkt_add_driver() - Add platform drivers for smd pkt device + * + * @smd_pkt_devp: pointer to the smd pkt device structure + * + * @returns: 0 for success, standard Linux error code otherwise + * + * This function is used to register platform driver once for all + * smd pkt devices which have same names and increment the reference + * count for 2nd to nth devices. + */ +static int smd_pkt_add_driver(struct smd_pkt_dev *smd_pkt_devp) +{ + int r = 0; + struct smd_pkt_driver *smd_pkt_driverp; + struct smd_pkt_driver *item; + + if (!smd_pkt_devp) { + pr_err("%s on a NULL device\n", __func__); + return -EINVAL; + } + D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__, + smd_pkt_devp->ch_name); + + mutex_lock(&smd_pkt_driver_lock_lha1); + list_for_each_entry(item, &smd_pkt_driver_list, list) { + if (!strcmp(item->pdriver_name, smd_pkt_devp->ch_name)) { + D_STATUS("%s:%s Already Platform driver reg. cnt:%d\n", + __func__, smd_pkt_devp->ch_name, item->ref_cnt); + ++item->ref_cnt; + goto exit; + } + } + + smd_pkt_driverp = kzalloc(sizeof(*smd_pkt_driverp), GFP_KERNEL); + if (IS_ERR_OR_NULL(smd_pkt_driverp)) { + pr_err("%s: kzalloc() failed for smd_pkt_driver[%s]\n", + __func__, smd_pkt_devp->ch_name); + r = -ENOMEM; + goto exit; + } + + smd_pkt_driverp->driver.probe = smd_pkt_dummy_probe; + scnprintf(smd_pkt_driverp->pdriver_name, SMD_MAX_CH_NAME_LEN, + "%s", smd_pkt_devp->ch_name); + smd_pkt_driverp->driver.driver.name = smd_pkt_driverp->pdriver_name; + smd_pkt_driverp->driver.driver.owner = THIS_MODULE; + r = platform_driver_register(&smd_pkt_driverp->driver); + if (r) { + pr_err("%s: %s Platform driver reg. 
failed\n", + __func__, smd_pkt_devp->ch_name); + kfree(smd_pkt_driverp); + goto exit; + } + ++smd_pkt_driverp->ref_cnt; + list_add(&smd_pkt_driverp->list, &smd_pkt_driver_list); + +exit: + D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name); + mutex_unlock(&smd_pkt_driver_lock_lha1); + return r; +} + +/** + * smd_pkt_remove_driver() - Remove the platform drivers for smd pkt device + * + * @smd_pkt_devp: pointer to the smd pkt device structure + * + * This function is used to decrement the reference count on + * platform drivers for smd pkt devices and removes the drivers + * when the reference count becomes zero. + */ +static void smd_pkt_remove_driver(struct smd_pkt_dev *smd_pkt_devp) +{ + struct smd_pkt_driver *smd_pkt_driverp; + bool found_item = false; + + if (!smd_pkt_devp) { + pr_err("%s on a NULL device\n", __func__); + return; + } + + D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__, + smd_pkt_devp->ch_name); + mutex_lock(&smd_pkt_driver_lock_lha1); + list_for_each_entry(smd_pkt_driverp, &smd_pkt_driver_list, list) { + if (!strcmp(smd_pkt_driverp->pdriver_name, + smd_pkt_devp->ch_name)) { + found_item = true; + D_STATUS("%s:%s Platform driver cnt:%d\n", + __func__, smd_pkt_devp->ch_name, + smd_pkt_driverp->ref_cnt); + if (smd_pkt_driverp->ref_cnt > 0) + --smd_pkt_driverp->ref_cnt; + else + pr_warn("%s reference count <= 0\n", __func__); + break; + } + } + if (!found_item) + pr_err("%s:%s No item found in list.\n", + __func__, smd_pkt_devp->ch_name); + + if (found_item && smd_pkt_driverp->ref_cnt == 0) { + platform_driver_unregister(&smd_pkt_driverp->driver); + smd_pkt_driverp->driver.probe = NULL; + list_del(&smd_pkt_driverp->list); + kfree(smd_pkt_driverp); + } + mutex_unlock(&smd_pkt_driver_lock_lha1); + D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name); +} + +int smd_pkt_open(struct inode *inode, struct file *file) +{ + int r = 0; + struct smd_pkt_dev *smd_pkt_devp; + const char *peripheral = NULL; + + smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev); + + if (!smd_pkt_devp) { + pr_err_ratelimited("%s on a NULL device\n", __func__); + return -EINVAL; + } + D_STATUS("Begin %s on smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i); + + file->private_data = smd_pkt_devp; + + mutex_lock(&smd_pkt_devp->ch_lock); + if (smd_pkt_devp->ch == 0) { + unsigned open_wait_rem = smd_pkt_devp->open_modem_wait * 1000; + + reinit_completion(&smd_pkt_devp->ch_allocated); + + r = smd_pkt_add_driver(smd_pkt_devp); + if (r) { + pr_err_ratelimited("%s: %s Platform driver reg. failed\n", + __func__, smd_pkt_devp->ch_name); + goto out; + } + + peripheral = smd_edge_to_pil_str(smd_pkt_devp->edge); + if (!IS_ERR_OR_NULL(peripheral)) { + smd_pkt_devp->pil = subsystem_get(peripheral); + if (IS_ERR(smd_pkt_devp->pil)) { + r = PTR_ERR(smd_pkt_devp->pil); + pr_err_ratelimited("%s failed on smd_pkt_dev id:%d - subsystem_get failed for %s\n", + __func__, smd_pkt_devp->i, peripheral); + /* + * Sleep inorder to reduce the frequency of + * retry by user-space modules and to avoid + * possible watchdog bite. + */ + msleep(open_wait_rem); + goto release_pd; + } + } + + /* Wait for the modem SMSM to be inited for the SMD + ** Loopback channel to be allocated at the modem. Since + ** the wait need to be done atmost once, using msleep + ** doesn't degrade the performance. 
+ */ + if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) { + if (!is_modem_smsm_inited()) + msleep(5000); + smsm_change_state(SMSM_APPS_STATE, + 0, SMSM_SMD_LOOPBACK); + msleep(100); + } + + /* + * Wait for a packet channel to be allocated so we know + * the modem is ready enough. + */ + if (open_wait_rem) { + r = wait_for_completion_interruptible_timeout( + &smd_pkt_devp->ch_allocated, + msecs_to_jiffies(open_wait_rem)); + if (r >= 0) + open_wait_rem = jiffies_to_msecs(r); + if (r == 0) + r = -ETIMEDOUT; + if (r == -ERESTARTSYS) { + pr_info_ratelimited("%s: wait on smd_pkt_dev id:%d allocation interrupted\n", + __func__, smd_pkt_devp->i); + goto release_pil; + } + if (r < 0) { + pr_err_ratelimited("%s: wait on smd_pkt_dev id:%d allocation failed rc:%d\n", + __func__, smd_pkt_devp->i, r); + goto release_pil; + } + } + + r = smd_named_open_on_edge(smd_pkt_devp->ch_name, + smd_pkt_devp->edge, + &smd_pkt_devp->ch, + smd_pkt_devp, + ch_notify); + if (r < 0) { + pr_err_ratelimited("%s: %s open failed %d\n", __func__, + smd_pkt_devp->ch_name, r); + goto release_pil; + } + + open_wait_rem = max_t(unsigned, 2000, open_wait_rem); + r = wait_event_interruptible_timeout( + smd_pkt_devp->ch_opened_wait_queue, + smd_pkt_devp->is_open, + msecs_to_jiffies(open_wait_rem)); + if (r == 0) + r = -ETIMEDOUT; + + if (r < 0) { + /* close the ch to sync smd's state with smd_pkt */ + smd_close(smd_pkt_devp->ch); + smd_pkt_devp->ch = NULL; + } + + if (r == -ERESTARTSYS) { + pr_info_ratelimited("%s: wait on smd_pkt_dev id:%d OPEN interrupted\n", + __func__, smd_pkt_devp->i); + } else if (r < 0) { + pr_err_ratelimited("%s: wait on smd_pkt_dev id:%d OPEN event failed rc:%d\n", + __func__, smd_pkt_devp->i, r); + } else if (!smd_pkt_devp->is_open) { + pr_err_ratelimited("%s: Invalid OPEN event on smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + r = -ENODEV; + } else { + smd_disable_read_intr(smd_pkt_devp->ch); + smd_pkt_devp->ch_size = + smd_write_avail(smd_pkt_devp->ch); + r = 0; + smd_pkt_devp->ref_cnt++; + D_STATUS("Finished %s on smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + } + } else { + smd_pkt_devp->ref_cnt++; + } +release_pil: + if (peripheral && (r < 0)) { + subsystem_put(smd_pkt_devp->pil); + smd_pkt_devp->pil = NULL; + } + +release_pd: + if (r < 0) + smd_pkt_remove_driver(smd_pkt_devp); +out: + mutex_unlock(&smd_pkt_devp->ch_lock); + + + return r; +} + +int smd_pkt_release(struct inode *inode, struct file *file) +{ + int r = 0; + struct smd_pkt_dev *smd_pkt_devp = file->private_data; + unsigned long flags; + + if (!smd_pkt_devp) { + pr_err_ratelimited("%s on a NULL device\n", __func__); + return -EINVAL; + } + D_STATUS("Begin %s on smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + + mutex_lock(&smd_pkt_devp->ch_lock); + mutex_lock(&smd_pkt_devp->rx_lock); + mutex_lock(&smd_pkt_devp->tx_lock); + if (smd_pkt_devp->ref_cnt > 0) + smd_pkt_devp->ref_cnt--; + + if (smd_pkt_devp->ch != 0 && smd_pkt_devp->ref_cnt == 0) { + clean_and_signal(smd_pkt_devp); + r = smd_close(smd_pkt_devp->ch); + smd_pkt_devp->ch = 0; + smd_pkt_devp->blocking_write = 0; + smd_pkt_devp->poll_mode = 0; + smd_pkt_remove_driver(smd_pkt_devp); + if (smd_pkt_devp->pil) + subsystem_put(smd_pkt_devp->pil); + smd_pkt_devp->has_reset = 0; + smd_pkt_devp->do_reset_notification = 0; + spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags); + if (smd_pkt_devp->ws_locked) { + __pm_relax(&smd_pkt_devp->pa_ws); + smd_pkt_devp->ws_locked = 0; + } + spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags); + } + 
mutex_unlock(&smd_pkt_devp->tx_lock); + mutex_unlock(&smd_pkt_devp->rx_lock); + mutex_unlock(&smd_pkt_devp->ch_lock); + + if (flush_work(&smd_pkt_devp->packet_arrival_work)) + D_STATUS("%s: Flushed work for smd_pkt_dev id:%d\n", __func__, + smd_pkt_devp->i); + + D_STATUS("Finished %s on smd_pkt_dev id:%d\n", + __func__, smd_pkt_devp->i); + + return r; +} + +static const struct file_operations smd_pkt_fops = { + .owner = THIS_MODULE, + .open = smd_pkt_open, + .release = smd_pkt_release, + .read = smd_pkt_read, + .write = smd_pkt_write, + .poll = smd_pkt_poll, + .unlocked_ioctl = smd_pkt_ioctl, + .compat_ioctl = smd_pkt_ioctl, +}; + +static int smd_pkt_init_add_device(struct smd_pkt_dev *smd_pkt_devp, int i) +{ + int r = 0; + + smd_pkt_devp->i = i; + + init_waitqueue_head(&smd_pkt_devp->ch_read_wait_queue); + init_waitqueue_head(&smd_pkt_devp->ch_write_wait_queue); + smd_pkt_devp->is_open = 0; + smd_pkt_devp->poll_mode = 0; + smd_pkt_devp->ws_locked = 0; + init_waitqueue_head(&smd_pkt_devp->ch_opened_wait_queue); + + spin_lock_init(&smd_pkt_devp->pa_spinlock); + mutex_init(&smd_pkt_devp->ch_lock); + mutex_init(&smd_pkt_devp->rx_lock); + mutex_init(&smd_pkt_devp->tx_lock); + wakeup_source_init(&smd_pkt_devp->pa_ws, smd_pkt_devp->dev_name); + INIT_WORK(&smd_pkt_devp->packet_arrival_work, packet_arrival_worker); + init_completion(&smd_pkt_devp->ch_allocated); + + cdev_init(&smd_pkt_devp->cdev, &smd_pkt_fops); + smd_pkt_devp->cdev.owner = THIS_MODULE; + + r = cdev_add(&smd_pkt_devp->cdev, (smd_pkt_number + i), 1); + if (IS_ERR_VALUE(r)) { + pr_err("%s: cdev_add() failed for smd_pkt_dev id:%d ret:%i\n", + __func__, i, r); + return r; + } + + smd_pkt_devp->devicep = + device_create(smd_pkt_classp, + NULL, + (smd_pkt_number + i), + NULL, + smd_pkt_devp->dev_name); + + if (IS_ERR_OR_NULL(smd_pkt_devp->devicep)) { + pr_err("%s: device_create() failed for smd_pkt_dev id:%d\n", + __func__, i); + r = -ENOMEM; + cdev_del(&smd_pkt_devp->cdev); + wakeup_source_trash(&smd_pkt_devp->pa_ws); + return r; + } + if (device_create_file(smd_pkt_devp->devicep, + &dev_attr_open_timeout)) + pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n", + __func__, i); + + if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) { + if (device_create_file(smd_pkt_devp->devicep, + &dev_attr_loopback_edge)) + pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n", + __func__, i); + } + mutex_lock(&smd_pkt_dev_lock_lha1); + list_add(&smd_pkt_devp->dev_list, &smd_pkt_dev_list); + mutex_unlock(&smd_pkt_dev_lock_lha1); + + return r; +} + +static void smd_pkt_core_deinit(void) +{ + struct smd_pkt_dev *smd_pkt_devp; + struct smd_pkt_dev *index; + + mutex_lock(&smd_pkt_dev_lock_lha1); + list_for_each_entry_safe(smd_pkt_devp, index, &smd_pkt_dev_list, + dev_list) { + cdev_del(&smd_pkt_devp->cdev); + list_del(&smd_pkt_devp->dev_list); + device_destroy(smd_pkt_classp, + MKDEV(MAJOR(smd_pkt_number), smd_pkt_devp->i)); + kfree(smd_pkt_devp); + } + mutex_unlock(&smd_pkt_dev_lock_lha1); + + if (!IS_ERR_OR_NULL(smd_pkt_classp)) + class_destroy(smd_pkt_classp); + + unregister_chrdev_region(MAJOR(smd_pkt_number), num_smd_pkt_ports); +} + +static int smd_pkt_alloc_chrdev_region(void) +{ + int r = alloc_chrdev_region(&smd_pkt_number, + 0, + num_smd_pkt_ports, + DEVICE_NAME); + + if (IS_ERR_VALUE(r)) { + pr_err("%s: alloc_chrdev_region() failed ret:%i\n", + __func__, r); + return r; + } + + smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME); + if (IS_ERR(smd_pkt_classp)) { + pr_err("%s: class_create() failed ENOMEM\n", 
__func__); + r = -ENOMEM; + unregister_chrdev_region(MAJOR(smd_pkt_number), + num_smd_pkt_ports); + return r; + } + + return 0; +} + +static int parse_smdpkt_devicetree(struct device_node *node, + struct smd_pkt_dev *smd_pkt_devp) +{ + int edge; + char *key; + const char *ch_name; + const char *dev_name; + const char *remote_ss; + + key = "qcom,smdpkt-remote"; + remote_ss = of_get_property(node, key, NULL); + if (!remote_ss) + goto error; + + edge = smd_remote_ss_to_edge(remote_ss); + if (edge < 0) + goto error; + + smd_pkt_devp->edge = edge; + D_STATUS("%s: %s = %d", __func__, key, edge); + + key = "qcom,smdpkt-port-name"; + ch_name = of_get_property(node, key, NULL); + if (!ch_name) + goto error; + + strlcpy(smd_pkt_devp->ch_name, ch_name, SMD_MAX_CH_NAME_LEN); + D_STATUS("%s ch_name = %s\n", __func__, ch_name); + + key = "qcom,smdpkt-dev-name"; + dev_name = of_get_property(node, key, NULL); + if (!dev_name) + goto error; + + strlcpy(smd_pkt_devp->dev_name, dev_name, SMD_MAX_CH_NAME_LEN); + D_STATUS("%s dev_name = %s\n", __func__, dev_name); + + return 0; + +error: + pr_err("%s: missing key: %s\n", __func__, key); + return -ENODEV; + +} + +static int smd_pkt_devicetree_init(struct platform_device *pdev) +{ + int ret; + int i = 0; + struct device_node *node; + struct smd_pkt_dev *smd_pkt_devp; + int subnode_num = 0; + + for_each_child_of_node(pdev->dev.of_node, node) + ++subnode_num; + + num_smd_pkt_ports = subnode_num; + + ret = smd_pkt_alloc_chrdev_region(); + if (ret) { + pr_err("%s: smd_pkt_alloc_chrdev_region() failed ret:%i\n", + __func__, ret); + return ret; + } + + for_each_child_of_node(pdev->dev.of_node, node) { + smd_pkt_devp = kzalloc(sizeof(struct smd_pkt_dev), GFP_KERNEL); + if (IS_ERR_OR_NULL(smd_pkt_devp)) { + pr_err("%s: kzalloc() failed for smd_pkt_dev id:%d\n", + __func__, i); + ret = -ENOMEM; + goto error_destroy; + } + + ret = parse_smdpkt_devicetree(node, smd_pkt_devp); + if (ret) { + pr_err(" failed to parse_smdpkt_devicetree %d\n", i); + kfree(smd_pkt_devp); + goto error_destroy; + } + + ret = smd_pkt_init_add_device(smd_pkt_devp, i); + if (ret < 0) { + pr_err("add device failed for idx:%d ret=%d\n", i, ret); + kfree(smd_pkt_devp); + goto error_destroy; + } + i++; + } + + INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker); + + D_STATUS("SMD Packet Port Driver Initialized.\n"); + return 0; + +error_destroy: + smd_pkt_core_deinit(); + return ret; +} + +static int msm_smd_pkt_probe(struct platform_device *pdev) +{ + int ret; + + if (pdev) { + if (pdev->dev.of_node) { + D_STATUS("%s device tree implementation\n", __func__); + ret = smd_pkt_devicetree_init(pdev); + if (ret) + pr_err("%s: device tree init failed\n", + __func__); + } + } + + return 0; +} + +static const struct of_device_id msm_smd_pkt_match_table[] = { + { .compatible = "qcom,smdpkt" }, + {}, +}; + +static struct platform_driver msm_smd_pkt_driver = { + .probe = msm_smd_pkt_probe, + .driver = { + .name = MODULE_NAME, + .owner = THIS_MODULE, + .of_match_table = msm_smd_pkt_match_table, + }, +}; + +static int __init smd_pkt_init(void) +{ + int rc; + + INIT_LIST_HEAD(&smd_pkt_dev_list); + INIT_LIST_HEAD(&smd_pkt_driver_list); + rc = platform_driver_register(&msm_smd_pkt_driver); + if (rc) { + pr_err("%s: msm_smd_driver register failed %d\n", + __func__, rc); + return rc; + } + + smd_pkt_ilctxt = ipc_log_context_create(SMD_PKT_IPC_LOG_PAGE_CNT, + "smd_pkt", 0); + return 0; +} + +static void __exit smd_pkt_cleanup(void) +{ + smd_pkt_core_deinit(); +} + +module_init(smd_pkt_init); 
+module_exit(smd_pkt_cleanup); + +MODULE_DESCRIPTION("MSM Shared Memory Packet Port"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c index 19956f030ae9..adb07cdb7e8d 100644 --- a/drivers/clk/msm/clock-local2.c +++ b/drivers/clk/msm/clock-local2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -928,7 +928,8 @@ static unsigned long branch_clk_get_rate(struct clk *c) { struct branch_clk *branch = to_branch_clk(c); - if (branch->max_div) + if (branch->max_div || + (branch->aggr_sibling_rates && !branch->is_prepared)) return branch->c.rate; return clk_get_rate(c->parent); diff --git a/drivers/clk/msm/clock-mmss-8998.c b/drivers/clk/msm/clock-mmss-8998.c index 6ebb3ed6ed91..fdaaa723accd 100644 --- a/drivers/clk/msm/clock-mmss-8998.c +++ b/drivers/clk/msm/clock-mmss-8998.c @@ -359,6 +359,7 @@ static struct rcg_clk mdp_clk_src = { .set_rate = set_rate_hid, .freq_tbl = ftbl_mdp_clk_src, .current_freq = &rcg_dummy_freq, + .non_local_children = true, .base = &virt_base, .c = { .dbg_name = "mdp_clk_src", diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c index e51cd437cf7c..eb69ed35f46d 100644 --- a/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c +++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8998.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -38,41 +38,76 @@ /* Register Offsets from PLL base address */ #define PLL_ANALOG_CONTROLS_ONE 0x000 #define PLL_ANALOG_CONTROLS_TWO 0x004 +#define PLL_INT_LOOP_SETTINGS 0x008 +#define PLL_INT_LOOP_SETTINGS_TWO 0x00c #define PLL_ANALOG_CONTROLS_THREE 0x010 -#define PLL_DSM_DIVIDER 0x01c +#define PLL_ANALOG_CONTROLS_FOUR 0x014 +#define PLL_INT_LOOP_CONTROLS 0x018 +#define PLL_DSM_DIVIDER 0x01c #define PLL_FEEDBACK_DIVIDER 0x020 #define PLL_SYSTEM_MUXES 0x024 +#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES 0x028 #define PLL_CMODE 0x02c #define PLL_CALIBRATION_SETTINGS 0x030 +#define PLL_BAND_SEL_CAL_TIMER_LOW 0x034 +#define PLL_BAND_SEL_CAL_TIMER_HIGH 0x038 +#define PLL_BAND_SEL_CAL_SETTINGS 0x03c +#define PLL_BAND_SEL_MIN 0x040 +#define PLL_BAND_SEL_MAX 0x044 +#define PLL_BAND_SEL_PFILT 0x048 +#define PLL_BAND_SEL_IFILT 0x04c +#define PLL_BAND_SEL_CAL_SETTINGS_TWO 0x050 #define PLL_BAND_SEL_CAL_SETTINGS_THREE 0x054 +#define PLL_BAND_SEL_CAL_SETTINGS_FOUR 0x058 +#define PLL_BAND_SEL_ICODE_HIGH 0x05c +#define PLL_BAND_SEL_ICODE_LOW 0x060 #define PLL_FREQ_DETECT_SETTINGS_ONE 0x064 #define PLL_PFILT 0x07c #define PLL_IFILT 0x080 +#define PLL_GAIN 0x084 +#define PLL_ICODE_LOW 0x088 +#define PLL_ICODE_HIGH 0x08c +#define PLL_LOCKDET 0x090 #define PLL_OUTDIV 0x094 -#define PLL_CORE_OVERRIDE 0x0a4 +#define PLL_FASTLOCK_CONTROL 0x098 +#define PLL_PASS_OUT_OVERRIDE_ONE 0x09c +#define PLL_PASS_OUT_OVERRIDE_TWO 0x0a0 +#define PLL_CORE_OVERRIDE 0x0a4 #define PLL_CORE_INPUT_OVERRIDE 0x0a8 +#define PLL_RATE_CHANGE 0x0ac +#define PLL_PLL_DIGITAL_TIMERS 0x0b0 #define PLL_PLL_DIGITAL_TIMERS_TWO 0x0b4 +#define PLL_DEC_FRAC_MUXES 0x0c8 #define PLL_DECIMAL_DIV_START_1 0x0cc #define 
PLL_FRAC_DIV_START_LOW_1 0x0d0 #define PLL_FRAC_DIV_START_MID_1 0x0d4 #define PLL_FRAC_DIV_START_HIGH_1 0x0d8 +#define PLL_MASH_CONTROL 0x0ec +#define PLL_SSC_MUX_CONTROL 0x108 #define PLL_SSC_STEPSIZE_LOW_1 0x10c #define PLL_SSC_STEPSIZE_HIGH_1 0x110 #define PLL_SSC_DIV_PER_LOW_1 0x114 #define PLL_SSC_DIV_PER_HIGH_1 0x118 #define PLL_SSC_DIV_ADJPER_LOW_1 0x11c #define PLL_SSC_DIV_ADJPER_HIGH_1 0x120 -#define PLL_SSC_CONTROL 0x13c -#define PLL_PLL_OUTDIV_RATE 0x140 +#define PLL_SSC_CONTROL 0x13c +#define PLL_PLL_OUTDIV_RATE 0x140 #define PLL_PLL_LOCKDET_RATE_1 0x144 #define PLL_PLL_PROP_GAIN_RATE_1 0x14c #define PLL_PLL_BAND_SET_RATE_1 0x154 #define PLL_PLL_INT_GAIN_IFILT_BAND_1 0x15c #define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x164 -#define PLL_PLL_LOCK_OVERRIDE 0x180 -#define PLL_PLL_LOCK_DELAY 0x184 -#define PLL_CLOCK_INVERTERS 0x18c -#define PLL_COMMON_STATUS_ONE 0x1a0 +#define PLL_FASTLOCK_EN_BAND 0x16c +#define PLL_FREQ_TUNE_ACCUM_INIT_MUX 0x17c +#define PLL_PLL_LOCK_OVERRIDE 0x180 +#define PLL_PLL_LOCK_DELAY 0x184 +#define PLL_PLL_LOCK_MIN_DELAY 0x188 +#define PLL_CLOCK_INVERTERS 0x18c +#define PLL_SPARE_AND_JPC_OVERRIDES 0x190 +#define PLL_BIAS_CONTROL_1 0x194 +#define PLL_BIAS_CONTROL_2 0x198 +#define PLL_ALOG_OBSV_BUS_CTRL_1 0x19c +#define PLL_COMMON_STATUS_ONE 0x1a0 /* Register Offsets from PHY base address */ #define PHY_CMN_CLK_CFG0 0x010 @@ -384,6 +419,49 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll, MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f); } +static void dsi_pll_init_val(struct mdss_pll_resources *rsc) +{ + void __iomem *pll_base = rsc->pll_base; + + MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x10); + MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x3f); + MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x80); + MDSS_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x02); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x82); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0xff); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x25); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x4f); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0a); + MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_GAIN, 0x42); + MDSS_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_LOCKDET, 0x30); + MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x04); + MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x01); + MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x08); + MDSS_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00); + MDSS_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x03); + MDSS_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_EN_BAND, 0x03); + MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x0); + MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x19); + MDSS_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x0); + 
MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x40); + MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x20); + MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x0); +} + static void dsi_pll_commit(struct dsi_pll_8998 *pll, struct mdss_pll_resources *rsc) { @@ -440,6 +518,8 @@ static int vco_8998_set_rate(struct clk *c, unsigned long rate) return rc; } + dsi_pll_init_val(rsc); + dsi_pll_setup_config(pll, rsc); dsi_pll_calc_dec_frac(pll, rsc); @@ -554,7 +634,6 @@ error: static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc) { - dsi_pll_disable_global_clk(rsc); MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0); dsi_pll_disable_pll_bias(rsc); } @@ -573,11 +652,20 @@ static void dsi_pll_disable(struct dsi_pll_vco_clk *vco) pr_debug("stop PLL (%d)\n", rsc->index); + /* + * To avoid any stray glitches while + * abruptly powering down the PLL + * make sure to gate the clock using + * the clock enable bit before powering + * down the PLL + **/ + dsi_pll_disable_global_clk(rsc); MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0); dsi_pll_disable_sub(rsc); - if (rsc->slave) + if (rsc->slave) { + dsi_pll_disable_global_clk(rsc->slave); dsi_pll_disable_sub(rsc->slave); - + } /* flush, ensure all register writes are done*/ wmb(); rsc->pll_on = false; diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index f82ddc3b008b..d3914ab5f47c 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -772,7 +772,7 @@ static const char * const gcc_parent_names_1[] = { }; static struct freq_tbl ftbl_osm_clk_src[] = { - F(200000000, LMH_LITE_CLK_SRC, 3, 0, 0), + F(200000000, LMH_LITE_CLK_SRC, 1.5, 0, 0), { } }; diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index b55310e091af..b10f9ca9fe1a 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -732,6 +732,7 @@ static struct clk_rcg2 gp3_clk_src = { }; static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = { + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0), { } }; @@ -2755,6 +2756,9 @@ static int gcc_660_probe(struct platform_device *pdev) /* Keep bimc gfx clock port on all the time */ clk_prepare_enable(gcc_bimc_gfx_clk.clkr.hw.clk); + /* Set the HMSS_GPLL0_SRC for 300MHz to CPU subsystem */ + clk_set_rate(hmss_gpll0_clk_src.clkr.hw.clk, 300000000); + dev_info(&pdev->dev, "Registered GCC clocks\n"); return ret; diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index ce67145bb142..4ba8f12f4b19 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -43,6 +43,7 @@ #include <soc/qcom/event_timer.h> #include <soc/qcom/lpm-stats.h> #include <soc/qcom/jtag.h> +#include <soc/qcom/minidump.h> #include <asm/cputype.h> #include <asm/arch_timer.h> #include <asm/cacheflush.h> @@ -72,6 +73,8 @@ enum debug_event { CLUSTER_ENTER, CLUSTER_EXIT, PRE_PC_CB, + CPU_HP_STARTING, + CPU_HP_DYING, }; struct lpm_debug { @@ -341,10 +344,16 @@ static int lpm_cpu_callback(struct notifier_block *cpu_nb, switch (action & ~CPU_TASKS_FROZEN) { case CPU_DYING: + update_debug_pc_event(CPU_HP_DYING, cpu, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], false); cluster_prepare(cluster, get_cpu_mask((unsigned int) cpu), NR_LPM_LEVELS, false, 0); break; case CPU_STARTING: + update_debug_pc_event(CPU_HP_STARTING, cpu, + cluster->num_children_in_sync.bits[0], + cluster->child_cpus.bits[0], false); cluster_unprepare(cluster, get_cpu_mask((unsigned int) cpu), NR_LPM_LEVELS, false, 0); 
break; @@ -1846,6 +1855,7 @@ static int lpm_probe(struct platform_device *pdev) int ret; int size; struct kobject *module_kobj = NULL; + struct md_region md_entry; get_online_cpus(); lpm_root_node = lpm_of_parse_cluster(pdev); @@ -1906,6 +1916,14 @@ static int lpm_probe(struct platform_device *pdev) goto failed; } + /* Add lpm_debug to Minidump*/ + strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name)); + md_entry.virt_addr = (uintptr_t)lpm_debug; + md_entry.phys_addr = lpm_debug_phys; + md_entry.size = size; + if (msm_minidump_add_region(&md_entry)) + pr_info("Failed to add lpm_debug in Minidump\n"); + return 0; failed: free_cluster_node(lpm_root_node); diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index ab21334d5813..4002a5b57250 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -1608,7 +1608,8 @@ static struct ice_device *get_ice_device_from_storage_type list_for_each_entry(ice_dev, &ice_devices, list) { if (!strcmp(ice_dev->ice_instance_type, storage_type)) { - pr_info("%s: found ice device %p\n", __func__, ice_dev); + pr_debug("%s: found ice device %pK\n", + __func__, ice_dev); break; } } diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c index 7459401979fe..d04ca6f28f90 100644 --- a/drivers/crypto/msm/qcedev.c +++ b/drivers/crypto/msm/qcedev.c @@ -56,6 +56,7 @@ static uint8_t _std_init_vector_sha256_uint8[] = { static DEFINE_MUTEX(send_cmd_lock); static DEFINE_MUTEX(qcedev_sent_bw_req); +static DEFINE_MUTEX(hash_access_lock); static void qcedev_ce_high_bw_req(struct qcedev_control *podev, bool high_bw_req) @@ -1648,12 +1649,18 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) (void __user *)arg, sizeof(struct qcedev_sha_op_req))) return -EFAULT; - if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); return -EINVAL; + } qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; err = qcedev_hash_init(&qcedev_areq, handle, &sg_src); - if (err) + if (err) { + mutex_unlock(&hash_access_lock); return err; + } + mutex_unlock(&hash_access_lock); if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, sizeof(struct qcedev_sha_op_req))) return -EFAULT; @@ -1671,32 +1678,42 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) (void __user *)arg, sizeof(struct qcedev_sha_op_req))) return -EFAULT; - if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); return -EINVAL; + } qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) { err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src); - if (err) + if (err) { + mutex_unlock(&hash_access_lock); return err; + } } else { if (handle->sha_ctxt.init_done == false) { pr_err("%s Init was not called\n", __func__); + mutex_unlock(&hash_access_lock); return -EINVAL; } err = qcedev_hash_update(&qcedev_areq, handle, &sg_src); - if (err) + if (err) { + mutex_unlock(&hash_access_lock); return err; + } } if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) { pr_err("Invalid sha_ctxt.diglen %d\n", handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); return -EINVAL; } memcpy(&qcedev_areq.sha_op_req.digest[0], &handle->sha_ctxt.digest[0], handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); if (copy_to_user((void __user *)arg, 
&qcedev_areq.sha_op_req, sizeof(struct qcedev_sha_op_req))) return -EFAULT; @@ -1713,16 +1730,22 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) (void __user *)arg, sizeof(struct qcedev_sha_op_req))) return -EFAULT; - if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); return -EINVAL; + } qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; err = qcedev_hash_final(&qcedev_areq, handle); - if (err) + if (err) { + mutex_unlock(&hash_access_lock); return err; + } qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen; memcpy(&qcedev_areq.sha_op_req.digest[0], &handle->sha_ctxt.digest[0], handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, sizeof(struct qcedev_sha_op_req))) return -EFAULT; @@ -1737,20 +1760,28 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) (void __user *)arg, sizeof(struct qcedev_sha_op_req))) return -EFAULT; - if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + mutex_lock(&hash_access_lock); + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) { + mutex_unlock(&hash_access_lock); return -EINVAL; + } qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; qcedev_hash_init(&qcedev_areq, handle, &sg_src); err = qcedev_hash_update(&qcedev_areq, handle, &sg_src); - if (err) + if (err) { + mutex_unlock(&hash_access_lock); return err; + } err = qcedev_hash_final(&qcedev_areq, handle); - if (err) + if (err) { + mutex_unlock(&hash_access_lock); return err; + } qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen; memcpy(&qcedev_areq.sha_op_req.digest[0], &handle->sha_ctxt.digest[0], handle->sha_ctxt.diglen); + mutex_unlock(&hash_access_lock); if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, sizeof(struct qcedev_sha_op_req))) return -EFAULT; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index cc1e16fd7e76..02a60a1df50d 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -89,6 +89,14 @@ struct detailed_mode_closure { #define LEVEL_GTF2 2 #define LEVEL_CVT 3 +/*Enum storing luminance types for HDR blocks in EDID*/ +enum luminance_value { + NO_LUMINANCE_DATA = 3, + MAXIMUM_LUMINANCE = 4, + FRAME_AVERAGE_LUMINANCE = 5, + MINIMUM_LUMINANCE = 6 +}; + static struct edid_quirk { char vendor[4]; int product_id; @@ -992,6 +1000,221 @@ static const struct drm_display_mode edid_cea_modes[] = { 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 65 - 1280x720@24Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 66 - 1280x720@25Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, + 3740, 3960, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 67 - 1280x720@30Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 68 - 1280x720@50Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 
1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 69 - 1280x720@60Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 70 - 1280x720@100Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, + 1760, 1980, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 71 - 1280x720@120Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, + 1430, 1650, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 72 - 1920x1080@24Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 73 - 1920x1080@25Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 74 - 1920x1080@30Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 75 - 1920x1080@50Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 76 - 1920x1080@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 77 - 1920x1080@100Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, + 2492, 2640, 0, 1080, 1084, 1094, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 78 - 1920x1080@120Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, + 2052, 2200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 79 - 1680x720@24Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040, + 3080, 3300, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 80 - 1680x720@25Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908, + 2948, 3168, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 81 - 1680x720@30Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380, + 2420, 2640, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 82 - 1680x720@50Hz */ + { 
DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940, + 1980, 2200, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 83 - 1680x720@60Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940, + 1980, 2200, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 84 - 1680x720@100Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740, + 1780, 2000, 0, 720, 725, 730, 825, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 85 - 1680x720@120Hz */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740, + 1780, 2000, 0, 720, 725, 730, 825, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 86 - 2560x1080@24Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558, + 3602, 3750, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 87 - 2560x1080@25Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008, + 3052, 3200, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 88 - 2560x1080@30Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328, + 3372, 3520, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 89 - 2560x1080@50Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108, + 3152, 3300, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 90 - 2560x1080@60Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808, + 2852, 3000, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 91 - 2560x1080@100Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778, + 2822, 2970, 0, 1080, 1084, 1089, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 92 - 2560x1080@120Hz */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108, + 3152, 3300, 0, 1080, 1084, 1089, 1250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 93 - 3840x2160p@24Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9,}, + /* 94 - 3840x2160p@25Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9}, + /* 95 - 3840x2160p@30Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, 
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9}, + /* 96 - 3840x2160p@50Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9}, + /* 97 - 3840x2160p@60Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9}, + /* 98 - 4096x2160p@24Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135}, + /* 99 - 4096x2160p@25Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064, + 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135}, + /* 100 - 4096x2160p@30Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135}, + /* 101 - 4096x2160p@50Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064, + 5152, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135}, + /* 102 - 4096x2160p@60Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184, + 4272, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135}, + /* 103 - 3840x2160p@24Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27}, + /* 104 - 3840x2160p@25Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27}, + /* 105 - 3840x2160p@30Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27}, + /* 106 - 3840x2160p@50Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27}, + /* 107 - 3840x2160p@60Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27}, }; /* @@ -2482,12 +2705,15 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, return closure.modes; } - +#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0 #define AUDIO_BLOCK 0x01 #define VIDEO_BLOCK 0x02 #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 +#define 
HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06 +#define EXTENDED_TAG 0x07 #define VIDEO_CAPABILITY_BLOCK 0x07 +#define Y420_VIDEO_DATA_BLOCK 0x0E #define EDID_BASIC_AUDIO (1 << 6) #define EDID_CEA_YCRCB444 (1 << 5) #define EDID_CEA_YCRCB422 (1 << 4) @@ -3076,6 +3302,21 @@ static bool cea_db_is_hdmi_vsdb(const u8 *db) return hdmi_id == HDMI_IEEE_OUI; } +static bool cea_db_is_hdmi_hf_vsdb(const u8 *db) +{ + int hdmi_id; + + if (cea_db_tag(db) != VENDOR_BLOCK) + return false; + + if (cea_db_payload_len(db) < 7) + return false; + + hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); + + return hdmi_id == HDMI_IEEE_OUI_HF; +} + #define for_each_cea_db(cea, i, start, end) \ for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1) @@ -3199,6 +3440,258 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db) } static void +parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db) +{ + u8 len = cea_db_payload_len(db); + + if (len < 7) + return; + + if (db[4] != 1) + return; /* invalid version */ + + connector->max_tmds_char = db[5] * 5; + connector->scdc_present = db[6] & (1 << 7); + connector->rr_capable = db[6] & (1 << 6); + connector->flags_3d = db[6] & 0x7; + connector->supports_scramble = connector->scdc_present && + (db[6] & (1 << 3)); + + DRM_DEBUG_KMS("HDMI v2: max TMDS char %d, " + "scdc %s, " + "rr %s, " + "3D flags 0x%x, " + "scramble %s\n", + connector->max_tmds_char, + connector->scdc_present ? "available" : "not available", + connector->rr_capable ? "capable" : "not capable", + connector->flags_3d, + connector->supports_scramble ? + "supported" : "not supported"); +} + +static void +drm_hdmi_extract_vsdbs_info(struct drm_connector *connector, struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == VENDOR_BLOCK) { + /* HDMI Vendor-Specific Data Block */ + if (cea_db_is_hdmi_vsdb(db)) + parse_hdmi_vsdb(connector, db); + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_hf_vsdb(db)) + parse_hdmi_hf_vsdb(connector, db); + } + } + } +} + +/* + * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block + * @connector: connector corresponding to the HDMI sink + * @db: start of the CEA vendor specific block + * + * Parses the HDMI VCDB to extract sink info for @connector. 
+ */ +static void +drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db) +{ + /* + * Check if the sink specifies underscan + * support for: + * BIT 5: preferred video format + * BIT 3: IT video format + * BIT 1: CE video format + */ + + connector->pt_scan_info = + (db[2] & (BIT(4) | BIT(5))) >> 4; + connector->it_scan_info = + (db[2] & (BIT(3) | BIT(2))) >> 2; + connector->ce_scan_info = + db[2] & (BIT(1) | BIT(0)); + + DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)", + (int) connector->pt_scan_info, + (int) connector->it_scan_info, + (int) connector->ce_scan_info); +} + +static bool drm_edid_is_luminance_value_present( +u32 block_length, enum luminance_value value) +{ + return block_length > NO_LUMINANCE_DATA && value <= block_length; +} + +/* + * drm_extract_hdr_db - Parse the HDMI HDR extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI HDR extended block + * + * Parses the HDMI HDR extended block to extract sink info for @connector. + */ +static void +drm_extract_hdr_db(struct drm_connector *connector, const u8 *db) +{ + + u8 len = 0; + + if (!db) { + DRM_ERROR("invalid db\n"); + return; + } + + len = db[0] & 0x1f; + /* Byte 3: Electro-Optical Transfer Functions */ + connector->hdr_eotf = db[2] & 0x3F; + + /* Byte 4: Static Metadata Descriptor Type 1 */ + connector->hdr_metadata_type_one = (db[3] & BIT(0)); + + /* Byte 5: Desired Content Maximum Luminance */ + if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE)) + connector->hdr_max_luminance = + db[MAXIMUM_LUMINANCE]; + + /* Byte 6: Desired Content Max Frame-average Luminance */ + if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE)) + connector->hdr_avg_luminance = + db[FRAME_AVERAGE_LUMINANCE]; + + /* Byte 7: Desired Content Min Luminance */ + if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE)) + connector->hdr_min_luminance = + db[MINIMUM_LUMINANCE]; + + connector->hdr_supported = true; + + DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf); + DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one); + DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance); + DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance); + DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance); +} + +/* + * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks + * @connector: connector corresponding to the HDMI sink + * @edid: handle to the EDID structure + * Parses all extended tag blocks to extract sink info for @connector.
+ */ +static void +drm_hdmi_extract_extended_blk_info(struct drm_connector *connector, +struct edid *edid) +{ + const u8 *cea = drm_find_cea_extension(edid); + const u8 *db = NULL; + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + + if (cea_db_tag(db) == EXTENDED_TAG) { + DRM_DEBUG_KMS("found extended tag block = %d\n", + db[1]); + switch (db[1]) { + case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK: + drm_extract_vcdb_info(connector, db); + break; + case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK: + drm_extract_hdr_db(connector, db); + break; + default: + break; + } + } + } + } +} + +static u8 * +drm_edid_find_extended_tag_block(struct edid *edid, int blk_id) +{ + u8 *db = NULL; + u8 *cea = NULL; + + if (!edid) { + pr_err("%s: invalid input\n", __func__); + return NULL; + } + + cea = drm_find_cea_extension(edid); + + if (cea && cea_revision(cea) >= 3) { + int i, start, end; + + if (cea_db_offsets(cea, &start, &end)) + return NULL; + + for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + if ((cea_db_tag(db) == EXTENDED_TAG) && + (db[1] == blk_id)) + return db; + } + } + return NULL; +} + +/* + * add_YCbCr420VDB_modes - add the modes found in Ycbcr420 VDB block + * @connector: connector corresponding to the HDMI sink + * @edid: handle to the EDID structure + * Parses the YCbCr420 VDB block and adds the modes to @connector. + */ +static int +add_YCbCr420VDB_modes(struct drm_connector *connector, struct edid *edid) +{ + + const u8 *db = NULL; + u32 i = 0; + u32 modes = 0; + u32 video_format = 0; + u8 len = 0; + + /*Find the YCbCr420 VDB*/ + db = drm_edid_find_extended_tag_block(edid, Y420_VIDEO_DATA_BLOCK); + /* Offset to byte 3 */ + if (db) { + len = db[0] & 0x1F; + db += 2; + for (i = 0; i < len - 1; i++) { + struct drm_display_mode *mode; + + video_format = *(db + i) & 0x7F; + mode = drm_display_mode_from_vic_index(connector, + db, len-1, i); + if (mode) { + DRM_DEBUG_KMS("Adding mode for vic = %d\n", + video_format); + drm_mode_probed_add(connector, mode); + modes++; + } + } + } + return modes; +} + +static void monitor_name(struct detailed_timing *t, void *data) { if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME) @@ -3277,6 +3770,9 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) /* HDMI Vendor-Specific Data Block */ if (cea_db_is_hdmi_vsdb(db)) parse_hdmi_vsdb(connector, db); + /* HDMI Forum Vendor-Specific Data Block */ + else if (cea_db_is_hdmi_hf_vsdb(db)) + parse_hdmi_hf_vsdb(connector, db); break; default: break; @@ -3733,6 +4229,10 @@ static void drm_add_display_info(struct edid *edid, info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } + /* Extract audio and video latency fields for the sink */ + drm_hdmi_extract_vsdbs_info(connector, edid); + /* Extract info from extended tag blocks */ + drm_hdmi_extract_extended_blk_info(connector, edid); /* HDMI deep color modes supported? 
Assign to info, if so */ drm_assign_hdmi_deep_color_info(edid, info, connector); @@ -3775,6 +4275,148 @@ static void drm_add_display_info(struct edid *edid, info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; } +static int validate_displayid(u8 *displayid, int length, int idx) +{ + int i; + u8 csum = 0; + struct displayid_hdr *base; + + base = (struct displayid_hdr *)&displayid[idx]; + + DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", + base->rev, base->bytes, base->prod_id, base->ext_count); + + if (base->bytes + 5 > length - idx) + return -EINVAL; + for (i = idx; i <= base->bytes + 5; i++) + csum += displayid[i]; + + if (csum) { + DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", + csum); + return -EINVAL; + } + return 0; +} + +static struct drm_display_mode * +drm_mode_displayid_detailed(struct drm_device *dev, +struct displayid_detailed_timings_1 *timings) +{ + struct drm_display_mode *mode; + unsigned pixel_clock = (timings->pixel_clock[0] | + (timings->pixel_clock[1] << 8) | + (timings->pixel_clock[2] << 16)); + unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1; + unsigned hblank = + (timings->hblank[0] | + timings->hblank[1] << 8) + 1; + unsigned hsync = (timings->hsync[0] | + (timings->hsync[1] & 0x7f) << 8) + 1; + unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1; + unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1; + unsigned vblank = + (timings->vblank[0] | + timings->vblank[1] << 8) + 1; + unsigned vsync = + (timings->vsync[0] | + (timings->vsync[1] & 0x7f) << 8) + 1; + unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1; + bool hsync_positive = (timings->hsync[1] >> 7) & 0x1; + bool vsync_positive = (timings->vsync[1] >> 7) & 0x1; + + mode = drm_mode_create(dev); + if (!mode) + return NULL; + + mode->clock = pixel_clock * 10; + mode->hdisplay = hactive; + mode->hsync_start = mode->hdisplay + hsync; + mode->hsync_end = mode->hsync_start + hsync_width; + mode->htotal = mode->hdisplay + hblank; + + mode->vdisplay = vactive; + mode->vsync_start = mode->vdisplay + vsync; + mode->vsync_end = mode->vsync_start + vsync_width; + mode->vtotal = mode->vdisplay + vblank; + + mode->flags = 0; + mode->flags |= hsync_positive ? + DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC; + mode->flags |= vsync_positive ? 
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC; + mode->type = DRM_MODE_TYPE_DRIVER; + + if (timings->flags & 0x80) + mode->type |= DRM_MODE_TYPE_PREFERRED; + mode->vrefresh = drm_mode_vrefresh(mode); + drm_mode_set_name(mode); + + return mode; +} + +static int add_displayid_detailed_1_modes(struct drm_connector *connector, + struct displayid_block *block) +{ + struct displayid_detailed_timing_block *det = + (struct displayid_detailed_timing_block *)block; + int i; + int num_timings; + struct drm_display_mode *newmode; + int num_modes = 0; + /* blocks must be multiple of 20 bytes length */ + if (block->num_bytes % 20) + return 0; + + num_timings = block->num_bytes / 20; + for (i = 0; i < num_timings; i++) { + struct displayid_detailed_timings_1 *timings = &det->timings[i]; + + newmode = drm_mode_displayid_detailed(connector->dev, timings); + if (!newmode) + continue; + + drm_mode_probed_add(connector, newmode); + num_modes++; + } + return num_modes; +} + +static int add_displayid_detailed_modes(struct drm_connector *connector, + struct edid *edid) +{ + u8 *displayid; + int ret; + int idx = 1; + int length = EDID_LENGTH; + struct displayid_block *block; + int num_modes = 0; + + displayid = drm_find_displayid_extension(edid); + if (!displayid) + return 0; + + ret = validate_displayid(displayid, length, idx); + if (ret) + return 0; + + idx += sizeof(struct displayid_hdr); + while (block = (struct displayid_block *)&displayid[idx], + idx + sizeof(struct displayid_block) <= length && + idx + sizeof(struct displayid_block) + + block->num_bytes <= length && + block->num_bytes > 0) { + idx += block->num_bytes + sizeof(struct displayid_block); + switch (block->tag) { + case DATA_BLOCK_TYPE_1_DETAILED_TIMING: + num_modes += add_displayid_detailed_1_modes(connector, + block); + break; + } + } + return num_modes; +} + /** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing @@ -3820,6 +4462,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) num_modes += add_established_modes(connector, edid); num_modes += add_cea_modes(connector, edid); num_modes += add_alternate_cea_modes(connector, edid); + num_modes += add_displayid_detailed_modes(connector, edid); + num_modes += add_YCbCr420VDB_modes(connector, edid); if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) num_modes += add_inferred_modes(connector, edid); @@ -4029,96 +4673,105 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); +static int drm_parse_tiled_block(struct drm_connector *connector, + struct displayid_block *block) +{ + struct displayid_tiled_block *tile = + (struct displayid_tiled_block *)block; + u16 w, h; + u8 tile_v_loc, tile_h_loc; + u8 num_v_tile, num_h_tile; + struct drm_tile_group *tg; + + w = tile->tile_size[0] | tile->tile_size[1] << 8; + h = tile->tile_size[2] | tile->tile_size[3] << 8; + + num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30); + num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30); + tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4); + tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4); + + connector->has_tile = true; + if (tile->tile_cap & 0x80) + connector->tile_is_single_monitor = true; + + connector->num_h_tile = num_h_tile + 1; + connector->num_v_tile = num_v_tile + 1; + connector->tile_h_loc = tile_h_loc; + connector->tile_v_loc = tile_v_loc; + connector->tile_h_size = w + 1; + 
connector->tile_v_size = h + 1; + + DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap); + DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1); + DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n", + num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc); + DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], + tile->topology_id[1], tile->topology_id[2]); + + tg = drm_mode_get_tile_group(connector->dev, tile->topology_id); + if (!tg) + tg = drm_mode_create_tile_group(connector->dev, + tile->topology_id); + + if (!tg) + return -ENOMEM; + + if (connector->tile_group != tg) { + /* if we haven't got a pointer, + * take the reference, drop ref to old tile group + */ + if (connector->tile_group) + drm_mode_put_tile_group(connector->dev, + connector->tile_group); + + connector->tile_group = tg; + } else + /* if same tile group, then release the ref we just took. */ + drm_mode_put_tile_group(connector->dev, tg); + return 0; +} + static int drm_parse_display_id(struct drm_connector *connector, u8 *displayid, int length, bool is_edid_extension) { /* if this is an EDID extension the first byte will be 0x70 */ int idx = 0; - struct displayid_hdr *base; struct displayid_block *block; - u8 csum = 0; - int i; + int ret; if (is_edid_extension) idx = 1; - base = (struct displayid_hdr *)&displayid[idx]; - - DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n", - base->rev, base->bytes, base->prod_id, base->ext_count); - - if (base->bytes + 5 > length - idx) - return -EINVAL; - - for (i = idx; i <= base->bytes + 5; i++) { - csum += displayid[i]; - } - if (csum) { - DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum); - return -EINVAL; - } + ret = validate_displayid(displayid, length, idx); + if (ret) + return ret; - block = (struct displayid_block *)&displayid[idx + 4]; - DRM_DEBUG_KMS("block id %d, rev %d, len %d\n", - block->tag, block->rev, block->num_bytes); - - switch (block->tag) { - case DATA_BLOCK_TILED_DISPLAY: { - struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block; - - u16 w, h; - u8 tile_v_loc, tile_h_loc; - u8 num_v_tile, num_h_tile; - struct drm_tile_group *tg; - - w = tile->tile_size[0] | tile->tile_size[1] << 8; - h = tile->tile_size[2] | tile->tile_size[3] << 8; - - num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30); - num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30); - tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4); - tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4); - - connector->has_tile = true; - if (tile->tile_cap & 0x80) - connector->tile_is_single_monitor = true; - - connector->num_h_tile = num_h_tile + 1; - connector->num_v_tile = num_v_tile + 1; - connector->tile_h_loc = tile_h_loc; - connector->tile_v_loc = tile_v_loc; - connector->tile_h_size = w + 1; - connector->tile_v_size = h + 1; - - DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap); - DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1); - DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n", - num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc); - DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]); - - tg = drm_mode_get_tile_group(connector->dev, tile->topology_id); - if (!tg) { - tg = drm_mode_create_tile_group(connector->dev, tile->topology_id); + idx += sizeof(struct displayid_hdr); + while (block = (struct displayid_block *)&displayid[idx], + idx + sizeof(struct displayid_block) <= length && + idx + sizeof(struct displayid_block) + + block->num_bytes <= length && + 
block->num_bytes > 0) { + idx += block->num_bytes + sizeof(struct displayid_block); + DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n", + block->tag, block->rev, block->num_bytes); + + switch (block->tag) { + case DATA_BLOCK_TILED_DISPLAY: + ret = drm_parse_tiled_block(connector, block); + if (ret) + return ret; + break; + case DATA_BLOCK_TYPE_1_DETAILED_TIMING: + /* handled in mode gathering code. */ + break; + default: + DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", + block->tag); + break; } - if (!tg) - return -ENOMEM; - - if (connector->tile_group != tg) { - /* if we haven't got a pointer, - take the reference, drop ref to old tile group */ - if (connector->tile_group) { - drm_mode_put_tile_group(connector->dev, connector->tile_group); - } - connector->tile_group = tg; - } else - /* if same tile group, then release the ref we just took. */ - drm_mode_put_tile_group(connector->dev, tg); - } - break; - default: - printk("unknown displayid tag %d\n", block->tag); - break; } return 0; } diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index ebf8be80a3d9..3d0617dbc514 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -49,6 +49,7 @@ msm_drm-y := \ sde/sde_vbif.o \ sde_dbg_evtlog.o \ sde_io_util.o \ + sde_edid_parser.o # use drm gpu driver only if qcom_kgsl driver not available ifneq ($(CONFIG_QCOM_KGSL),y) @@ -102,7 +103,6 @@ msm_drm-$(CONFIG_DRM_SDE_HDMI) += \ hdmi-staging/sde_hdmi.o \ hdmi-staging/sde_hdmi_bridge.o \ hdmi-staging/sde_hdmi_audio.o \ - hdmi-staging/sde_hdmi_edid.o msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \ dsi/pll/dsi_pll_28nm.o diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 02c4f2e3155d..32b2c7fab839 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -613,7 +613,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, - REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000); + REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000); gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, @@ -1039,8 +1039,10 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) a5xx_gpmu_err_irq(gpu); - if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { + a5xx_preempt_trigger(gpu); msm_gpu_retire(gpu); + } if (status & A5XX_RBBM_INT_0_MASK_CP_SW) a5xx_preempt_irq(gpu); diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 347b78886b24..6a6d02c5444d 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -32,6 +32,22 @@ static DEFINE_MUTEX(sde_hdmi_list_lock); static LIST_HEAD(sde_hdmi_list); +/* HDMI SCDC register offsets */ +#define HDMI_SCDC_UPDATE_0 0x10 +#define HDMI_SCDC_UPDATE_1 0x11 +#define HDMI_SCDC_TMDS_CONFIG 0x20 +#define HDMI_SCDC_SCRAMBLER_STATUS 0x21 +#define HDMI_SCDC_CONFIG_0 0x30 +#define HDMI_SCDC_STATUS_FLAGS_0 0x40 +#define HDMI_SCDC_STATUS_FLAGS_1 0x41 +#define HDMI_SCDC_ERR_DET_0_L 0x50 +#define HDMI_SCDC_ERR_DET_0_H 0x51 +#define HDMI_SCDC_ERR_DET_1_L 0x52 +#define HDMI_SCDC_ERR_DET_1_H 0x53 +#define HDMI_SCDC_ERR_DET_2_L 0x54 +#define HDMI_SCDC_ERR_DET_2_H 0x55 +#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56 + static const struct of_device_id sde_hdmi_dt_match[] = { 
{.compatible = "qcom,hdmi-display"}, {} @@ -69,16 +85,328 @@ static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file, return len; } +static ssize_t _sde_hdmi_debugfs_edid_modes_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char *buf; + u32 len = 0; + struct drm_connector *connector; + u32 mode_count = 0; + struct drm_display_mode *mode; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl || + !display->ctrl.ctrl->connector) { + SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", + display); + return -ENOMEM; + } + + if (*ppos) + return 0; + + connector = display->ctrl.ctrl->connector; + + list_for_each_entry(mode, &connector->modes, head) { + mode_count++; + } + + /* Adding one more to store title */ + mode_count++; + + buf = kzalloc((mode_count * sizeof(*mode)), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += snprintf(buf + len, PAGE_SIZE - len, + "name refresh (Hz) hdisp hss hse htot vdisp"); + + len += snprintf(buf + len, PAGE_SIZE - len, + " vss vse vtot flags\n"); + + list_for_each_entry(mode, &connector->modes, head) { + len += snprintf(buf + len, SZ_4K - len, + "%s %d %d %d %d %d %d %d %d %d 0x%x\n", + mode->name, mode->vrefresh, mode->hdisplay, + mode->hsync_start, mode->hsync_end, mode->htotal, + mode->vdisplay, mode->vsync_start, mode->vsync_end, + mode->vtotal, mode->flags); + } + + if (copy_to_user(buff, buf, len)) { + kfree(buf); + return -EFAULT; + } + + *ppos += len; + + kfree(buf); + return len; +} + +static ssize_t _sde_hdmi_debugfs_edid_vsdb_info_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[200]; + u32 len = 0; + struct drm_connector *connector; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl || + !display->ctrl.ctrl->connector) { + SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", + display); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + connector = display->ctrl.ctrl->connector; + len += snprintf(buf + len, sizeof(buf) - len, + "max_tmds_clock = %d\n", + connector->max_tmds_clock); + len += snprintf(buf + len, sizeof(buf) - len, + "latency_present %d %d\n", + connector->latency_present[0], + connector->latency_present[1]); + len += snprintf(buf + len, sizeof(buf) - len, + "video_latency %d %d\n", + connector->video_latency[0], + connector->video_latency[1]); + len += snprintf(buf + len, sizeof(buf) - len, + "audio_latency %d %d\n", + connector->audio_latency[0], + connector->audio_latency[1]); + len += snprintf(buf + len, sizeof(buf) - len, + "dvi_dual %d\n", + (int)connector->dvi_dual); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + +static ssize_t _sde_hdmi_debugfs_edid_hdr_info_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[200]; + u32 len = 0; + struct drm_connector *connector; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl || + !display->ctrl.ctrl->connector) { + SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", + display); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + connector = display->ctrl.ctrl->connector; + len += snprintf(buf, sizeof(buf), "hdr_eotf = %d\n" + "hdr_metadata_type_one %d\n" + "hdr_max_luminance %d\n" + "hdr_avg_luminance %d\n" + 
"hdr_min_luminance %d\n" + "hdr_supported %d\n", + connector->hdr_eotf, + connector->hdr_metadata_type_one, + connector->hdr_max_luminance, + connector->hdr_avg_luminance, + connector->hdr_min_luminance, + (int)connector->hdr_supported); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + +static ssize_t _sde_hdmi_debugfs_edid_hfvsdb_info_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[200]; + u32 len = 0; + struct drm_connector *connector; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl || + !display->ctrl.ctrl->connector) { + SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", + display); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + connector = display->ctrl.ctrl->connector; + len += snprintf(buf, PAGE_SIZE - len, "max_tmds_char = %d\n" + "scdc_present %d\n" + "rr_capable %d\n" + "supports_scramble %d\n" + "flags_3d %d\n", + connector->max_tmds_char, + (int)connector->scdc_present, + (int)connector->rr_capable, + (int)connector->supports_scramble, + connector->flags_3d); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + return len; +} + +static ssize_t _sde_hdmi_debugfs_edid_vcdb_info_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[100]; + u32 len = 0; + struct drm_connector *connector; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl || + !display->ctrl.ctrl->connector) { + SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", + display); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + connector = display->ctrl.ctrl->connector; + len += snprintf(buf, PAGE_SIZE - len, "pt_scan_info = %d\n" + "it_scan_info = %d\n" + "ce_scan_info = %d\n", + (int)connector->pt_scan_info, + (int)connector->it_scan_info, + (int)connector->ce_scan_info); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} + +static ssize_t _sde_hdmi_edid_vendor_name_read(struct file *file, + char __user *buff, + size_t count, + loff_t *ppos) +{ + struct sde_hdmi *display = file->private_data; + char buf[100]; + u32 len = 0; + struct drm_connector *connector; + + if (!display) + return -ENODEV; + + if (!display->ctrl.ctrl || + !display->ctrl.ctrl->connector) { + SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", + display); + return -ENOMEM; + } + + SDE_HDMI_DEBUG("%s +", __func__); + if (*ppos) + return 0; + + connector = display->ctrl.ctrl->connector; + len += snprintf(buf, PAGE_SIZE - len, "Vendor ID is %s\n", + display->edid_ctrl->vendor_id); + + if (copy_to_user(buff, buf, len)) + return -EFAULT; + + *ppos += len; + SDE_HDMI_DEBUG("%s - ", __func__); + return len; +} static const struct file_operations dump_info_fops = { .open = simple_open, .read = _sde_hdmi_debugfs_dump_info_read, }; +static const struct file_operations edid_modes_fops = { + .open = simple_open, + .read = _sde_hdmi_debugfs_edid_modes_read, +}; + +static const struct file_operations edid_vsdb_info_fops = { + .open = simple_open, + .read = _sde_hdmi_debugfs_edid_vsdb_info_read, +}; + +static const struct file_operations edid_hdr_info_fops = { + .open = simple_open, + .read = _sde_hdmi_debugfs_edid_hdr_info_read, +}; + +static const struct file_operations 
edid_hfvsdb_info_fops = { + .open = simple_open, + .read = _sde_hdmi_debugfs_edid_hfvsdb_info_read, +}; + +static const struct file_operations edid_vcdb_info_fops = { + .open = simple_open, + .read = _sde_hdmi_debugfs_edid_vcdb_info_read, +}; + +static const struct file_operations edid_vendor_name_fops = { + .open = simple_open, + .read = _sde_hdmi_edid_vendor_name_read, +}; + static int _sde_hdmi_debugfs_init(struct sde_hdmi *display) { int rc = 0; - struct dentry *dir, *dump_file; + struct dentry *dir, *dump_file, *edid_modes; + struct dentry *edid_vsdb_info, *edid_hdr_info, *edid_hfvsdb_info; + struct dentry *edid_vcdb_info, *edid_vendor_name; dir = debugfs_create_dir(display->name, NULL); if (!dir) { @@ -100,6 +428,83 @@ static int _sde_hdmi_debugfs_init(struct sde_hdmi *display) goto error_remove_dir; } + edid_modes = debugfs_create_file("edid_modes", + 0444, + dir, + display, + &edid_modes_fops); + + if (IS_ERR_OR_NULL(edid_modes)) { + rc = PTR_ERR(edid_modes); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + edid_vsdb_info = debugfs_create_file("edid_vsdb_info", + 0444, + dir, + display, + &edid_vsdb_info_fops); + + if (IS_ERR_OR_NULL(edid_vsdb_info)) { + rc = PTR_ERR(edid_vsdb_info); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + edid_hdr_info = debugfs_create_file("edid_hdr_info", + 0444, + dir, + display, + &edid_hdr_info_fops); + if (IS_ERR_OR_NULL(edid_hdr_info)) { + rc = PTR_ERR(edid_hdr_info); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + edid_hfvsdb_info = debugfs_create_file("edid_hfvsdb_info", + 0444, + dir, + display, + &edid_hfvsdb_info_fops); + + if (IS_ERR_OR_NULL(edid_hfvsdb_info)) { + rc = PTR_ERR(edid_hfvsdb_info); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + edid_vcdb_info = debugfs_create_file("edid_vcdb_info", + 0444, + dir, + display, + &edid_vcdb_info_fops); + + if (IS_ERR_OR_NULL(edid_vcdb_info)) { + rc = PTR_ERR(edid_vcdb_info); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + + edid_vendor_name = debugfs_create_file("edid_vendor_name", + 0444, + dir, + display, + &edid_vendor_name_fops); + + if (IS_ERR_OR_NULL(edid_vendor_name)) { + rc = PTR_ERR(edid_vendor_name); + SDE_ERROR("[%s]debugfs create file failed, rc=%d\n", + display->name, rc); + goto error_remove_dir; + } + display->root = dir; return rc; error_remove_dir: @@ -395,20 +800,28 @@ static void _sde_hdmi_hotplug_work(struct work_struct *work) struct sde_hdmi *sde_hdmi = container_of(work, struct sde_hdmi, hpd_work); struct drm_connector *connector; + struct hdmi *hdmi = NULL; + u32 hdmi_ctrl; if (!sde_hdmi || !sde_hdmi->ctrl.ctrl || - !sde_hdmi->ctrl.ctrl->connector) { + !sde_hdmi->ctrl.ctrl->connector || + !sde_hdmi->edid_ctrl) { SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n", sde_hdmi); return; } - + hdmi = sde_hdmi->ctrl.ctrl; connector = sde_hdmi->ctrl.ctrl->connector; - if (sde_hdmi->connected) - sde_hdmi_get_edid(connector, sde_hdmi); - else - sde_hdmi_free_edid(sde_hdmi); + if (sde_hdmi->connected) { + hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); + sde_get_edid(connector, hdmi->i2c, + (void **)&sde_hdmi->edid_ctrl); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); + hdmi->hdmi_mode = 
sde_detect_hdmi_monitor(sde_hdmi->edid_ctrl); + } else + sde_free_edid((void **)&sde_hdmi->edid_ctrl); sde_hdmi_notify_clients(connector, sde_hdmi->connected); drm_helper_hpd_irq_event(connector->dev); @@ -431,7 +844,7 @@ static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi) hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, HDMI_HPD_INT_CTRL_INT_ACK); - DRM_DEBUG("status=%04x, ctrl=%04x", hpd_int_status, + SDE_HDMI_DEBUG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl); /* detect disconnect if we are connected or visa versa: */ @@ -504,11 +917,11 @@ static int _sde_hdmi_get_audio_edid_blk(struct platform_device *pdev, return -ENODEV; } - blk->audio_data_blk = display->edid.audio_data_block; - blk->audio_data_blk_size = display->edid.adb_size; + blk->audio_data_blk = display->edid_ctrl->audio_data_block; + blk->audio_data_blk_size = display->edid_ctrl->adb_size; - blk->spk_alloc_data_blk = display->edid.spkr_alloc_data_block; - blk->spk_alloc_data_blk_size = display->edid.sadb_size; + blk->spk_alloc_data_blk = display->edid_ctrl->spkr_alloc_data_block; + blk->spk_alloc_data_blk_size = display->edid_ctrl->sadb_size; return 0; } @@ -634,10 +1047,249 @@ void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on) hdmi_write(hdmi, REG_HDMI_CTRL, ctrl); spin_unlock_irqrestore(&hdmi->reg_lock, flags); - DRM_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n", + SDE_HDMI_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n", power_on ? "Enable" : "Disable", ctrl); } +int sde_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len) +{ + int rc; + int retry = 5; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + .buf = &offset, + }, { + .addr = addr >> 1, + .flags = I2C_M_RD, + .len = data_len, + .buf = data, + } + }; + + SDE_HDMI_DEBUG("Start DDC read"); + retry: + rc = i2c_transfer(hdmi->i2c, msgs, 2); + + retry--; + if (rc == 2) + rc = 0; + else if (retry > 0) + goto retry; + else + rc = -EIO; + + SDE_HDMI_DEBUG("End DDC read %d", rc); + + return rc; +} + +#define DDC_WRITE_MAX_BYTE_NUM 32 + +int sde_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len) +{ + int rc; + int retry = 10; + u8 buf[DDC_WRITE_MAX_BYTE_NUM]; + struct i2c_msg msgs[] = { + { + .addr = addr >> 1, + .flags = 0, + .len = 1, + } + }; + + SDE_HDMI_DEBUG("Start DDC write"); + if (data_len > (DDC_WRITE_MAX_BYTE_NUM - 1)) { + SDE_ERROR("%s: write size too big\n", __func__); + return -ERANGE; + } + + buf[0] = offset; + memcpy(&buf[1], data, data_len); + msgs[0].buf = buf; + msgs[0].len = data_len + 1; + retry: + rc = i2c_transfer(hdmi->i2c, msgs, 1); + + retry--; + if (rc == 1) + rc = 0; + else if (retry > 0) + goto retry; + else + rc = -EIO; + + SDE_HDMI_DEBUG("End DDC write %d", rc); + + return rc; +} + +int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val) +{ + int rc = 0; + u8 data_buf[2] = {0}; + u16 dev_addr, data_len; + u8 offset; + + if (!hdmi || !hdmi->i2c || !val) { + SDE_ERROR("Bad Parameters\n"); + return -EINVAL; + } + + if (data_type >= HDMI_TX_SCDC_MAX) { + SDE_ERROR("Unsupported data type\n"); + return -EINVAL; + } + + dev_addr = 0xA8; + + switch (data_type) { + case HDMI_TX_SCDC_SCRAMBLING_STATUS: + data_len = 1; + offset = HDMI_SCDC_SCRAMBLER_STATUS; + break; + case HDMI_TX_SCDC_SCRAMBLING_ENABLE: + case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE: + data_len = 1; + offset = HDMI_SCDC_TMDS_CONFIG; + break; + case HDMI_TX_SCDC_CLOCK_DET_STATUS: + case HDMI_TX_SCDC_CH0_LOCK_STATUS: + case HDMI_TX_SCDC_CH1_LOCK_STATUS: + case 
HDMI_TX_SCDC_CH2_LOCK_STATUS: + data_len = 1; + offset = HDMI_SCDC_STATUS_FLAGS_0; + break; + case HDMI_TX_SCDC_CH0_ERROR_COUNT: + data_len = 2; + offset = HDMI_SCDC_ERR_DET_0_L; + break; + case HDMI_TX_SCDC_CH1_ERROR_COUNT: + data_len = 2; + offset = HDMI_SCDC_ERR_DET_1_L; + break; + case HDMI_TX_SCDC_CH2_ERROR_COUNT: + data_len = 2; + offset = HDMI_SCDC_ERR_DET_2_L; + break; + case HDMI_TX_SCDC_READ_ENABLE: + data_len = 1; + offset = HDMI_SCDC_CONFIG_0; + break; + default: + break; + } + + rc = sde_hdmi_ddc_read(hdmi, dev_addr, offset, data_buf, data_len); + if (rc) { + SDE_ERROR("DDC Read failed for %d\n", data_type); + return rc; + } + + switch (data_type) { + case HDMI_TX_SCDC_SCRAMBLING_STATUS: + *val = (data_buf[0] & BIT(0)) ? 1 : 0; + break; + case HDMI_TX_SCDC_SCRAMBLING_ENABLE: + *val = (data_buf[0] & BIT(0)) ? 1 : 0; + break; + case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE: + *val = (data_buf[0] & BIT(1)) ? 1 : 0; + break; + case HDMI_TX_SCDC_CLOCK_DET_STATUS: + *val = (data_buf[0] & BIT(0)) ? 1 : 0; + break; + case HDMI_TX_SCDC_CH0_LOCK_STATUS: + *val = (data_buf[0] & BIT(1)) ? 1 : 0; + break; + case HDMI_TX_SCDC_CH1_LOCK_STATUS: + *val = (data_buf[0] & BIT(2)) ? 1 : 0; + break; + case HDMI_TX_SCDC_CH2_LOCK_STATUS: + *val = (data_buf[0] & BIT(3)) ? 1 : 0; + break; + case HDMI_TX_SCDC_CH0_ERROR_COUNT: + case HDMI_TX_SCDC_CH1_ERROR_COUNT: + case HDMI_TX_SCDC_CH2_ERROR_COUNT: + if (data_buf[1] & BIT(7)) + *val = (data_buf[0] | ((data_buf[1] & 0x7F) << 8)); + else + *val = 0; + break; + case HDMI_TX_SCDC_READ_ENABLE: + *val = (data_buf[0] & BIT(0)) ? 1 : 0; + break; + default: + break; + } + + return 0; +} + +int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val) +{ + int rc = 0; + u8 data_buf[2] = {0}; + u8 read_val = 0; + u16 dev_addr, data_len; + u8 offset; + + if (!hdmi || !hdmi->i2c) { + SDE_ERROR("Bad Parameters\n"); + return -EINVAL; + } + + if (data_type >= HDMI_TX_SCDC_MAX) { + SDE_ERROR("Unsupported data type\n"); + return -EINVAL; + } + + dev_addr = 0xA8; + + switch (data_type) { + case HDMI_TX_SCDC_SCRAMBLING_ENABLE: + case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE: + dev_addr = 0xA8; + data_len = 1; + offset = HDMI_SCDC_TMDS_CONFIG; + rc = sde_hdmi_ddc_read(hdmi, dev_addr, offset, &read_val, + data_len); + if (rc) { + SDE_ERROR("scdc read failed\n"); + return rc; + } + if (data_type == HDMI_TX_SCDC_SCRAMBLING_ENABLE) { + data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(0))) | + ((u8)(val & BIT(0)))); + } else { + data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(1))) | + (((u8)(val & BIT(0))) << 1)); + } + break; + case HDMI_TX_SCDC_READ_ENABLE: + data_len = 1; + offset = HDMI_SCDC_CONFIG_0; + data_buf[0] = (u8)(val & 0x1); + break; + default: + SDE_ERROR("Cannot write to read only reg (%d)\n", + data_type); + return -EINVAL; + } + + rc = sde_hdmi_ddc_write(hdmi, dev_addr, offset, data_buf, data_len); + if (rc) { + SDE_ERROR("DDC Read failed for %d\n", data_type); + return rc; + } + return 0; +} + int sde_hdmi_get_info(struct msm_display_info *info, void *display) { @@ -746,7 +1398,7 @@ int sde_hdmi_connector_post_init(struct drm_connector *connector, if (info) sde_kms_info_add_keystr(info, - "DISPLAY_TYPE", + "display type", sde_hdmi->display_type); hdmi->connector = connector; @@ -775,8 +1427,6 @@ sde_hdmi_connector_detect(struct drm_connector *connector, return status; } - SDE_DEBUG("\n"); - /* get display dsi_info */ memset(&info, 0x0, sizeof(info)); rc = sde_hdmi_get_info(&info, display); @@ -797,25 +1447,6 @@ sde_hdmi_connector_detect(struct drm_connector 
*connector, return status; } -int _sde_hdmi_update_modes(struct drm_connector *connector, - struct sde_hdmi *display) -{ - int rc = 0; - struct hdmi_edid_ctrl *edid_ctrl = &display->edid; - - if (edid_ctrl->edid) { - drm_mode_connector_update_edid_property(connector, - edid_ctrl->edid); - - rc = drm_add_edid_modes(connector, edid_ctrl->edid); - return rc; - } - - drm_mode_connector_update_edid_property(connector, NULL); - - return rc; -} - int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display) { struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display; @@ -828,8 +1459,6 @@ int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display) return 0; } - SDE_DEBUG("\n"); - if (hdmi_display->non_pluggable) { list_for_each_entry(mode, &hdmi_display->mode_list, head) { m = drm_mode_duplicate(connector->dev, mode); @@ -842,7 +1471,9 @@ int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display) } ret = hdmi_display->num_of_modes; } else { - ret = _sde_hdmi_update_modes(connector, display); + /* pluggable case assumes EDID is read when HPD */ + ret = _sde_edid_update_modes(connector, + hdmi_display->edid_ctrl); } return ret; @@ -864,8 +1495,6 @@ enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector, return 0; } - SDE_DEBUG("\n"); - hdmi = hdmi_display->ctrl.ctrl; priv = connector->dev->dev_private; kms = priv->kms; @@ -873,7 +1502,7 @@ enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector, actual = kms->funcs->round_pixclk(kms, requested, hdmi->encoder); - SDE_DEBUG("requested=%ld, actual=%ld", requested, actual); + SDE_HDMI_DEBUG("requested=%ld, actual=%ld", requested, actual); if (actual != requested) return MODE_CLOCK_RANGE; @@ -909,7 +1538,7 @@ static int sde_hdmi_bind(struct device *dev, struct device *master, void *data) struct msm_drm_private *priv = NULL; struct platform_device *pdev = to_platform_device(dev); - SDE_ERROR("E\n"); + SDE_HDMI_DEBUG(" %s +\n", __func__); if (!dev || !pdev || !master) { pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n", dev, pdev, master); @@ -941,18 +1570,20 @@ static int sde_hdmi_bind(struct device *dev, struct device *master, void *data) goto error; } - rc = sde_hdmi_edid_init(display); - if (rc) { - SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n", - display->name, rc); + display->edid_ctrl = sde_edid_init(); + if (!display->edid_ctrl) { + SDE_ERROR("[%s]sde edid init failed\n", + display->name); + rc = -ENOMEM; goto error; } display_ctrl = &display->ctrl; display_ctrl->ctrl = priv->hdmi; - SDE_ERROR("display_ctrl->ctrl=%p\n", display_ctrl->ctrl); display->drm_dev = drm; + mutex_unlock(&display->display_lock); + return rc; error: (void)_sde_hdmi_debugfs_deinit(display); debug_error: @@ -978,7 +1609,7 @@ static void sde_hdmi_unbind(struct device *dev, struct device *master, } mutex_lock(&display->display_lock); (void)_sde_hdmi_debugfs_deinit(display); - (void)sde_hdmi_edid_deinit(display); + (void)sde_edid_deinit((void **)&display->edid_ctrl); display->drm_dev = NULL; mutex_unlock(&display->display_lock); } diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index 869d1bebf9db..bb3061a6ed00 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -25,9 +25,13 @@ #include <drm/drm_crtc.h> #include "hdmi.h" -#define MAX_NUMBER_ADB 5 -#define MAX_AUDIO_DATA_BLOCK_SIZE 30 -#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3 +#include "sde_edid_parser.h" + +#ifdef 
HDMI_DEBUG_ENABLE +#define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args) +#else +#define SDE_HDMI_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args) +#endif /** * struct sde_hdmi_info - defines hdmi display properties @@ -64,14 +68,6 @@ struct sde_hdmi_ctrl { u32 hdmi_ctrl_idx; }; -struct hdmi_edid_ctrl { - struct edid *edid; - u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE]; - int adb_size; - u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE]; - int sadb_size; -}; - /** * struct sde_hdmi - hdmi display information * @pdev: Pointer to platform device. @@ -102,7 +98,7 @@ struct sde_hdmi { struct platform_device *ext_pdev; struct msm_ext_disp_init_data ext_audio_data; - struct hdmi_edid_ctrl edid; + struct sde_edid_ctrl *edid_ctrl; bool non_pluggable; u32 num_of_modes; @@ -116,6 +112,38 @@ struct sde_hdmi { struct dentry *root; }; +/** + * hdmi_tx_scdc_access_type() - hdmi 2.0 DDC functionalities. + */ +enum hdmi_tx_scdc_access_type { + HDMI_TX_SCDC_SCRAMBLING_STATUS, + HDMI_TX_SCDC_SCRAMBLING_ENABLE, + HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE, + HDMI_TX_SCDC_CLOCK_DET_STATUS, + HDMI_TX_SCDC_CH0_LOCK_STATUS, + HDMI_TX_SCDC_CH1_LOCK_STATUS, + HDMI_TX_SCDC_CH2_LOCK_STATUS, + HDMI_TX_SCDC_CH0_ERROR_COUNT, + HDMI_TX_SCDC_CH1_ERROR_COUNT, + HDMI_TX_SCDC_CH2_ERROR_COUNT, + HDMI_TX_SCDC_READ_ENABLE, + HDMI_TX_SCDC_MAX, +}; + +#define HDMI_KHZ_TO_HZ 1000 +#define HDMI_MHZ_TO_HZ 1000000 +/** + * hdmi_tx_ddc_timer_type() - hdmi DDC timer functionalities. + */ +enum hdmi_tx_ddc_timer_type { + HDMI_TX_DDC_TIMER_HDCP2P2_RD_MSG, + HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, + HDMI_TX_DDC_TIMER_UPDATE_FLAGS, + HDMI_TX_DDC_TIMER_STATUS_FLAGS, + HDMI_TX_DDC_TIMER_CED, + HDMI_TX_DDC_TIMER_MAX, + }; + #ifdef CONFIG_DRM_SDE_HDMI /** * sde_hdmi_get_num_of_displays() - returns number of display devices @@ -259,6 +287,52 @@ struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi); void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on); /** + * sde_hdmi_ddc_read() - common hdmi ddc read API. + * @hdmi: Handle to the hdmi. + * @addr: Command address. + * @offset: Command offset. + * @data: Data buffer for read back. + * @data_len: Data buffer length. + * + * Return: error code. + */ +int sde_hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len); + +/** + * sde_hdmi_ddc_write() - common hdmi ddc write API. + * @hdmi: Handle to the hdmi. + * @addr: Command address. + * @offset: Command offset. + * @data: Data buffer for write. + * @data_len: Data buffer length. + * + * Return: error code. + */ +int sde_hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset, + u8 *data, u16 data_len); + +/** + * sde_hdmi_scdc_read() - hdmi 2.0 ddc read API. + * @hdmi: Handle to the hdmi. + * @data_type: DDC data type, refer to enum hdmi_tx_scdc_access_type. + * @val: Read back value. + * + * Return: error code. + */ +int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val); + +/** + * sde_hdmi_scdc_write() - hdmi 2.0 ddc write API. + * @hdmi: Handle to the hdmi. + * @data_type: DDC data type, refer to enum hdmi_tx_scdc_access_type. + * @val: Value write through DDC. + * + * Return: error code. + */ +int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val); + +/** * sde_hdmi_audio_on() - enable hdmi audio. * @hdmi: Handle to the hdmi. * @params: audio setup parameters from codec. 
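
The SCDC helpers declared above are what the bridge code later in this patch uses to negotiate HDMI 2.0 scrambling with the sink. A minimal usage sketch, assuming a valid struct hdmi handle; the example_* wrapper below is hypothetical and only mirrors the sequence _sde_hdmi_bridge_setup_scrambler() performs, it is not part of the patch:

#include "sde_hdmi.h"

/* Illustrative only: enable HDMI 2.0 scrambling through the SCDC helpers
 * declared above.  Error handling is reduced to early returns.
 */
static int example_enable_scrambling(struct hdmi *hdmi)
{
	int rc;

	/* tell the sink to expect the 1/40 TMDS bit clock ratio (> 340 Mcsc) */
	rc = sde_hdmi_scdc_write(hdmi,
			HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE, 0x1);
	if (rc)
		return rc;

	/* ask the sink to start descrambling */
	return sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1);
}
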
@@ -305,40 +379,6 @@ void sde_hdmi_notify_clients(struct drm_connector *connector, void sde_hdmi_ack_state(struct drm_connector *connector, enum drm_connector_status status); -/** - * sde_hdmi_edid_init() - init edid structure. - * @display: Handle to the sde_hdmi. - * - * Return: error code. - */ -int sde_hdmi_edid_init(struct sde_hdmi *display); - -/** - * sde_hdmi_edid_deinit() - deinit edid structure. - * @display: Handle to the sde_hdmi. - * - * Return: error code. - */ -int sde_hdmi_edid_deinit(struct sde_hdmi *display); - -/** - * sde_hdmi_get_edid() - get edid info. - * @connector: Handle to the drm_connector. - * @display: Handle to the sde_hdmi. - * - * Return: void. - */ -void sde_hdmi_get_edid(struct drm_connector *connector, - struct sde_hdmi *display); - -/** - * sde_hdmi_free_edid() - free edid structure. - * @display: Handle to the sde_hdmi. - * - * Return: error code. - */ -int sde_hdmi_free_edid(struct sde_hdmi *display); - #else /*#ifdef CONFIG_DRM_SDE_HDMI*/ static inline u32 sde_hdmi_get_num_of_displays(void) diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c index 681dca501f9b..c76e42c67885 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c @@ -28,6 +28,13 @@ struct sde_hdmi_bridge { }; #define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base) +/* TX major version that supports scrambling */ +#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04 +#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000 +#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200 +/* default hsyncs for 4k@60 for 200ms */ +#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571 + /* for AVI program */ #define HDMI_AVI_INFOFRAME_BUFFER_SIZE \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE) @@ -101,6 +108,265 @@ static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge) } } +static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi, + char *what) +{ + u32 ddc_int_ctrl, ddc_status, in_use, timeout; + u32 sw_done_mask = BIT(2); + u32 sw_done_ack = BIT(1); + u32 in_use_by_sw = BIT(0); + u32 in_use_by_hw = BIT(1); + + /* clear and enable interrutps */ + ddc_int_ctrl = sw_done_mask | sw_done_ack; + + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, ddc_int_ctrl); + + /* wait until DDC HW is free */ + timeout = 100; + do { + ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS); + in_use = ddc_status & (in_use_by_sw | in_use_by_hw); + if (in_use) { + SDE_DEBUG("ddc is in use by %s, timeout(%d)\n", + ddc_status & in_use_by_sw ? 
"sw" : "hw", + timeout); + udelay(100); + } + } while (in_use && --timeout); + + if (!timeout) { + SDE_ERROR("%s: timedout\n", what); + return -ETIMEDOUT; + } + + return 0; +} + +static int _sde_hdmi_bridge_scrambler_ddc_check_status(struct hdmi *hdmi) +{ + int rc = 0; + u32 reg_val; + + /* check for errors and clear status */ + reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS); + if (reg_val & BIT(4)) { + SDE_ERROR("ddc aborted\n"); + reg_val |= BIT(5); + rc = -ECONNABORTED; + } + + if (reg_val & BIT(8)) { + SDE_ERROR("timed out\n"); + reg_val |= BIT(9); + rc = -ETIMEDOUT; + } + + if (reg_val & BIT(12)) { + SDE_ERROR("NACK0\n"); + reg_val |= BIT(13); + rc = -EIO; + } + if (reg_val & BIT(14)) { + SDE_ERROR("NACK1\n"); + reg_val |= BIT(15); + rc = -EIO; + } + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS, reg_val); + + return rc; +} + +static void _sde_hdmi_bridge_scrambler_ddc_reset(struct hdmi *hdmi) +{ + u32 reg_val; + + /* clear ack and disable interrupts */ + reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1); + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val); + + /* Reset DDC timers */ + reg_val = BIT(0) | hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL); + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val); + + reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL); + reg_val &= ~BIT(0); + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val); +} + +static void _sde_hdmi_bridge_scrambler_ddc_disable(struct hdmi *hdmi) +{ + u32 reg_val; + + _sde_hdmi_bridge_scrambler_ddc_reset(hdmi); + /* Disable HW DDC access to RxStatus register */ + reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL); + reg_val &= ~(BIT(8) | BIT(9)); + hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val); +} + +static int _sde_hdmi_bridge_scrambler_status_timer_setup(struct hdmi *hdmi, + u32 timeout_hsync) +{ + u32 reg_val; + int rc; + + _sde_hdmi_bridge_ddc_clear_irq(hdmi, "scrambler"); + + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL, + timeout_hsync); + hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2, + timeout_hsync); + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL5); + reg_val |= BIT(10); + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL5, reg_val); + + reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL2); + /* Trigger interrupt if scrambler status is 0 or DDC failure */ + reg_val |= BIT(10); + reg_val &= ~(BIT(15) | BIT(16)); + reg_val |= BIT(16); + hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val); + + /* Enable DDC access */ + reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL); + + reg_val &= ~(BIT(8) | BIT(9)); + reg_val |= BIT(8); + hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val); + + /* WAIT for 200ms as per HDMI 2.0 standard for sink to respond */ + msleep(200); + + /* clear the scrambler status */ + rc = _sde_hdmi_bridge_scrambler_ddc_check_status(hdmi); + if (rc) + SDE_ERROR("scrambling ddc error %d\n", rc); + + _sde_hdmi_bridge_scrambler_ddc_disable(hdmi); + + return rc; +} + +static int _sde_hdmi_bridge_setup_ddc_timers(struct hdmi *hdmi, + u32 type, u32 to_in_num_lines) +{ + if (type >= HDMI_TX_DDC_TIMER_MAX) { + SDE_ERROR("Invalid timer type %d\n", type); + return -EINVAL; + } + + switch (type) { + case HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS: + _sde_hdmi_bridge_scrambler_status_timer_setup(hdmi, + to_in_num_lines); + break; + default: + SDE_ERROR("%d type not supported\n", type); + return -EINVAL; + } + + return 0; +} + +static inline int _sde_hdmi_bridge_get_timeout_in_hysnc( + struct drm_display_mode *mode, u32 timeout_ms) +{ + /* + * pixel clock = 
h_total * v_total * fps + * 1 sec = pixel clock number of pixels are transmitted. + * time taken by one line (h_total) = 1s / (v_total * fps). + * lines for give time = (time_ms * 1000) / (1000000 / (v_total * fps)) + * = (time_ms * clock) / h_total + */ + + return (timeout_ms * mode->clock / mode->htotal); +} + +static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, + struct drm_display_mode *mode) +{ + int rc = 0; + int timeout_hsync; + u32 reg_val = 0; + u32 tmds_clock_ratio = 0; + bool scrambler_on = false; + + struct drm_connector *connector = NULL; + + if (!hdmi || !mode) { + SDE_ERROR("invalid input\n"); + return -EINVAL; + } + connector = hdmi->connector; + + /* Read HDMI version */ + reg_val = hdmi_read(hdmi, REG_HDMI_VERSION); + reg_val = (reg_val & 0xF0000000) >> 28; + /* Scrambling is supported from HDMI TX 4.0 */ + if (reg_val < HDMI_TX_SCRAMBLER_MIN_TX_VERSION) { + DRM_INFO("scrambling not supported by tx\n"); + return 0; + } + + if (mode->clock > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) { + scrambler_on = true; + tmds_clock_ratio = 1; + } else { + scrambler_on = connector->supports_scramble; + } + + DRM_INFO("scrambler %s\n", scrambler_on ? "on" : "off"); + + if (scrambler_on) { + rc = sde_hdmi_scdc_write(hdmi, + HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE, + tmds_clock_ratio); + if (rc) { + SDE_ERROR("TMDS CLK RATIO ERR\n"); + return rc; + } + + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val |= BIT(31); /* Enable Update DATAPATH_MODE */ + reg_val |= BIT(28); /* Set SCRAMBLER_EN bit */ + + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + + rc = sde_hdmi_scdc_write(hdmi, + HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1); + if (rc) { + SDE_ERROR("failed to enable scrambling\n"); + return rc; + } + + /* + * Setup hardware to periodically check for scrambler + * status bit on the sink. Sink should set this bit + * with in 200ms after scrambler is enabled. + */ + timeout_hsync = _sde_hdmi_bridge_get_timeout_in_hysnc( + mode, + HDMI_TX_SCRAMBLER_TIMEOUT_MSEC); + if (timeout_hsync <= 0) { + SDE_ERROR("err in timeout hsync calc\n"); + timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC; + } + SDE_DEBUG("timeout for scrambling en: %d hsyncs\n", + timeout_hsync); + + rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi, + HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync); + } else { + sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0); + reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); + reg_val &= ~BIT(31); /* Disable Update DATAPATH_MODE */ + reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */ + hdmi_write(hdmi, REG_HDMI_CTRL, reg_val); + } + return rc; +} + static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge) { struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); @@ -373,6 +639,7 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge, _sde_hdmi_bridge_set_spd_infoframe(hdmi, mode); DRM_DEBUG("hdmi setup info frame\n"); } + _sde_hdmi_bridge_setup_scrambler(hdmi, mode); } static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = { diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c deleted file mode 100644 index 57c79e2aa812..000000000000 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c +++ /dev/null @@ -1,227 +0,0 @@ -/* Copyright (c) 2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <drm/drm_edid.h> - -#include "sde_kms.h" -#include "sde_hdmi.h" - -/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */ -#define DBC_START_OFFSET 4 -#define EDID_DTD_LEN 18 - -enum data_block_types { - RESERVED, - AUDIO_DATA_BLOCK, - VIDEO_DATA_BLOCK, - VENDOR_SPECIFIC_DATA_BLOCK, - SPEAKER_ALLOCATION_DATA_BLOCK, - VESA_DTC_DATA_BLOCK, - RESERVED2, - USE_EXTENDED_TAG -}; - -static u8 *_sde_hdmi_edid_find_cea_extension(struct edid *edid) -{ - u8 *edid_ext = NULL; - int i; - - /* No EDID or EDID extensions */ - if (edid == NULL || edid->extensions == 0) - return NULL; - - /* Find CEA extension */ - for (i = 0; i < edid->extensions; i++) { - edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); - if (edid_ext[0] == CEA_EXT) - break; - } - - if (i == edid->extensions) - return NULL; - - return edid_ext; -} - -static const u8 *_sde_hdmi_edid_find_block(const u8 *in_buf, u32 start_offset, - u8 type, u8 *len) -{ - /* the start of data block collection, start of Video Data Block */ - u32 offset = start_offset; - u32 dbc_offset = in_buf[2]; - - /* - * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block - * collection present. - * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block - * collection present and no DTD data present. - */ - if ((dbc_offset == 0) || (dbc_offset == 4)) { - SDE_ERROR("EDID: no DTD or non-DTD data present\n"); - return NULL; - } - - while (offset < dbc_offset) { - u8 block_len = in_buf[offset] & 0x1F; - - if ((offset + block_len <= dbc_offset) && - (in_buf[offset] >> 5) == type) { - *len = block_len; - SDE_DEBUG("EDID: block=%d found @ 0x%x w/ len=%d\n", - type, offset, block_len); - - return in_buf + offset; - } - offset += 1 + block_len; - } - - return NULL; -} - -static void _sde_hdmi_extract_audio_data_blocks( - struct hdmi_edid_ctrl *edid_ctrl) -{ - u8 len = 0; - u8 adb_max = 0; - const u8 *adb = NULL; - u32 offset = DBC_START_OFFSET; - u8 *cea = NULL; - - if (!edid_ctrl) { - SDE_ERROR("invalid edid_ctrl\n"); - return; - } - - cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid); - if (!cea) { - SDE_DEBUG("CEA extension not found\n"); - return; - } - - edid_ctrl->adb_size = 0; - - memset(edid_ctrl->audio_data_block, 0, - sizeof(edid_ctrl->audio_data_block)); - - do { - len = 0; - adb = _sde_hdmi_edid_find_block(cea, offset, AUDIO_DATA_BLOCK, - &len); - - if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE || - adb_max >= MAX_NUMBER_ADB)) { - if (!edid_ctrl->adb_size) { - SDE_DEBUG("No/Invalid Audio Data Block\n"); - return; - } - - continue; - } - - memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size, - adb + 1, len); - offset = (adb - cea) + 1 + len; - - edid_ctrl->adb_size += len; - adb_max++; - } while (adb); - -} - -static void _sde_hdmi_extract_speaker_allocation_data( - struct hdmi_edid_ctrl *edid_ctrl) -{ - u8 len; - const u8 *sadb = NULL; - u8 *cea = NULL; - - if (!edid_ctrl) { - SDE_ERROR("invalid edid_ctrl\n"); - return; - } - - cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid); - if (!cea) { - SDE_DEBUG("CEA extension not found\n"); - return; - } - - sadb = _sde_hdmi_edid_find_block(cea, DBC_START_OFFSET, - SPEAKER_ALLOCATION_DATA_BLOCK, &len); - if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) { - SDE_DEBUG("No/Invalid Speaker 
Allocation Data Block\n"); - return; - } - - memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len); - edid_ctrl->sadb_size = len; - - SDE_DEBUG("EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n", - sadb[1], - (sadb[1] & BIT(0)) ? "FL/FR," : "", - (sadb[1] & BIT(1)) ? "LFE," : "", - (sadb[1] & BIT(2)) ? "FC," : "", - (sadb[1] & BIT(3)) ? "RL/RR," : "", - (sadb[1] & BIT(4)) ? "RC," : "", - (sadb[1] & BIT(5)) ? "FLC/FRC," : "", - (sadb[1] & BIT(6)) ? "RLC/RRC," : ""); -} - -int sde_hdmi_edid_init(struct sde_hdmi *display) -{ - int rc = 0; - - if (!display) { - SDE_ERROR("[%s]Invalid params\n", display->name); - return -EINVAL; - } - - memset(&display->edid, 0, sizeof(display->edid)); - - return rc; -} - -int sde_hdmi_free_edid(struct sde_hdmi *display) -{ - struct hdmi_edid_ctrl *edid_ctrl = &display->edid; - - kfree(edid_ctrl->edid); - edid_ctrl->edid = NULL; - - return 0; -} - -int sde_hdmi_edid_deinit(struct sde_hdmi *display) -{ - return sde_hdmi_free_edid(display); -} - -void sde_hdmi_get_edid(struct drm_connector *connector, - struct sde_hdmi *display) -{ - u32 hdmi_ctrl; - struct hdmi_edid_ctrl *edid_ctrl = &display->edid; - struct hdmi *hdmi = display->ctrl.ctrl; - - /* Read EDID */ - hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL); - hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE); - edid_ctrl->edid = drm_get_edid(connector, hdmi->i2c); - hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl); - - if (edid_ctrl->edid) { - hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid_ctrl->edid); - - _sde_hdmi_extract_audio_data_blocks(edid_ctrl); - _sde_hdmi_extract_speaker_allocation_data(edid_ctrl); - } -}; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h index aef20f76bf02..0956617442af 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h @@ -563,6 +563,20 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val) #define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370 +#define REG_HDMI_DDC_INT_CTRL0 0x00000430 +#define REG_HDMI_DDC_INT_CTRL1 0x00000434 +#define REG_HDMI_DDC_INT_CTRL2 0x00000438 +#define REG_HDMI_DDC_INT_CTRL3 0x0000043C +#define REG_HDMI_DDC_INT_CTRL4 0x00000440 +#define REG_HDMI_DDC_INT_CTRL5 0x00000444 +#define REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL 0x00000464 +#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL 0x00000468 +#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 0x0000046C +#define REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS 0x00000470 +#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS 0x00000474 +#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 0x00000478 +#define REG_HDMI_HW_DDC_CTRL 0x000004CC + #define REG_HDMI_8x60_PHY_REG0 0x00000300 #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c #define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2 diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 82da4c1bc86c..41322045ced3 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -51,6 +51,9 @@ #define LEFT_MIXER 0 #define RIGHT_MIXER 1 +/* indicates pending page flip events */ +#define PENDING_FLIP 0x2 + static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc) { struct msm_drm_private *priv = crtc->dev->dev_private; @@ -399,13 +402,18 @@ static void sde_crtc_vblank_cb(void *data) { struct drm_crtc *crtc = (struct drm_crtc *)data; struct sde_crtc *sde_crtc = to_sde_crtc(crtc); + unsigned pending; + pending = atomic_xchg(&sde_crtc->pending, 0); /* keep statistics on vblank callback - with auto reset via 
debugfs */ if (ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0))) sde_crtc->vblank_cb_time = ktime_get(); else sde_crtc->vblank_cb_count++; - _sde_crtc_complete_flip(crtc, NULL); + + if (pending & PENDING_FLIP) + _sde_crtc_complete_flip(crtc, NULL); + drm_crtc_handle_vblank(crtc); DRM_DEBUG_VBL("crtc%d\n", crtc->base.id); SDE_EVT32_IRQ(DRMID(crtc)); @@ -519,6 +527,28 @@ static void sde_crtc_frame_event_cb(void *data, u32 event) queue_kthread_work(&priv->disp_thread[pipe_id].worker, &fevent->work); } +/** + * sde_crtc_request_flip_cb - callback to request page_flip events + * Once the HW flush is complete , userspace must be notified of + * PAGE_FLIP completed event in the next vblank event. + * Using this callback, a hint is set to signal any callers waiting + * for a PAGE_FLIP complete event. + * This is called within the enc_spinlock. + * @data: Pointer to cb private data + */ +static void sde_crtc_request_flip_cb(void *data) +{ + struct drm_crtc *crtc = (struct drm_crtc *)data; + struct sde_crtc *sde_crtc; + + if (!crtc) { + SDE_ERROR("invalid parameters\n"); + return; + } + sde_crtc = to_sde_crtc(crtc); + atomic_or(PENDING_FLIP, &sde_crtc->pending); +} + void sde_crtc_complete_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -679,7 +709,6 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc, { struct sde_crtc *sde_crtc; struct drm_device *dev; - unsigned long flags; u32 i; if (!crtc) { @@ -701,14 +730,6 @@ static void sde_crtc_atomic_begin(struct drm_crtc *crtc, if (!sde_crtc->num_mixers) _sde_crtc_setup_mixers(crtc); - if (sde_crtc->event) { - WARN_ON(sde_crtc->event); - } else { - spin_lock_irqsave(&dev->event_lock, flags); - sde_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); - } - /* Reset flush mask from previous commit */ for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) { struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl; @@ -763,7 +784,8 @@ static void sde_crtc_atomic_flush(struct drm_crtc *crtc, dev = crtc->dev; if (sde_crtc->event) { - SDE_DEBUG("already received sde_crtc->event\n"); + SDE_ERROR("%s already received sde_crtc->event\n", + sde_crtc->name); } else { spin_lock_irqsave(&dev->event_lock, flags); sde_crtc->event = crtc->state->event; @@ -999,6 +1021,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc) if (encoder->crtc != crtc) continue; sde_encoder_register_frame_event_callback(encoder, NULL, NULL); + sde_encoder_register_request_flip_callback(encoder, NULL, NULL); } memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers)); @@ -1039,6 +1062,8 @@ static void sde_crtc_enable(struct drm_crtc *crtc) continue; sde_encoder_register_frame_event_callback(encoder, sde_crtc_frame_event_cb, (void *)crtc); + sde_encoder_register_request_flip_callback(encoder, + sde_crtc_request_flip_cb, (void *)crtc); } for (i = 0; i < sde_crtc->num_mixers; i++) { diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 25a93e882e6d..97a20b987ef5 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -89,6 +89,7 @@ struct sde_crtc_frame_event { * @frame_pending : Whether or not an update is pending * @frame_events : static allocation of in-flight frame events * @frame_event_list : available frame event list + * @pending : Whether any page-flip events are pending signal * @spin_lock : spin lock for frame event, transaction status, etc... 
*/ struct sde_crtc { @@ -109,7 +110,7 @@ struct sde_crtc { /* output fence support */ struct sde_fence output_fence; - + atomic_t pending; struct sde_hw_stage_cfg stage_cfg; struct dentry *debugfs_root; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 8cffb03fdfbb..030b192e5df4 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -83,6 +83,8 @@ * Bit0 = phys_encs[0] etc. * @crtc_frame_event_cb: callback handler for frame event * @crtc_frame_event_cb_data: callback handler private data + * @crtc_request_flip_cb: callback handler for requesting page-flip event + * @crtc_request_flip_cb_data: callback handler private data * @crtc_frame_event: callback event * @frame_done_timeout: frame done timeout in Hz * @frame_done_timer: watchdog timer for frame done event @@ -107,8 +109,9 @@ struct sde_encoder_virt { DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL); void (*crtc_frame_event_cb)(void *, u32 event); void *crtc_frame_event_cb_data; + void (*crtc_request_flip_cb)(void *); + void *crtc_request_flip_cb_data; u32 crtc_frame_event; - atomic_t frame_done_timeout; struct timer_list frame_done_timer; }; @@ -584,6 +587,24 @@ void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc, spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); } +void sde_encoder_register_request_flip_callback(struct drm_encoder *drm_enc, + void (*request_flip_cb)(void *), + void *request_flip_cb_data) +{ + struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); + unsigned long lock_flags; + + if (!drm_enc) { + SDE_ERROR("invalid encoder\n"); + return; + } + + spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags); + sde_enc->crtc_request_flip_cb = request_flip_cb; + sde_enc->crtc_request_flip_cb_data = request_flip_cb_data; + spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); +} + static void sde_encoder_frame_done_callback( struct drm_encoder *drm_enc, struct sde_encoder_phys *ready_phys, u32 event) @@ -759,6 +780,11 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc) pending_flush); } + /* HW flush has happened, request a flip complete event now */ + if (sde_enc->crtc_request_flip_cb) + sde_enc->crtc_request_flip_cb( + sde_enc->crtc_request_flip_cb_data); + _sde_encoder_trigger_start(sde_enc->cur_master); spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index 82576b479bf8..6b74dca13ae9 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -72,6 +72,17 @@ void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder, void (*cb)(void *, u32), void *data); /** + * sde_encoder_register_request_flip_callback - provide callback to encoder that + * will be called after HW flush is complete to request + * a page flip event from CRTC. + * @encoder: encoder pointer + * @cb: callback pointer, provide NULL to deregister + * @data: user data provided to callback + */ +void sde_encoder_register_request_flip_callback(struct drm_encoder *encoder, + void (*cb)(void *), void *data); + +/** * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl * path (i.e. ctl flush and start) at next appropriate time. * Immediately: if no previous commit is outstanding. 
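
The request-flip callback declared above closes the loop this patch adds between encoder and CRTC: the CRTC registers the callback at enable time, the encoder invokes it right after the CTL flush, and the vblank handler only signals the DRM page-flip event if the pending hint was set. A minimal sketch of that handshake, assuming the atomic_t pending field added to struct sde_crtc; the example_* names are hypothetical stand-ins for sde_crtc_request_flip_cb() and sde_crtc_vblank_cb():

#include <linux/atomic.h>

#define EXAMPLE_PENDING_FLIP 0x2

/* called by the encoder after the hardware flush, under enc_spinlock */
static void example_request_flip(atomic_t *pending)
{
	atomic_or(EXAMPLE_PENDING_FLIP, pending);
}

/* called from the vblank path; consume the hint atomically and report
 * whether a page-flip completion should be sent this vblank
 */
static bool example_vblank_should_complete_flip(atomic_t *pending)
{
	return atomic_xchg(pending, 0) & EXAMPLE_PENDING_FLIP;
}
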
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c new file mode 100644 index 000000000000..69ab367307ea --- /dev/null +++ b/drivers/gpu/drm/msm/sde_edid_parser.c @@ -0,0 +1,512 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <drm/drm_edid.h> + +#include "sde_kms.h" +#include "sde_edid_parser.h" + +/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */ +#define DBC_START_OFFSET 4 +#define EDID_DTD_LEN 18 + +enum data_block_types { + RESERVED, + AUDIO_DATA_BLOCK, + VIDEO_DATA_BLOCK, + VENDOR_SPECIFIC_DATA_BLOCK, + SPEAKER_ALLOCATION_DATA_BLOCK, + VESA_DTC_DATA_BLOCK, + RESERVED2, + USE_EXTENDED_TAG +}; + +static u8 *sde_find_edid_extension(struct edid *edid, int ext_id) +{ + u8 *edid_ext = NULL; + int i; + + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return NULL; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == ext_id) + break; + } + + if (i == edid->extensions) + return NULL; + + return edid_ext; +} + +static u8 *sde_find_cea_extension(struct edid *edid) +{ + return sde_find_edid_extension(edid, SDE_CEA_EXT); +} + +static int +sde_cea_db_payload_len(const u8 *db) +{ + return db[0] & 0x1f; +} + +static int +sde_cea_db_tag(const u8 *db) +{ + return db[0] >> 5; +} + +static int +sde_cea_revision(const u8 *cea) +{ + return cea[1]; +} + +static int +sde_cea_db_offsets(const u8 *cea, int *start, int *end) +{ + /* Data block offset in CEA extension block */ + *start = 4; + *end = cea[2]; + if (*end == 0) + *end = 127; + if (*end < 4 || *end > 127) + return -ERANGE; + return 0; +} + +#define sde_for_each_cea_db(cea, i, start, end) \ +for ((i) = (start); \ +(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \ +(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1) + +static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id) +{ + u8 *db = NULL; + u8 *cea = NULL; + + if (!edid) { + pr_err("%s: invalid input\n", __func__); + return NULL; + } + + cea = sde_find_cea_extension(edid); + + if (cea && sde_cea_revision(cea) >= 3) { + int i, start, end; + + if (sde_cea_db_offsets(cea, &start, &end)) + return NULL; + + sde_for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) && + (db[1] == blk_id)) + return db; + } + } + return NULL; +} + +static u8 * +sde_edid_find_block(struct edid *edid, int blk_id) +{ + u8 *db = NULL; + u8 *cea = NULL; + + if (!edid) { + pr_err("%s: invalid input\n", __func__); + return NULL; + } + + cea = sde_find_cea_extension(edid); + + if (cea && sde_cea_revision(cea) >= 3) { + int i, start, end; + + if (sde_cea_db_offsets(cea, &start, &end)) + return 0; + + sde_for_each_cea_db(cea, i, start, end) { + db = &cea[i]; + if (sde_cea_db_tag(db) == blk_id) + return db; + } + } + return NULL; +} + + +static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset, + u8 type, u8 *len) +{ + /* the start of data block collection, 
start of Video Data Block */ + u32 offset = start_offset; + u32 dbc_offset = in_buf[2]; + + SDE_EDID_DEBUG("%s +", __func__); + /* + * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block + * collection present. + * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block + * collection present and no DTD data present. + */ + if ((dbc_offset == 0) || (dbc_offset == 4)) { + SDE_ERROR("EDID: no DTD or non-DTD data present\n"); + return NULL; + } + + while (offset < dbc_offset) { + u8 block_len = in_buf[offset] & 0x1F; + + if ((offset + block_len <= dbc_offset) && + (in_buf[offset] >> 5) == type) { + *len = block_len; + SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n", + type, offset, block_len); + + return in_buf + offset; + } + offset += 1 + block_len; + } + + return NULL; +} + +static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl) +{ + char *vendor_id; + u32 id_codes; + + SDE_EDID_DEBUG("%s +", __func__); + if (!edid_ctrl) { + SDE_ERROR("%s: invalid input\n", __func__); + return; + } + + vendor_id = edid_ctrl->vendor_id; + id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) + + edid_ctrl->edid->mfg_id[1]; + + vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F); + vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F); + vendor_id[2] = 'A' - 1 + (id_codes & 0x1F); + vendor_id[3] = 0; + SDE_EDID_DEBUG("vendor id is %s ", vendor_id); + SDE_EDID_DEBUG("%s -", __func__); +} + +static void sde_edid_set_y420_support(struct drm_connector *connector, +u32 video_format) +{ + u8 cea_mode = 0; + struct drm_display_mode *mode; + + /* Need to add Y420 support flag to the modes */ + list_for_each_entry(mode, &connector->probed_modes, head) { + cea_mode = drm_match_cea_mode(mode); + if ((cea_mode != 0) && (cea_mode == video_format)) { + SDE_EDID_DEBUG("%s found match for %d ", __func__, + video_format); + mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV; + } + } +} + +static void sde_edid_parse_Y420CMDB( +struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl, +const u8 *db) +{ + u32 offset = 0; + u8 len = 0; + u8 svd_len = 0; + const u8 *svd = NULL; + u32 i = 0, j = 0; + u32 video_format = 0; + + if (!edid_ctrl) { + DEV_ERR("%s: edid_ctrl is NULL\n", __func__); + return; + } + + if (!db) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + SDE_EDID_DEBUG("%s +\n", __func__); + len = db[0] & 0x1f; + + if (len < 7) + return; + /* Byte 3 to L+1 contain SVDs */ + offset += 2; + + svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK); + + if (svd) { + /*moving to the next byte as vic info begins there*/ + ++svd; + svd_len = svd[0] & 0x1f; + } + + for (i = 0; i < svd_len; i++, j++) { + video_format = *svd & 0x7F; + if (db[offset] & (1 << j)) + sde_edid_set_y420_support(connector, video_format); + + if (j & 0x80) { + j = j/8; + offset++; + if (offset >= len) + break; + } + } + + SDE_EDID_DEBUG("%s -\n", __func__); + +} + +static void sde_edid_parse_Y420VDB( +struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl, +const u8 *db) +{ + u8 len = db[0] & 0x1f; + u32 i = 0; + u32 video_format = 0; + + if (!edid_ctrl) { + DEV_ERR("%s: invalid input\n", __func__); + return; + } + + SDE_EDID_DEBUG("%s +\n", __func__); + + /* Offset to byte 3 */ + db += 2; + for (i = 0; i < len - 1; i++) { + video_format = *(db + i) & 0x7F; + /* + * mode was already added in get_modes() + * only need to set the Y420 support flag + */ + sde_edid_set_y420_support(connector, video_format); + } + SDE_EDID_DEBUG("%s -", __func__); +} + +static void sde_edid_set_mode_format( +struct 
drm_connector *connector, struct sde_edid_ctrl *edid_ctrl) +{ + const u8 *db = NULL; + struct drm_display_mode *mode; + + SDE_EDID_DEBUG("%s +\n", __func__); + /* Set YUV mode support flags for YCbcr420VDB */ + db = sde_edid_find_extended_tag_block(edid_ctrl->edid, + Y420_VIDEO_DATA_BLOCK); + if (db) + sde_edid_parse_Y420VDB(connector, edid_ctrl, db); + else + SDE_EDID_DEBUG("YCbCr420 VDB is not present\n"); + + /* Set RGB supported on all modes where YUV is not set */ + list_for_each_entry(mode, &connector->probed_modes, head) { + if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV)) + mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB; + } + + + db = sde_edid_find_extended_tag_block(edid_ctrl->edid, + Y420_CAPABILITY_MAP_DATA_BLOCK); + if (db) + sde_edid_parse_Y420CMDB(connector, edid_ctrl, db); + else + SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n"); + + SDE_EDID_DEBUG("%s -\n", __func__); +} + +static void _sde_edid_extract_audio_data_blocks( + struct sde_edid_ctrl *edid_ctrl) +{ + u8 len = 0; + u8 adb_max = 0; + const u8 *adb = NULL; + u32 offset = DBC_START_OFFSET; + u8 *cea = NULL; + + if (!edid_ctrl) { + SDE_ERROR("invalid edid_ctrl\n"); + return; + } + SDE_EDID_DEBUG("%s +", __func__); + cea = sde_find_cea_extension(edid_ctrl->edid); + if (!cea) { + SDE_DEBUG("CEA extension not found\n"); + return; + } + + edid_ctrl->adb_size = 0; + + memset(edid_ctrl->audio_data_block, 0, + sizeof(edid_ctrl->audio_data_block)); + + do { + len = 0; + adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK, + &len); + + if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE || + adb_max >= MAX_NUMBER_ADB)) { + if (!edid_ctrl->adb_size) { + SDE_DEBUG("No/Invalid Audio Data Block\n"); + return; + } + + continue; + } + + memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size, + adb + 1, len); + offset = (adb - cea) + 1 + len; + + edid_ctrl->adb_size += len; + adb_max++; + } while (adb); + SDE_EDID_DEBUG("%s -", __func__); +} + +static void _sde_edid_extract_speaker_allocation_data( + struct sde_edid_ctrl *edid_ctrl) +{ + u8 len; + const u8 *sadb = NULL; + u8 *cea = NULL; + + if (!edid_ctrl) { + SDE_ERROR("invalid edid_ctrl\n"); + return; + } + SDE_EDID_DEBUG("%s +", __func__); + cea = sde_find_cea_extension(edid_ctrl->edid); + if (!cea) { + SDE_DEBUG("CEA extension not found\n"); + return; + } + + sadb = _sde_edid_find_block(cea, DBC_START_OFFSET, + SPEAKER_ALLOCATION_DATA_BLOCK, &len); + if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) { + SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n"); + return; + } + + memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len); + edid_ctrl->sadb_size = len; + + SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n", + sadb[1], + (sadb[1] & BIT(0)) ? "FL/FR," : "", + (sadb[1] & BIT(1)) ? "LFE," : "", + (sadb[1] & BIT(2)) ? "FC," : "", + (sadb[1] & BIT(3)) ? "RL/RR," : "", + (sadb[1] & BIT(4)) ? "RC," : "", + (sadb[1] & BIT(5)) ? "FLC/FRC," : "", + (sadb[1] & BIT(6)) ? 
"RLC/RRC," : ""); + SDE_EDID_DEBUG("%s -", __func__); +} + +struct sde_edid_ctrl *sde_edid_init(void) +{ + struct sde_edid_ctrl *edid_ctrl = NULL; + + SDE_EDID_DEBUG("%s +\n", __func__); + edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL); + if (!edid_ctrl) { + SDE_ERROR("edid_ctrl alloc failed\n"); + return NULL; + } + memset((edid_ctrl), 0, sizeof(*edid_ctrl)); + SDE_EDID_DEBUG("%s -\n", __func__); + return edid_ctrl; +} + +void sde_free_edid(void **input) +{ + struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input); + + SDE_EDID_DEBUG("%s +", __func__); + kfree(edid_ctrl->edid); + edid_ctrl->edid = NULL; +} + +void sde_edid_deinit(void **input) +{ + struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input); + + SDE_EDID_DEBUG("%s +", __func__); + sde_free_edid((void *)&edid_ctrl); + kfree(edid_ctrl); + SDE_EDID_DEBUG("%s -", __func__); +} + +int _sde_edid_update_modes(struct drm_connector *connector, + void *input) +{ + int rc = 0; + struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input); + + SDE_EDID_DEBUG("%s +", __func__); + if (edid_ctrl->edid) { + drm_mode_connector_update_edid_property(connector, + edid_ctrl->edid); + + rc = drm_add_edid_modes(connector, edid_ctrl->edid); + sde_edid_set_mode_format(connector, edid_ctrl); + SDE_EDID_DEBUG("%s -", __func__); + return rc; + } + + drm_mode_connector_update_edid_property(connector, NULL); + SDE_EDID_DEBUG("%s null edid -", __func__); + return rc; +} + +bool sde_detect_hdmi_monitor(void *input) +{ + struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input); + + return drm_detect_hdmi_monitor(edid_ctrl->edid); +} + +void sde_get_edid(struct drm_connector *connector, + struct i2c_adapter *adapter, void **input) +{ + struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input); + + edid_ctrl->edid = drm_get_edid(connector, adapter); + SDE_EDID_DEBUG("%s +\n", __func__); + + if (!edid_ctrl->edid) + SDE_ERROR("EDID read failed\n"); + + if (edid_ctrl->edid) { + sde_edid_extract_vendor_id(edid_ctrl); + _sde_edid_extract_audio_data_blocks(edid_ctrl); + _sde_edid_extract_speaker_allocation_data(edid_ctrl); + } + SDE_EDID_DEBUG("%s -\n", __func__); +}; diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h new file mode 100644 index 000000000000..1143dc2c7bec --- /dev/null +++ b/drivers/gpu/drm/msm/sde_edid_parser.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _SDE_EDID_PARSER_H_ +#define _SDE_EDID_PARSER_H_ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/debugfs.h> +#include <linux/of_device.h> +#include <linux/i2c.h> +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_edid.h> + + +#define MAX_NUMBER_ADB 5 +#define MAX_AUDIO_DATA_BLOCK_SIZE 30 +#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3 +#define EDID_VENDOR_ID_SIZE 4 + +#define SDE_CEA_EXT 0x02 +#define SDE_EXTENDED_TAG 0x07 + +enum extended_data_block_types { + VIDEO_CAPABILITY_DATA_BLOCK = 0x0, + VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01, + HDMI_VIDEO_DATA_BLOCK = 0x04, + HDR_STATIC_METADATA_DATA_BLOCK = 0x06, + Y420_VIDEO_DATA_BLOCK = 0x0E, + VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D, + Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F, + VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11, + INFOFRAME_DATA_BLOCK = 0x20, +}; + +#ifdef SDE_EDID_DEBUG_ENABLE +#define SDE_EDID_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args) +#else +#define SDE_EDID_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args) +#endif + +/* + * struct hdmi_edid_hdr_data - HDR Static Metadata + * @eotf: Electro-Optical Transfer Function + * @metadata_type_one: Static Metadata Type 1 support + * @max_luminance: Desired Content Maximum Luminance + * @avg_luminance: Desired Content Frame-average Luminance + * @min_luminance: Desired Content Minimum Luminance + */ +struct sde_edid_hdr_data { + u32 eotf; + bool metadata_type_one; + u32 max_luminance; + u32 avg_luminance; + u32 min_luminance; +}; + +struct sde_edid_sink_caps { + u32 max_pclk_in_hz; + bool scdc_present; + bool scramble_support; /* scramble support for less than 340Mcsc */ + bool read_req_support; + bool osd_disparity; + bool dual_view_support; + bool ind_view_support; +}; + +struct sde_edid_ctrl { + struct edid *edid; + u8 pt_scan_info; + u8 it_scan_info; + u8 ce_scan_info; + u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE]; + int adb_size; + u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE]; + int sadb_size; + bool hdr_supported; + char vendor_id[EDID_VENDOR_ID_SIZE]; + struct sde_edid_sink_caps sink_caps; + struct sde_edid_hdr_data hdr_data; +}; + +/** + * sde_edid_init() - init edid structure. + * @edid_ctrl: Handle to the edid_ctrl structure. + * Return: handle to sde_edid_ctrl for the client. + */ +struct sde_edid_ctrl *sde_edid_init(void); + +/** + * sde_edid_deinit() - deinit edid structure. + * @edid_ctrl: Handle to the edid_ctrl structure. + * + * Return: void. + */ +void sde_edid_deinit(void **edid_ctrl); + +/** + * sde_get_edid() - get edid info. + * @connector: Handle to the drm_connector. + * @adapter: handle to i2c adapter for DDC read + * @edid_ctrl: Handle to the edid_ctrl structure. + * + * Return: void. + */ +void sde_get_edid(struct drm_connector *connector, +struct i2c_adapter *adapter, +void **edid_ctrl); + +/** + * sde_free_edid() - free edid structure. + * @edid_ctrl: Handle to the edid_ctrl structure. + * + * Return: void. + */ +void sde_free_edid(void **edid_ctrl); + +/** + * sde_detect_hdmi_monitor() - detect HDMI mode. + * @edid_ctrl: Handle to the edid_ctrl structure. + * + * Return: error code. + */ +bool sde_detect_hdmi_monitor(void *edid_ctrl); + +/** + * _sde_edid_update_modes() - populate EDID modes. + * @edid_ctrl: Handle to the edid_ctrl structure. + * + * Return: error code. 
+ */ +int _sde_edid_update_modes(struct drm_connector *connector, + void *edid_ctrl); + +#endif /* _SDE_EDID_PARSER_H_ */ + diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 161b718b8a38..d79d9613043f 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -813,10 +813,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, dwords += 6; /* - * REG_TO_MEM packet on A5xx needs another ordinal. + * REG_TO_MEM packet on A5xx and above needs another ordinal. * Add 2 more dwords since we do profiling before and after. */ - if (adreno_is_a5xx(adreno_dev)) + if (!ADRENO_LEGACY_PM4(adreno_dev)) dwords += 2; /* @@ -833,7 +833,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) { kernel_profiling = true; dwords += 6; - if (adreno_is_a5xx(adreno_dev)) + if (!ADRENO_LEGACY_PM4(adreno_dev)) dwords += 2; } diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 1de8e212a703..986026aa1b66 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -260,9 +260,12 @@ kgsl_mem_entry_create(void) { struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (entry != NULL) + if (entry != NULL) { kref_init(&entry->refcount); + /* put this ref in the caller functions after init */ + kref_get(&entry->refcount); + } return entry; } #ifdef CONFIG_DMA_SHARED_BUFFER @@ -1764,9 +1767,9 @@ long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv, /* Commit the pointer to the context in context_idr */ write_lock(&device->context_lock); idr_replace(&device->context_idr, context, context->id); + param->drawctxt_id = context->id; write_unlock(&device->context_lock); - param->drawctxt_id = context->id; done: return result; } @@ -2399,6 +2402,9 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv, trace_kgsl_mem_map(entry, fd); kgsl_mem_entry_commit_process(entry); + + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); return 0; unmap: @@ -2705,6 +2711,9 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv, trace_kgsl_mem_map(entry, param->fd); kgsl_mem_entry_commit_process(entry); + + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); return result; error_attach: @@ -3143,6 +3152,9 @@ long kgsl_ioctl_gpuobj_alloc(struct kgsl_device_private *dev_priv, param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc); param->id = entry->id; + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); + return 0; } @@ -3166,6 +3178,9 @@ long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv, param->size = (size_t) entry->memdesc.size; param->flags = (unsigned int) entry->memdesc.flags; + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); + return 0; } @@ -3189,6 +3204,9 @@ long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv, param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc); param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr; + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); + return 0; } @@ -3306,6 +3324,9 @@ long kgsl_ioctl_sparse_phys_alloc(struct kgsl_device_private *dev_priv, trace_sparse_phys_alloc(entry->id, param->size, param->pagesize); kgsl_mem_entry_commit_process(entry); + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); + return 0; 
err_invalid_pages: @@ -3385,6 +3406,9 @@ long kgsl_ioctl_sparse_virt_alloc(struct kgsl_device_private *dev_priv, trace_sparse_virt_alloc(entry->id, param->size, param->pagesize); kgsl_mem_entry_commit_process(entry); + /* put the extra refcount for kgsl_mem_entry_create() */ + kgsl_mem_entry_put(entry); + return 0; } diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 287d839f98d0..9f9dd574c8d0 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2557,6 +2557,8 @@ static int etm4_set_reg_dump(struct etmv4_drvdata *drvdata) drvdata->reg_data.addr = virt_to_phys(baddr); drvdata->reg_data.len = size; + scnprintf(drvdata->reg_data.name, sizeof(drvdata->reg_data.name), + "KETM_REG%d", drvdata->cpu); dump_entry.id = MSM_DUMP_DATA_ETM_REG + drvdata->cpu; dump_entry.addr = virt_to_phys(&drvdata->reg_data); diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 34b12e015768..c5998bd5ce02 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1726,6 +1726,9 @@ static int tmc_etf_set_buf_dump(struct tmc_drvdata *drvdata) drvdata->buf_data.addr = virt_to_phys(drvdata->buf); drvdata->buf_data.len = drvdata->size; + scnprintf(drvdata->buf_data.name, sizeof(drvdata->buf_data.name), + "KTMC_ETF%d", count); + dump_entry.id = MSM_DUMP_DATA_TMC_ETF + count; dump_entry.addr = virt_to_phys(&drvdata->buf_data); @@ -1817,6 +1820,8 @@ static int tmc_set_reg_dump(struct tmc_drvdata *drvdata) drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf); drvdata->reg_data.len = size; + scnprintf(drvdata->reg_data.name, sizeof(drvdata->reg_data.name), + "KTMC_REG%d", count); dump_entry.id = MSM_DUMP_DATA_TMC_REG + count; dump_entry.addr = virt_to_phys(&drvdata->reg_data); diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c index 054dfcc8556a..05b1985ba378 100644 --- a/drivers/iio/adc/qcom-tadc.c +++ b/drivers/iio/adc/qcom-tadc.c @@ -228,6 +228,7 @@ struct tadc_chip { struct votable *tadc_disable_votable; struct work_struct status_change_work; struct notifier_block nb; + u8 hwtrig_conv; }; struct tadc_pt { @@ -356,6 +357,26 @@ unlock: return rc; } +static int tadc_masked_write(struct tadc_chip *chip, u16 reg, u8 mask, u8 data) +{ + int rc = 0; + + mutex_lock(&chip->write_lock); + if (tadc_is_reg_locked(chip, reg)) { + rc = regmap_write(chip->regmap, (reg & 0xFF00) | 0xD0, 0xA5); + if (rc < 0) { + pr_err("Couldn't unlock secure register rc=%d\n", rc); + goto unlock; + } + } + + rc = regmap_update_bits(chip->regmap, reg, mask, data); + +unlock: + mutex_unlock(&chip->write_lock); + return rc; +} + static int tadc_lerp(const struct tadc_pt *pts, size_t size, bool inv, s32 input, s32 *output) { @@ -880,6 +901,12 @@ static int tadc_disable_vote_callback(struct votable *votable, 
if (timeleft == 0) pr_err("Timed out waiting for eoc, disabling hw conversions regardless\n"); + rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), + &chip->hwtrig_conv, 1); + if (rc < 0) { + pr_err("Couldn't save hw conversions rc=%d\n", rc); + return rc; + } rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), 0x00); if (rc < 0) { pr_err("Couldn't disable hw conversions rc=%d\n", rc); @@ -896,9 +923,10 @@ static int tadc_disable_vote_callback(struct votable *votable, pr_err("Couldn't disable direct test mode rc=%d\n", rc); return rc; } - rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), 0x07); + rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), + chip->hwtrig_conv); if (rc < 0) { - pr_err("Couldn't enable hw conversions rc=%d\n", rc); + pr_err("Couldn't restore hw conversions rc=%d\n", rc); return rc; } } @@ -1126,16 +1154,23 @@ static int tadc_init_hw(struct tadc_chip *chip) return rc; } - /* enable all temperature hardware triggers */ - rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), - BIT(TADC_THERM1) | - BIT(TADC_THERM2) | - BIT(TADC_DIE_TEMP)); + /* enable connector and die temp hardware triggers */ + rc = tadc_masked_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), + BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP), + BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP)); if (rc < 0) { pr_err("Couldn't enable hardware triggers rc=%d\n", rc); return rc; } + /* save hw triggered conversion configuration */ + rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), + &chip->hwtrig_conv, 1); + if (rc < 0) { + pr_err("Couldn't save hw conversions rc=%d\n", rc); + return rc; + } + return 0; } diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c index 1df5d8812991..6d2e7a569044 100644 --- a/drivers/input/misc/hbtp_input.c +++ b/drivers/input/misc/hbtp_input.c @@ -1,5 +1,5 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -47,6 +47,8 @@ struct hbtp_data { struct input_dev *input_dev; s32 count; struct mutex mutex; + struct mutex sensormutex; + struct hbtp_sensor_data *sensor_data; bool touch_status[HBTP_MAX_FINGER]; #if defined(CONFIG_FB) struct notifier_block fb_notif; @@ -87,10 +89,14 @@ struct hbtp_data { u32 power_on_delay; u32 power_off_delay; bool manage_pin_ctrl; + s16 ROI[MAX_ROI_SIZE]; + s16 accelBuffer[MAX_ACCEL_SIZE]; }; static struct hbtp_data *hbtp; +static struct kobject *sensor_kobject; + #if defined(CONFIG_FB) static int hbtp_fb_suspend(struct hbtp_data *ts); static int hbtp_fb_early_resume(struct hbtp_data *ts); @@ -150,6 +156,46 @@ static int fb_notifier_callback(struct notifier_block *self, } #endif +static ssize_t hbtp_sensor_roi_show(struct file *dev, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t pos, + size_t size) { + mutex_lock(&hbtp->sensormutex); + memcpy(buf, hbtp->ROI, size); + mutex_unlock(&hbtp->sensormutex); + + return size; +} + +static ssize_t hbtp_sensor_vib_show(struct file *dev, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t pos, + size_t size) { + mutex_lock(&hbtp->sensormutex); + memcpy(buf, hbtp->accelBuffer, size); + mutex_unlock(&hbtp->sensormutex); + + return size; +} + +static struct bin_attribute capdata_attr = { + .attr = { + .name = "capdata", + .mode = S_IRUGO, + }, + .size = 1024, + .read = hbtp_sensor_roi_show, + .write = NULL, +}; + +static struct bin_attribute vibdata_attr = { + .attr = { + .name = "vib_data", + .mode = S_IRUGO, + }, + .size = MAX_ACCEL_SIZE*sizeof(int16_t), + .read = hbtp_sensor_vib_show, + .write = NULL, +}; + static int hbtp_input_open(struct inode *inode, struct file *file) { mutex_lock(&hbtp->mutex); @@ -748,6 +794,22 @@ static long hbtp_input_ioctl_handler(struct file *file, unsigned int cmd, return -EINVAL; } break; + + case HBTP_SET_SENSORDATA: + if (copy_from_user(hbtp->sensor_data, (void *)arg, + sizeof(struct hbtp_sensor_data))) { + pr_err("%s: Error copying data\n", __func__); + return -EFAULT; + } + mutex_lock(&hbtp->sensormutex); + memcpy(hbtp->ROI, hbtp->sensor_data->ROI, sizeof(hbtp->ROI)); + memcpy(hbtp->accelBuffer, hbtp->sensor_data->accelBuffer, + sizeof(hbtp->accelBuffer)); + mutex_unlock(&hbtp->sensormutex); + + error = 0; + break; + default: pr_err("%s: Unsupported ioctl command %u\n", __func__, cmd); error = -EINVAL; @@ -1358,7 +1420,13 @@ static int __init hbtp_init(void) if (!hbtp) return -ENOMEM; + hbtp->sensor_data = kzalloc(sizeof(struct hbtp_sensor_data), + GFP_KERNEL); + if (!hbtp->sensor_data) + goto err_sensordata; + mutex_init(&hbtp->mutex); + mutex_init(&hbtp->sensormutex); error = misc_register(&hbtp_input_misc); if (error) { @@ -1376,6 +1444,28 @@ static int __init hbtp_init(void) } #endif + sensor_kobject = kobject_create_and_add("hbtpsensor", kernel_kobj); + if (!sensor_kobject) { + pr_err("%s: Could not create hbtpsensor kobject\n", __func__); + goto err_kobject_create; + } + + error = sysfs_create_bin_file(sensor_kobject, &capdata_attr); + if (error < 0) { + pr_err("%s: hbtp capdata sysfs creation failed: %d\n", __func__, + error); + goto err_sysfs_create_capdata; + } + pr_debug("capdata sysfs creation success\n"); + + error = sysfs_create_bin_file(sensor_kobject, &vibdata_attr); + if (error < 0) { + pr_err("%s: vibdata sysfs creation failed: %d\n", __func__, + error); + goto err_sysfs_create_vibdata; + } + 
pr_debug("vibdata sysfs creation success\n"); + error = platform_driver_register(&hbtp_pdev_driver); if (error) { pr_err("Failed to register platform driver: %d\n", error); @@ -1385,12 +1475,20 @@ static int __init hbtp_init(void) return 0; err_platform_drv_reg: + sysfs_remove_bin_file(sensor_kobject, &vibdata_attr); +err_sysfs_create_vibdata: + sysfs_remove_bin_file(sensor_kobject, &capdata_attr); +err_sysfs_create_capdata: + kobject_put(sensor_kobject); +err_kobject_create: #if defined(CONFIG_FB) fb_unregister_client(&hbtp->fb_notif); err_fb_reg: #endif misc_deregister(&hbtp_input_misc); err_misc_reg: + kfree(hbtp->sensor_data); +err_sensordata: kfree(hbtp); return error; @@ -1398,6 +1496,9 @@ err_misc_reg: static void __exit hbtp_exit(void) { + sysfs_remove_bin_file(sensor_kobject, &vibdata_attr); + sysfs_remove_bin_file(sensor_kobject, &capdata_attr); + kobject_put(sensor_kobject); misc_deregister(&hbtp_input_misc); if (hbtp->input_dev) input_unregister_device(hbtp->input_dev); @@ -1408,6 +1509,7 @@ static void __exit hbtp_exit(void) platform_driver_unregister(&hbtp_pdev_driver); + kfree(hbtp->sensor_data); kfree(hbtp); } diff --git a/drivers/input/misc/vl53L0/stmvl53l0.h b/drivers/input/misc/vl53L0/stmvl53l0.h index ae517ebe461a..b15d78cc6825 100644 --- a/drivers/input/misc/vl53L0/stmvl53l0.h +++ b/drivers/input/misc/vl53L0/stmvl53l0.h @@ -131,6 +131,7 @@ struct stmvl53l0_data { struct miscdevice miscdev; int irq; + int irq_gpio; unsigned int reset; /* control flag from HAL */ diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c b/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c index e08edbcc73f9..79fba00ea086 100644 --- a/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c +++ b/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c @@ -248,6 +248,7 @@ static int stmvl53l0_cci_init(struct cci_data *data) cci_client->retries = 3; cci_client->id_map = 0; cci_client->cci_i2c_master = data->cci_master; + cci_client->i2c_freq_mode = I2C_FAST_MODE; rc = data->client->i2c_func_tbl->i2c_util(data->client, MSM_CCI_INIT); if (rc < 0) { vl53l0_errmsg("%d: CCI Init failed\n", __LINE__); @@ -295,8 +296,20 @@ static int32_t stmvl53l0_platform_probe(struct platform_device *pdev) rc = stmvl53l0_get_dt_data(&pdev->dev, cci_object); if (rc < 0) { vl53l0_errmsg("%d, failed rc %d\n", __LINE__, rc); + kfree(vl53l0_data->client_object); + kfree(vl53l0_data); return rc; } + vl53l0_data->irq_gpio = of_get_named_gpio_flags(pdev->dev.of_node, + "stm,irq-gpio", 0, NULL); + + if (!gpio_is_valid(vl53l0_data->irq_gpio)) { + vl53l0_errmsg("%d failed get irq gpio", __LINE__); + kfree(vl53l0_data->client_object); + kfree(vl53l0_data); + return -EINVAL; + } + cci_object->subdev_id = pdev->id; /* Set device type as platform device */ @@ -418,6 +431,7 @@ int stmvl53l0_power_up_cci(void *cci_object, unsigned int *preset_flag) } } data->power_up = 1; + usleep_range(3000, 3500); *preset_flag = 1; vl53l0_dbgmsg("End\n"); diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module.c b/drivers/input/misc/vl53L0/stmvl53l0_module.c index f242e5f497d0..6881aba9fc64 100644 --- a/drivers/input/misc/vl53L0/stmvl53l0_module.c +++ b/drivers/input/misc/vl53L0/stmvl53l0_module.c @@ -38,8 +38,8 @@ #include "vl53l0_api.h" #include "vl53l010_api.h" -/*#define USE_INT */ -#define IRQ_NUM 59 +#define USE_INT + /* #define DEBUG_TIME_LOG */ #ifdef DEBUG_TIME_LOG struct timeval start_tv, stop_tv; @@ -2668,12 +2668,12 @@ int stmvl53l0_setup(struct stmvl53l0_data *data) #ifdef USE_INT /* init interrupt */ - gpio_request(IRQ_NUM, 
"vl53l0_gpio_int"); - gpio_direction_input(IRQ_NUM); - irq = gpio_to_irq(IRQ_NUM); + gpio_request(data->irq_gpio, "vl53l0_gpio_int"); + gpio_direction_input(data->irq_gpio); + irq = gpio_to_irq(data->irq_gpio); if (irq < 0) { vl53l0_errmsg("filed to map GPIO: %d to interrupt:%d\n", - IRQ_NUM, irq); + data->irq_gpio, irq); } else { vl53l0_dbgmsg("register_irq:%d\n", irq); /* IRQF_TRIGGER_FALLING- poliarity:0 IRQF_TRIGGER_RISNG - diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 51159711b1d8..25fe6c85a34e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -3288,6 +3288,43 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, 1 << DOMAIN_ATTR_ENABLE_TTBR1; ret = 0; break; + case DOMAIN_ATTR_GEOMETRY: { + struct iommu_domain_geometry *geometry = + (struct iommu_domain_geometry *)data; + + if (smmu_domain->smmu != NULL) { + dev_err(smmu_domain->smmu->dev, + "cannot set geometry attribute while attached\n"); + ret = -EBUSY; + break; + } + + if (geometry->aperture_start >= SZ_1G * 4ULL || + geometry->aperture_end >= SZ_1G * 4ULL) { + pr_err("fastmap does not support IOVAs >= 4GB\n"); + ret = -EINVAL; + break; + } + if (smmu_domain->attributes + & (1 << DOMAIN_ATTR_GEOMETRY)) { + if (geometry->aperture_start + < domain->geometry.aperture_start) + domain->geometry.aperture_start = + geometry->aperture_start; + + if (geometry->aperture_end + > domain->geometry.aperture_end) + domain->geometry.aperture_end = + geometry->aperture_end; + } else { + smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY; + domain->geometry.aperture_start = + geometry->aperture_start; + domain->geometry.aperture_end = geometry->aperture_end; + } + ret = 0; + break; + } default: ret = -ENODEV; break; diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index 8c6364f03eac..0881d68f34d8 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -188,7 +188,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, iommu_tlbiall(mapping->domain); mapping->have_stale_tlbs = false; - av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base, + av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, + mapping->domain->geometry.aperture_start, + mapping->base, mapping->base + mapping->size - 1, skip_sync); } @@ -367,7 +369,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, if (unlikely(iova == DMA_ERROR_CODE)) goto fail; - pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova); + pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->domain->geometry.aperture_start, iova); if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot))) goto fail_free_iova; @@ -391,7 +394,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova, struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; unsigned long flags; av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, - mapping->base, iova); + mapping->domain->geometry.aperture_start, + iova); unsigned long offset = iova & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset, FAST_PAGE_SIZE); int nptes = len >> FAST_PAGE_SHIFT; @@ -414,7 +418,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev, { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, - mapping->base, iova); + mapping->domain->geometry.aperture_start, + iova); unsigned long offset = iova & ~FAST_PAGE_MASK; struct page *page = 
phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); @@ -427,7 +432,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev, { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, - mapping->base, iova); + mapping->domain->geometry.aperture_start, + iova); unsigned long offset = iova & ~FAST_PAGE_MASK; struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); @@ -555,8 +561,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, while (sg_miter_next(&miter)) { int nptes = miter.length >> FAST_PAGE_SHIFT; - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, - iova_iter); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->domain->geometry.aperture_start, + iova_iter); if (unlikely(av8l_fast_map_public( ptep, page_to_phys(miter.page), miter.length, prot))) { @@ -584,7 +591,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, out_unmap: /* need to take the lock again for page tables and iova */ spin_lock_irqsave(&mapping->lock, flags); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->domain->geometry.aperture_start, + dma_addr); av8l_fast_unmap_public(ptep, size); fast_dmac_clean_range(mapping, ptep, ptep + count); out_free_iova: @@ -616,7 +625,8 @@ static void fast_smmu_free(struct device *dev, size_t size, pages = area->pages; dma_common_free_remap(vaddr, size, VM_USERMAP, false); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->domain->geometry.aperture_start, dma_handle); spin_lock_irqsave(&mapping->lock, flags); av8l_fast_unmap_public(ptep, size); fast_dmac_clean_range(mapping, ptep, ptep + count); @@ -720,7 +730,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = { * * Creates a mapping structure which holds information about used/unused IO * address ranges, which is required to perform mapping with IOMMU aware - * functions. The only VA range supported is [0, 4GB). + * functions. The only VA range supported is [0, 4GB]. * * The client device need to be attached to the mapping with * fast_smmu_attach_device function. 
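[Editorial aside, not part of the patch: the dma-mapping-fast.c and io-pgtable-fast.c hunks above and below consistently replace mapping->base with the domain's geometry.aperture_start wherever the flat PTE array is indexed. The fragment below is a minimal sketch of the indexing those call sites appear to assume; the helper name and the exact arithmetic of iopte_pmd_offset() are inferred from the surrounding hunks (e.g. the rewritten av8l_fast_clear_stale_ptes loop), not quoted from the tree, and the types mirror the driver's own usage.]

/* Hypothetical sketch of the indexing assumed by iopte_pmd_offset():
 * one PTE per 4 KiB page, counted from the start of the IOVA aperture
 * rather than from mapping->base.  FAST_PAGE_SHIFT, u64 and
 * av8l_fast_iopte are the identifiers already used in the hunks above.
 */
static inline av8l_fast_iopte *sketch_iopte_offset(av8l_fast_iopte *ptes,
						   u64 aperture_start,
						   u64 iova)
{
	/* PTE array is assumed to cover [aperture_start, aperture_end]. */
	return ptes + ((iova - aperture_start) >> FAST_PAGE_SHIFT);
}

[With that assumption, passing aperture_start instead of mapping->base keeps the PTE index correct once the aperture is widened via DOMAIN_ATTR_GEOMETRY in the attach path shown in the next hunk.]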
@@ -774,6 +784,7 @@ int fast_smmu_attach_device(struct device *dev, struct iommu_domain *domain = mapping->domain; struct iommu_pgtbl_info info; u64 size = (u64)mapping->bits << PAGE_SHIFT; + struct iommu_domain_geometry geometry; if (mapping->base + size > (SZ_1G * 4ULL)) return -EINVAL; @@ -788,8 +799,11 @@ int fast_smmu_attach_device(struct device *dev, mapping->fast->domain = domain; mapping->fast->dev = dev; - domain->geometry.aperture_start = mapping->base; - domain->geometry.aperture_end = mapping->base + size - 1; + geometry.aperture_start = mapping->base; + geometry.aperture_end = mapping->base + size - 1; + if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY, + &geometry)) + return -EINVAL; if (iommu_attach_device(domain, dev)) return -EINVAL; diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c index 3582e206db68..5378e95c4627 100644 --- a/drivers/iommu/io-pgtable-fast.c +++ b/drivers/iommu/io-pgtable-fast.c @@ -133,6 +133,9 @@ struct av8l_fast_io_pgtable { #define AV8L_FAST_TCR_EPD1_SHIFT 23 #define AV8L_FAST_TCR_EPD1_FAULT 1 +#define AV8L_FAST_TCR_SEP_SHIFT (15 + 32) +#define AV8L_FAST_TCR_SEP_UPSTREAM 7ULL + #define AV8L_FAST_MAIR_ATTR_SHIFT(n) ((n) << 3) #define AV8L_FAST_MAIR_ATTR_MASK 0xff #define AV8L_FAST_MAIR_ATTR_DEVICE 0x04 @@ -173,12 +176,12 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep) } void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base, - u64 end, bool skip_sync) + u64 start, u64 end, bool skip_sync) { int i; - av8l_fast_iopte *pmdp = pmds; + av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start); - for (i = base >> AV8L_FAST_PAGE_SHIFT; + for (i = start >> AV8L_FAST_PAGE_SHIFT; i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) { if (!(*pmdp & AV8L_FAST_PTE_VALID)) { *pmdp = 0; @@ -256,16 +259,17 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size) __av8l_fast_unmap(ptep, size, true); } -/* upper layer must take care of TLB invalidation */ static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova, size_t size) { struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops); + struct io_pgtable *iop = &data->iop; av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova); unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT; __av8l_fast_unmap(ptep, size, false); dmac_clean_range(ptep, ptep + nptes); + iop->cfg.tlb->tlb_flush_all(iop->cookie); return size; } @@ -522,6 +526,7 @@ av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie) #if defined(CONFIG_ARM) reg |= ARM_32_LPAE_TCR_EAE; #endif + reg |= AV8L_FAST_TCR_SEP_UPSTREAM << AV8L_FAST_TCR_SEP_SHIFT; cfg->av8l_fast_cfg.tcr = reg; /* MAIRs */ @@ -668,7 +673,7 @@ static int __init av8l_fast_positive_testing(void) } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, base, max, false); + av8l_fast_clear_stale_ptes(pmds, base, base, max, false); /* map the entire 4GB VA space with 8K map calls */ for (iova = base; iova < max; iova += SZ_8K) { @@ -689,7 +694,7 @@ static int __init av8l_fast_positive_testing(void) } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, base, max, false); + av8l_fast_clear_stale_ptes(pmds, base, base, max, false); /* map the entire 4GB VA space with 16K map calls */ for (iova = base; iova < max; iova += SZ_16K) { @@ -710,7 +715,7 @@ static int __init av8l_fast_positive_testing(void) } /* sweep up TLB proving PTEs */ - av8l_fast_clear_stale_ptes(pmds, base, max, false); + av8l_fast_clear_stale_ptes(pmds, base, base, max, false); /* map the entire 
4GB VA space with 64K map calls */ for (iova = base; iova < max; iova += SZ_64K) { diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c index 75fcde6e2c20..22b4934df6df 100644 --- a/drivers/iommu/iommu-debug.c +++ b/drivers/iommu/iommu-debug.c @@ -787,9 +787,13 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored) enum iommu_attr attrs[] = { DOMAIN_ATTR_FAST, DOMAIN_ATTR_ATOMIC, + DOMAIN_ATTR_GEOMETRY, }; int one = 1; - void *attr_values[] = { &one, &one, &one }; + struct iommu_domain_geometry geometry = {0, 0, 0}; + void *attr_values[] = { &one, &one, &geometry}; + + geometry.aperture_end = (dma_addr_t)(SZ_1G * 4ULL - 1); iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values, ARRAY_SIZE(attrs), sizes); diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index 54d395d5e78d..1a6cad946a25 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -127,11 +127,11 @@ #define FLASH_LED_LMH_MITIGATION_DISABLE 0 #define FLASH_LED_CHGR_MITIGATION_ENABLE BIT(4) #define FLASH_LED_CHGR_MITIGATION_DISABLE 0 -#define FLASH_LED_MITIGATION_SEL_DEFAULT 2 +#define FLASH_LED_LMH_MITIGATION_SEL_DEFAULT 2 #define FLASH_LED_MITIGATION_SEL_MAX 2 #define FLASH_LED_CHGR_MITIGATION_SEL_SHIFT 4 -#define FLASH_LED_MITIGATION_THRSH_DEFAULT 0xA -#define FLASH_LED_MITIGATION_THRSH_MAX 0x1F +#define FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT 0xA +#define FLASH_LED_CHGR_MITIGATION_THRSH_MAX 0x1F #define FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV 3700000 #define FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM 400000 #define FLASH_LED_IRES_BASE 3 @@ -152,12 +152,17 @@ #define FLASH_LED_MOD_ENABLE BIT(7) #define FLASH_LED_DISABLE 0x00 #define FLASH_LED_SAFETY_TMR_DISABLED 0x13 -#define FLASH_LED_MIN_CURRENT_MA 25 #define FLASH_LED_MAX_TOTAL_CURRENT_MA 3750 /* notifier call chain for flash-led irqs */ static ATOMIC_NOTIFIER_HEAD(irq_notifier_list); +enum flash_charger_mitigation { + FLASH_DISABLE_CHARGER_MITIGATION, + FLASH_HW_CHARGER_MITIGATION_BY_ILED_THRSHLD, + FLASH_SW_CHARGER_MITIGATION, +}; + enum flash_led_type { FLASH_LED_TYPE_FLASH, FLASH_LED_TYPE_TORCH, @@ -175,15 +180,14 @@ enum { struct flash_node_data { struct platform_device *pdev; struct led_classdev cdev; - struct pinctrl *pinctrl; - struct pinctrl_state *gpio_state_active; - struct pinctrl_state *gpio_state_suspend; + struct pinctrl *strobe_pinctrl; struct pinctrl_state *hw_strobe_state_active; struct pinctrl_state *hw_strobe_state_suspend; int hw_strobe_gpio; int ires_ua; int max_current; int current_ma; + int prev_current_ma; u8 duration; u8 id; u8 type; @@ -198,6 +202,9 @@ struct flash_node_data { struct flash_switch_data { struct platform_device *pdev; struct regulator *vreg; + struct pinctrl *led_en_pinctrl; + struct pinctrl_state *gpio_state_active; + struct pinctrl_state *gpio_state_suspend; struct led_classdev cdev; int led_mask; bool regulator_on; @@ -260,6 +267,7 @@ struct qpnp_flash_led { int num_fnodes; int num_snodes; int enable; + int total_current_ma; u16 base; bool trigger_lmh; bool trigger_chgr; @@ -486,10 +494,12 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) if (rc < 0) return rc; + val = led->pdata->chgr_mitigation_sel + << FLASH_LED_CHGR_MITIGATION_SEL_SHIFT; rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_MITIGATION_SEL(led->base), FLASH_LED_CHGR_MITIGATION_SEL_MASK, - led->pdata->chgr_mitigation_sel); + val); if (rc < 0) return rc; @@ -509,7 +519,7 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led 
*led) if (led->pdata->led1n2_iclamp_low_ma) { val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_low_ma, - led->fnode[0].ires_ua); + led->fnode[LED1].ires_ua); rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base), FLASH_LED_CURRENT_MASK, val); @@ -519,7 +529,7 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) if (led->pdata->led1n2_iclamp_mid_ma) { val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_mid_ma, - led->fnode[0].ires_ua); + led->fnode[LED1].ires_ua); rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base), FLASH_LED_CURRENT_MASK, val); @@ -529,7 +539,7 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) if (led->pdata->led3_iclamp_low_ma) { val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_low_ma, - led->fnode[3].ires_ua); + led->fnode[LED3].ires_ua); rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_LED3_ICLAMP_LOW(led->base), FLASH_LED_CURRENT_MASK, val); @@ -539,7 +549,7 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) if (led->pdata->led3_iclamp_mid_ma) { val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_mid_ma, - led->fnode[3].ires_ua); + led->fnode[LED3].ires_ua); rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_LED3_ICLAMP_MID(led->base), FLASH_LED_CURRENT_MASK, val); @@ -570,9 +580,9 @@ static int qpnp_flash_led_hw_strobe_enable(struct flash_node_data *fnode, if (gpio_is_valid(fnode->hw_strobe_gpio)) { gpio_set_value(fnode->hw_strobe_gpio, on ? 1 : 0); - } else if (fnode->hw_strobe_state_active && + } else if (fnode->strobe_pinctrl && fnode->hw_strobe_state_active && fnode->hw_strobe_state_suspend) { - rc = pinctrl_select_state(fnode->pinctrl, + rc = pinctrl_select_state(fnode->strobe_pinctrl, on ? fnode->hw_strobe_state_active : fnode->hw_strobe_state_suspend); if (rc < 0) { @@ -875,14 +885,29 @@ static int qpnp_flash_led_get_max_avail_current(struct qpnp_flash_led *led) return max_avail_current; } +static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode) +{ + struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev); + + if (fnode->current_ma) + led->total_current_ma += fnode->current_ma + - fnode->prev_current_ma; + else + led->total_current_ma -= fnode->prev_current_ma; + + fnode->prev_current_ma = fnode->current_ma; +} + static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value) { int prgm_current_ma = value; + int min_ma = fnode->ires_ua / 1000; + struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev); if (value <= 0) prgm_current_ma = 0; - else if (value < FLASH_LED_MIN_CURRENT_MA) - prgm_current_ma = FLASH_LED_MIN_CURRENT_MA; + else if (value < min_ma) + prgm_current_ma = min_ma; prgm_current_ma = min(prgm_current_ma, fnode->max_current); fnode->current_ma = prgm_current_ma; @@ -890,6 +915,13 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value) fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma, fnode->ires_ua); fnode->led_on = prgm_current_ma != 0; + + if (led->pdata->chgr_mitigation_sel == FLASH_SW_CHARGER_MITIGATION) { + qpnp_flash_led_aggregate_max_current(fnode); + led->trigger_chgr = false; + if (led->total_current_ma >= 1000) + led->trigger_chgr = true; + } } static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode) @@ -948,15 +980,6 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode) led->fnode[i].led_on = false; - if (led->fnode[i].pinctrl) { - rc = pinctrl_select_state(led->fnode[i].pinctrl, 
- led->fnode[i].gpio_state_suspend); - if (rc < 0) { - pr_err("failed to disable GPIO, rc=%d\n", rc); - return rc; - } - } - if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) { rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i], led->pdata->hw_strobe_option, false); @@ -968,6 +991,17 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode) } } + if (snode->led_en_pinctrl) { + pr_debug("Selecting suspend state for %s\n", snode->cdev.name); + rc = pinctrl_select_state(snode->led_en_pinctrl, + snode->gpio_state_suspend); + if (rc < 0) { + pr_err("failed to select pinctrl suspend state rc=%d\n", + rc); + return rc; + } + } + snode->enabled = false; return 0; } @@ -1038,15 +1072,6 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) val |= FLASH_LED_ENABLE << led->fnode[i].id; - if (led->fnode[i].pinctrl) { - rc = pinctrl_select_state(led->fnode[i].pinctrl, - led->fnode[i].gpio_state_active); - if (rc < 0) { - pr_err("failed to enable GPIO rc=%d\n", rc); - return rc; - } - } - if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) { rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i], led->pdata->hw_strobe_option, true); @@ -1058,6 +1083,17 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) } } + if (snode->led_en_pinctrl) { + pr_debug("Selecting active state for %s\n", snode->cdev.name); + rc = pinctrl_select_state(snode->led_en_pinctrl, + snode->gpio_state_active); + if (rc < 0) { + pr_err("failed to select pinctrl active state rc=%d\n", + rc); + return rc; + } + } + if (led->enable == 0) { rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_MOD_CTRL(led->base), @@ -1153,10 +1189,6 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options, *max_current = rc; } - led->trigger_chgr = false; - if (options & PRE_FLASH) - led->trigger_chgr = true; - return 0; } @@ -1330,7 +1362,7 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, struct flash_node_data *fnode, struct device_node *node) { const char *temp_string; - int rc; + int rc, min_ma; u32 val; bool strobe_sel = 0, edge_trigger = 0, active_high = 0; @@ -1386,10 +1418,11 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, return rc; } + min_ma = fnode->ires_ua / 1000; rc = of_property_read_u32(node, "qcom,max-current", &val); if (!rc) { - if (val < FLASH_LED_MIN_CURRENT_MA) - val = FLASH_LED_MIN_CURRENT_MA; + if (val < min_ma) + val = min_ma; fnode->max_current = val; fnode->cdev.max_brightness = val; } else { @@ -1399,11 +1432,10 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, rc = of_property_read_u32(node, "qcom,current-ma", &val); if (!rc) { - if (val < FLASH_LED_MIN_CURRENT_MA || - val > fnode->max_current) + if (val < min_ma || val > fnode->max_current) pr_warn("Invalid operational current specified, capping it\n"); - if (val < FLASH_LED_MIN_CURRENT_MA) - val = FLASH_LED_MIN_CURRENT_MA; + if (val < min_ma) + val = min_ma; if (val > fnode->max_current) val = fnode->max_current; fnode->current_ma = val; @@ -1460,6 +1492,20 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, } fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high; + rc = led_classdev_register(&led->pdev->dev, &fnode->cdev); + if (rc < 0) { + pr_err("Unable to register led node %d\n", fnode->id); + return rc; + } + + fnode->cdev.dev->of_node = node; + fnode->strobe_pinctrl = devm_pinctrl_get(fnode->cdev.dev); + if (IS_ERR_OR_NULL(fnode->strobe_pinctrl)) { + 
pr_debug("No pinctrl defined for %s, err=%ld\n", + fnode->cdev.name, PTR_ERR(fnode->strobe_pinctrl)); + fnode->strobe_pinctrl = NULL; + } + if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) { if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) { fnode->hw_strobe_gpio = of_get_named_gpio(node, @@ -1469,11 +1515,11 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, return fnode->hw_strobe_gpio; } gpio_direction_output(fnode->hw_strobe_gpio, 0); - } else { + } else if (fnode->strobe_pinctrl) { fnode->hw_strobe_gpio = -1; fnode->hw_strobe_state_active = - pinctrl_lookup_state(fnode->pinctrl, - "strobe_enable"); + pinctrl_lookup_state(fnode->strobe_pinctrl, + "strobe_enable"); if (IS_ERR_OR_NULL(fnode->hw_strobe_state_active)) { pr_err("No active pin for hardware strobe, rc=%ld\n", PTR_ERR(fnode->hw_strobe_state_active)); @@ -1481,8 +1527,8 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, } fnode->hw_strobe_state_suspend = - pinctrl_lookup_state(fnode->pinctrl, - "strobe_disable"); + pinctrl_lookup_state(fnode->strobe_pinctrl, + "strobe_disable"); if (IS_ERR_OR_NULL(fnode->hw_strobe_state_suspend)) { pr_err("No suspend pin for hardware strobe, rc=%ld\n", PTR_ERR(fnode->hw_strobe_state_suspend) @@ -1492,38 +1538,6 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, } } - rc = led_classdev_register(&led->pdev->dev, &fnode->cdev); - if (rc < 0) { - pr_err("Unable to register led node %d\n", fnode->id); - return rc; - } - - fnode->cdev.dev->of_node = node; - - fnode->pinctrl = devm_pinctrl_get(fnode->cdev.dev); - if (IS_ERR_OR_NULL(fnode->pinctrl)) { - pr_debug("No pinctrl defined\n"); - fnode->pinctrl = NULL; - } else { - fnode->gpio_state_active = - pinctrl_lookup_state(fnode->pinctrl, "led_enable"); - if (IS_ERR_OR_NULL(fnode->gpio_state_active)) { - pr_err("Cannot lookup LED active state\n"); - devm_pinctrl_put(fnode->pinctrl); - fnode->pinctrl = NULL; - return PTR_ERR(fnode->gpio_state_active); - } - - fnode->gpio_state_suspend = - pinctrl_lookup_state(fnode->pinctrl, "led_disable"); - if (IS_ERR_OR_NULL(fnode->gpio_state_suspend)) { - pr_err("Cannot lookup LED disable state\n"); - devm_pinctrl_put(fnode->pinctrl); - fnode->pinctrl = NULL; - return PTR_ERR(fnode->gpio_state_suspend); - } - } - return 0; } @@ -1588,6 +1602,36 @@ static int qpnp_flash_led_parse_and_register_switch(struct qpnp_flash_led *led, } snode->cdev.dev->of_node = node; + + snode->led_en_pinctrl = devm_pinctrl_get(snode->cdev.dev); + if (IS_ERR_OR_NULL(snode->led_en_pinctrl)) { + pr_debug("No pinctrl defined for %s, err=%ld\n", + snode->cdev.name, PTR_ERR(snode->led_en_pinctrl)); + snode->led_en_pinctrl = NULL; + } + + if (snode->led_en_pinctrl) { + snode->gpio_state_active = + pinctrl_lookup_state(snode->led_en_pinctrl, + "led_enable"); + if (IS_ERR_OR_NULL(snode->gpio_state_active)) { + pr_err("Cannot lookup LED active state\n"); + devm_pinctrl_put(snode->led_en_pinctrl); + snode->led_en_pinctrl = NULL; + return PTR_ERR(snode->gpio_state_active); + } + + snode->gpio_state_suspend = + pinctrl_lookup_state(snode->led_en_pinctrl, + "led_disable"); + if (IS_ERR_OR_NULL(snode->gpio_state_suspend)) { + pr_err("Cannot lookup LED disable state\n"); + devm_pinctrl_put(snode->led_en_pinctrl); + snode->led_en_pinctrl = NULL; + return PTR_ERR(snode->gpio_state_suspend); + } + } + return 0; } @@ -1939,7 +1983,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led, return rc; } - led->pdata->lmh_mitigation_sel = 
FLASH_LED_MITIGATION_SEL_DEFAULT; + led->pdata->lmh_mitigation_sel = FLASH_LED_LMH_MITIGATION_SEL_DEFAULT; rc = of_property_read_u32(node, "qcom,lmh-mitigation-sel", &val); if (!rc) { led->pdata->lmh_mitigation_sel = val; @@ -1953,7 +1997,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led, return -EINVAL; } - led->pdata->chgr_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT; + led->pdata->chgr_mitigation_sel = FLASH_SW_CHARGER_MITIGATION; rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val); if (!rc) { led->pdata->chgr_mitigation_sel = val; @@ -1967,9 +2011,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led, return -EINVAL; } - led->pdata->chgr_mitigation_sel <<= FLASH_LED_CHGR_MITIGATION_SEL_SHIFT; - - led->pdata->iled_thrsh_val = FLASH_LED_MITIGATION_THRSH_DEFAULT; + led->pdata->iled_thrsh_val = FLASH_LED_CHGR_MITIGATION_THRSH_DEFAULT; rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val); if (!rc) { led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val); @@ -1978,7 +2020,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led, return rc; } - if (led->pdata->iled_thrsh_val > FLASH_LED_MITIGATION_THRSH_MAX) { + if (led->pdata->iled_thrsh_val > FLASH_LED_CHGR_MITIGATION_THRSH_MAX) { pr_err("Invalid iled_thrsh_val specified\n"); return -EINVAL; } @@ -2094,22 +2136,24 @@ static int qpnp_flash_led_probe(struct platform_device *pdev) if (!strcmp("flash", temp_string) || !strcmp("torch", temp_string)) { rc = qpnp_flash_led_parse_each_led_dt(led, - &led->fnode[i++], temp); + &led->fnode[i], temp); if (rc < 0) { pr_err("Unable to parse flash node %d rc=%d\n", i, rc); goto error_led_register; } + i++; } if (!strcmp("switch", temp_string)) { rc = qpnp_flash_led_parse_and_register_switch(led, - &led->snode[j++], temp); + &led->snode[j], temp); if (rc < 0) { pr_err("Unable to parse and register switch node, rc=%d\n", rc); goto error_switch_register; } + j++; } } diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h index 139a4c9b49ee..b283f6277b87 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h @@ -497,6 +497,7 @@ struct msm_vfe_src_info { enum msm_vfe_dual_hw_type dual_hw_type; struct msm_vfe_dual_hw_ms_info dual_hw_ms_info; bool accept_frame; + uint32_t lpm; }; struct msm_vfe_fetch_engine_info { @@ -599,7 +600,7 @@ struct msm_vfe_tasklet_queue_cmd { struct vfe_device *vfe_dev; }; -#define MSM_VFE_TASKLETQ_SIZE 200 +#define MSM_VFE_TASKLETQ_SIZE 400 enum msm_vfe_overflow_state { NO_OVERFLOW, diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index ebd3a32281d7..63e46125c292 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -773,6 +773,40 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev, } } +static int msm_isp_check_sync_time(struct msm_vfe_src_info *src_info, + struct msm_isp_timestamp *ts, + struct master_slave_resource_info *ms_res) +{ + int i; + struct msm_vfe_src_info *master_src_info = NULL; + uint32_t master_time = 0, current_time; + + if (!ms_res->src_sof_mask) + return 0; + + for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) { + if (ms_res->src_info[i] == NULL) + continue; + if (src_info == ms_res->src_info[i] || + ms_res->src_info[i]->active == 0) + continue; + if 
(ms_res->src_sof_mask & + (1 << ms_res->src_info[i]->dual_hw_ms_info.index)) { + master_src_info = ms_res->src_info[i]; + break; + } + } + if (!master_src_info) + return 0; + master_time = master_src_info-> + dual_hw_ms_info.sof_info.mono_timestamp_ms; + current_time = ts->buf_time.tv_sec * 1000 + + ts->buf_time.tv_usec / 1000; + if ((current_time - master_time) > ms_res->sof_delta_threshold) + return 1; + return 0; +} + static void msm_isp_sync_dual_cam_frame_id( struct vfe_device *vfe_dev, struct master_slave_resource_info *ms_res, @@ -787,11 +821,24 @@ static void msm_isp_sync_dual_cam_frame_id( if (src_info->dual_hw_ms_info.sync_state == ms_res->dual_sync_mode) { - (frame_src == VFE_PIX_0) ? src_info->frame_id += + if (msm_isp_check_sync_time(src_info, ts, ms_res) == 0) { + (frame_src == VFE_PIX_0) ? src_info->frame_id += vfe_dev->axi_data.src_info[frame_src]. sof_counter_step : src_info->frame_id++; - return; + return; + } + ms_res->src_sof_mask = 0; + ms_res->active_src_mask = 0; + for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) { + if (ms_res->src_info[i] == NULL) + continue; + if (ms_res->src_info[i]->active == 0) + continue; + ms_res->src_info[i]->dual_hw_ms_info. + sync_state = + MSM_ISP_DUAL_CAM_ASYNC; + } } WARN_ON(ms_res->dual_sync_mode == MSM_ISP_DUAL_CAM_ASYNC); @@ -948,8 +995,6 @@ static void msm_isp_update_pd_stats_idx(struct vfe_device *vfe_dev, uint32_t pingpong_status = 0, pingpong_bit = 0; struct msm_isp_buffer *done_buf = NULL; int vfe_idx = -1; - /* initialize pd_buf_idx with an invalid index 0xF */ - vfe_dev->pd_buf_idx = 0xF; if (frame_src < VFE_RAW_0 || frame_src > VFE_RAW_2) return; @@ -2421,6 +2466,7 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg) int i, rc = 0; uint64_t total_bandwidth = 0; int vfe_idx; + uint32_t intf; unsigned long flags; struct msm_vfe_axi_stream *stream_info; struct msm_vfe_dual_lpm_mode *ab_ib_vote = NULL; @@ -2436,7 +2482,13 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg) stream_info = msm_isp_get_stream_common_data(vfe_dev, ab_ib_vote->stream_src[i]); + if (stream_info == NULL) + continue; + /* loop all stream on current session */ spin_lock_irqsave(&stream_info->lock, flags); + intf = SRC_TO_INTF(stream_info->stream_src); + vfe_dev->axi_data.src_info[intf].lpm = + ab_ib_vote->lpm_mode; if (stream_info->state == ACTIVE) { vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, @@ -2457,7 +2509,12 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg) stream_info = msm_isp_get_stream_common_data(vfe_dev, ab_ib_vote->stream_src[i]); + if (stream_info == NULL) + continue; spin_lock_irqsave(&stream_info->lock, flags); + intf = SRC_TO_INTF(stream_info->stream_src); + vfe_dev->axi_data.src_info[intf].lpm = + ab_ib_vote->lpm_mode; if (stream_info->state == PAUSED) { vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, @@ -2883,7 +2940,9 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev, * those state transitions instead of directly forcing stream to * be INACTIVE */ - if (stream_info->state != PAUSED) { + intf = SRC_TO_INTF(stream_info->stream_src); + if ((!vfe_dev->axi_data.src_info[intf].lpm) || + stream_info->state != PAUSED) { while (stream_info->state != ACTIVE) __msm_isp_axi_stream_update(stream_info, ×tamp); @@ -2900,10 +2959,12 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev, vfe_dev->hw_info->vfe_ops.axi_ops. 
clear_wm_irq_mask(vfe_dev, stream_info); } - if (stream_info->state == ACTIVE) { + if (stream_info->state == ACTIVE && + !vfe_dev->axi_data.src_info[intf].lpm) { init_completion(&stream_info->inactive_comp); stream_info->state = STOP_PENDING; - } else if (stream_info->state == PAUSED) { + } else if (vfe_dev->axi_data.src_info[intf].lpm || + stream_info->state == PAUSED) { /* don't wait for reg update */ stream_info->state = STOP_PENDING; msm_isp_axi_stream_enable_cfg(stream_info); @@ -3018,6 +3079,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, int k; struct vfe_device *vfe_dev; struct msm_vfe_axi_shared_data *axi_data = &vfe_dev_ioctl->axi_data; + uint32_t intf; if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM) return -EINVAL; @@ -3081,7 +3143,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, cfg_wm_irq_mask(vfe_dev, stream_info); } } - + intf = SRC_TO_INTF(stream_info->stream_src); init_completion(&stream_info->active_comp); stream_info->state = START_PENDING; msm_isp_update_intf_stream_cnt(stream_info, 1); @@ -3091,6 +3153,11 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, vfe_dev_ioctl->pdev->id); if (src_state) { src_mask |= (1 << SRC_TO_INTF(stream_info->stream_src)); + if (vfe_dev_ioctl->axi_data.src_info[intf].lpm) { + while (stream_info->state != ACTIVE) + __msm_isp_axi_stream_update( + stream_info, ×tamp); + } } else { for (k = 0; k < stream_info->num_isp; k++) { vfe_dev = stream_info->vfe_dev[k]; @@ -3365,22 +3432,21 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev, /* * If frame_id = 1 then no eof check is needed */ - if (vfe_dev->axi_data.src_info[VFE_PIX_0].active && - vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame == false) { + if (vfe_dev->axi_data.src_info[frame_src].active && + frame_src == VFE_PIX_0 && + vfe_dev->axi_data.src_info[frame_src].accept_frame == false) { pr_debug("%s:%d invalid time to request frame %d\n", __func__, __LINE__, frame_id); goto error; } - if ((vfe_dev->axi_data.src_info[VFE_PIX_0].active && (frame_id != - vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id + vfe_dev-> - axi_data.src_info[VFE_PIX_0].sof_counter_step)) || - ((!vfe_dev->axi_data.src_info[VFE_PIX_0].active) && (frame_id != + if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id != vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev-> - axi_data.src_info[frame_src].sof_counter_step))) { + axi_data.src_info[VFE_PIX_0].sof_counter_step)) || + ((!vfe_dev->axi_data.src_info[frame_src].active))) { pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n", __func__, __LINE__, frame_id, - vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id, - vfe_dev->axi_data.src_info[VFE_PIX_0].active); + vfe_dev->axi_data.src_info[frame_src].frame_id, + vfe_dev->axi_data.src_info[frame_src].active); goto error; } if (stream_info->undelivered_request_cnt >= MAX_BUFFERS_IN_HW) { @@ -3887,6 +3953,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg) &update_cmd->req_frm_ver2; stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(req_frm->stream_handle)); + if (stream_info == NULL) { + pr_err_ratelimited("%s: stream_info is NULL\n", + __func__); + rc = -EINVAL; + break; + } rc = msm_isp_request_frame(vfe_dev, stream_info, req_frm->user_stream_id, req_frm->frame_id, diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h index 65009cb22286..a8d4cfb43927 100644 --- 
a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h @@ -141,6 +141,11 @@ static inline struct msm_vfe_axi_stream *msm_isp_get_stream_common_data( struct msm_vfe_common_dev_data *common_data = vfe_dev->common_data; struct msm_vfe_axi_stream *stream_info; + if (stream_idx >= VFE_AXI_SRC_MAX) { + pr_err("invalid stream_idx %d\n", stream_idx); + return NULL; + } + if (vfe_dev->is_split && stream_idx < RDI_INTF_0) stream_info = &common_data->streams[stream_idx]; else diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index f2cf4d477b3f..f92c67150215 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -228,9 +228,10 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev, done_buf->buf_idx; stats_event->pd_stats_idx = 0xF; - if (stream_info->stats_type == MSM_ISP_STATS_BF) + if (stream_info->stats_type == MSM_ISP_STATS_BF) { stats_event->pd_stats_idx = vfe_dev->pd_buf_idx; - + vfe_dev->pd_buf_idx = 0xF; + } if (comp_stats_type_mask == NULL) { stats_event->stats_mask = 1 << stream_info->stats_type; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index 765bf6521759..2a9bb6e8e505 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -2070,6 +2070,7 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev, if (queue_cmd->cmd_used) { pr_err("%s: Tasklet queue overflow: %d\n", __func__, vfe_dev->pdev->id); + spin_unlock_irqrestore(&tasklet->tasklet_lock, flags); return; } else { atomic_add(1, &vfe_dev->irq_cnt); @@ -2314,6 +2315,9 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) vfe_dev->reg_update_requested = 0; /* Register page fault handler */ vfe_dev->buf_mgr->pagefault_debug_disable = 0; + /* initialize pd_buf_idx with an invalid index 0xF */ + vfe_dev->pd_buf_idx = 0xF; + cam_smmu_reg_client_page_fault_handler( vfe_dev->buf_mgr->iommu_hdl, msm_vfe_iommu_fault_handler, vfe_dev); diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c index 41d8ef577a27..caf6639f5151 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c @@ -49,17 +49,31 @@ #define ISPIF_TIMEOUT_ALL_US 1000000 #define ISPIF_SOF_DEBUG_COUNT 5 +/* 3D Threshold value according guidelines for line width 1280 */ +#define STEREO_DEFAULT_3D_THRESHOLD 0x36 + +/* + * Overflows before restarting interface during stereo usecase + * to give some tolerance for cases when the two sensors sync fails + * this value is chosen by experiment + */ +#define MAX_PIX_OVERFLOW_ERROR_COUNT 10 +static int pix_overflow_error_count[VFE_MAX] = { 0 }; + #undef CDBG #ifdef CONFIG_MSMB_CAMERA_DEBUG #define CDBG(fmt, args...) pr_debug(fmt, ##args) #else -#define CDBG(fmt, args...) do { } while (0) +#define CDBG(fmt, args...) 
#endif static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable); static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh); static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd, unsigned int cmd, void *arg); +static long msm_ispif_dispatch_cmd(enum ispif_cfg_type_t cmd, + struct ispif_device *ispif, + struct msm_ispif_param_data_ext *params); int msm_ispif_get_clk_info(struct ispif_device *ispif_dev, struct platform_device *pdev); @@ -249,16 +263,7 @@ static long msm_ispif_cmd_ext(struct v4l2_subdev *sd, } mutex_lock(&ispif->mutex); - switch (pcdata.cfg_type) { - case ISPIF_CFG2: - rc = msm_ispif_config2(ispif, params); - msm_ispif_io_dump_reg(ispif); - break; - default: - pr_err("%s: invalid cfg_type\n", __func__); - rc = -EINVAL; - break; - } + rc = msm_ispif_dispatch_cmd(pcdata.cfg_type, ispif, params); mutex_unlock(&ispif->mutex); kfree(params); return rc; @@ -855,15 +860,34 @@ static uint16_t msm_ispif_get_cids_mask_from_cfg( return cids_mask; } + +static uint16_t msm_ispif_get_right_cids_mask_from_cfg( + struct msm_ispif_right_param_entry *entry, int num_cids) +{ + int i; + uint16_t cids_mask = 0; + + if (WARN_ON(!entry)) + return cids_mask; + + for (i = 0; i < num_cids && i < MAX_CID_CH_PARAM_ENTRY; i++) { + if (entry->cids[i] < CID_MAX) + cids_mask |= (1 << entry->cids[i]); + } + + return cids_mask; +} + static int msm_ispif_config(struct ispif_device *ispif, void *data) { int rc = 0, i = 0; - uint16_t cid_mask; + uint16_t cid_mask = 0; + uint16_t cid_right_mask = 0; enum msm_ispif_intftype intftype; enum msm_ispif_vfe_intf vfe_intf; - struct msm_ispif_param_data *params = - (struct msm_ispif_param_data *)data; + struct msm_ispif_param_data_ext *params = + (struct msm_ispif_param_data_ext *)data; BUG_ON(!ispif); BUG_ON(!params); @@ -913,9 +937,15 @@ static int msm_ispif_config(struct ispif_device *ispif, return -EINVAL; } - if (ispif->csid_version >= CSID_VERSION_V30) + if (ispif->csid_version >= CSID_VERSION_V30) { msm_ispif_select_clk_mux(ispif, intftype, params->entries[i].csid, vfe_intf); + if (intftype == PIX0 && params->stereo_enable && + params->right_entries[i].csid < CSID_MAX) + msm_ispif_select_clk_mux(ispif, PIX1, + params->right_entries[i].csid, + vfe_intf); + } rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf); if (rc) { @@ -926,10 +956,26 @@ static int msm_ispif_config(struct ispif_device *ispif, msm_ispif_sel_csid_core(ispif, intftype, params->entries[i].csid, vfe_intf); + if (intftype == PIX0 && params->stereo_enable && + params->right_entries[i].csid < CSID_MAX) + /* configure right stereo csid */ + msm_ispif_sel_csid_core(ispif, PIX1, + params->right_entries[i].csid, vfe_intf); + cid_mask = msm_ispif_get_cids_mask_from_cfg( ¶ms->entries[i]); msm_ispif_enable_intf_cids(ispif, intftype, cid_mask, vfe_intf, 1); + if (params->stereo_enable) + cid_right_mask = msm_ispif_get_right_cids_mask_from_cfg( + ¶ms->right_entries[i], + params->entries[i].num_cids); + else + cid_right_mask = 0; + if (cid_right_mask && params->stereo_enable) + /* configure right stereo cids */ + msm_ispif_enable_intf_cids(ispif, PIX1, + cid_right_mask, vfe_intf, 1); if (params->entries[i].crop_enable) msm_ispif_enable_crop(ispif, intftype, vfe_intf, params->entries[i].crop_start_pixel, @@ -962,8 +1008,28 @@ static int msm_ispif_config(struct ispif_device *ispif, return rc; } +static void msm_ispif_config_stereo(struct ispif_device *ispif, + struct msm_ispif_param_data_ext *params) { + + int i; + enum msm_ispif_vfe_intf vfe_intf; + 
+ for (i = 0; i < params->num; i++) { + if (params->entries[i].intftype == PIX0 && + params->stereo_enable && + params->right_entries[i].csid < CSID_MAX) { + vfe_intf = params->entries[i].vfe_intf; + msm_camera_io_w_mb(0x3, + ispif->base + ISPIF_VFE_m_OUTPUT_SEL(vfe_intf)); + msm_camera_io_w_mb(STEREO_DEFAULT_3D_THRESHOLD, + ispif->base + + ISPIF_VFE_m_3D_THRESHOLD(vfe_intf)); + } + } +} + static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits, - struct msm_ispif_param_data *params) + struct msm_ispif_param_data_ext *params) { uint8_t vc; int i, k; @@ -1008,6 +1074,19 @@ static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits, ispif->applied_intf_cmd[vfe_intf].intf_cmd |= (cmd_bits << (vc * 2 + intf_type * 8)); } + if (intf_type == PIX0 && params->stereo_enable && + params->right_entries[i].cids[k] < CID_MAX) { + cid = params->right_entries[i].cids[k]; + vc = cid / 4; + + /* fill right stereo command */ + /* zero 2 bits */ + ispif->applied_intf_cmd[vfe_intf].intf_cmd &= + ~(0x3 << (vc * 2 + PIX1 * 8)); + /* set cmd bits */ + ispif->applied_intf_cmd[vfe_intf].intf_cmd |= + (cmd_bits << (vc * 2 + PIX1 * 8)); + } } /* cmd for PIX0, PIX1, RDI0, RDI1 */ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF) @@ -1024,7 +1103,7 @@ static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits, } static int msm_ispif_stop_immediately(struct ispif_device *ispif, - struct msm_ispif_param_data *params) + struct msm_ispif_param_data_ext *params) { int i, rc = 0; uint16_t cid_mask = 0; @@ -1052,13 +1131,22 @@ static int msm_ispif_stop_immediately(struct ispif_device *ispif, ¶ms->entries[i]); msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype, cid_mask, params->entries[i].vfe_intf, 0); + if (params->stereo_enable) { + cid_mask = msm_ispif_get_right_cids_mask_from_cfg( + ¶ms->right_entries[i], + params->entries[i].num_cids); + if (cid_mask) + msm_ispif_enable_intf_cids(ispif, + params->entries[i].intftype, cid_mask, + params->entries[i].vfe_intf, 0); + } } return rc; } static int msm_ispif_start_frame_boundary(struct ispif_device *ispif, - struct msm_ispif_param_data *params) + struct msm_ispif_param_data_ext *params) { int rc = 0; @@ -1074,13 +1162,14 @@ static int msm_ispif_start_frame_boundary(struct ispif_device *ispif, rc = -EINVAL; return rc; } + msm_ispif_config_stereo(ispif, params); msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params); return rc; } static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif, - struct msm_ispif_param_data *params) + struct msm_ispif_param_data_ext *params) { int rc = 0, i; long timeout = 0; @@ -1222,10 +1311,11 @@ end: } static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif, - struct msm_ispif_param_data *params) + struct msm_ispif_param_data_ext *params) { int i, rc = 0; uint16_t cid_mask = 0; + uint16_t cid_right_mask = 0; uint32_t intf_addr; enum msm_ispif_vfe_intf vfe_intf; uint32_t stop_flag = 0; @@ -1263,6 +1353,13 @@ static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif, for (i = 0; i < params->num; i++) { cid_mask = msm_ispif_get_cids_mask_from_cfg(¶ms->entries[i]); + if (params->stereo_enable) + cid_right_mask = + msm_ispif_get_right_cids_mask_from_cfg( + ¶ms->right_entries[i], + params->entries[i].num_cids); + else + cid_right_mask = 0; vfe_intf = params->entries[i].vfe_intf; switch (params->entries[i].intftype) { @@ -1294,10 +1391,24 @@ static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif, 
ISPIF_TIMEOUT_ALL_US); if (rc < 0) goto end; + if (cid_right_mask) { + intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1); + rc = readl_poll_timeout(ispif->base + intf_addr, + stop_flag, + (stop_flag & 0xF) == 0xF, + ISPIF_TIMEOUT_SLEEP_US, + ISPIF_TIMEOUT_ALL_US); + if (rc < 0) + goto end; + } /* disable CIDs in CID_MASK register */ msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype, cid_mask, vfe_intf, 0); + if (cid_right_mask) + msm_ispif_enable_intf_cids(ispif, + params->entries[i].intftype, cid_right_mask, + params->entries[i].vfe_intf, 0); } end: @@ -1318,6 +1429,14 @@ static void ispif_process_irq(struct ispif_device *ispif, ispif->sof_count[vfe_id].sof_cnt[PIX0]++; ispif->ispif_sof_debug++; } + if (out[vfe_id].ispifIrqStatus1 & + ISPIF_IRQ_STATUS_PIX_SOF_MASK) { + if (ispif->ispif_sof_debug < ISPIF_SOF_DEBUG_COUNT*2) + pr_err("%s: PIX1 frame id: %u\n", __func__, + ispif->sof_count[vfe_id].sof_cnt[PIX1]); + ispif->sof_count[vfe_id].sof_cnt[PIX1]++; + ispif->ispif_sof_debug++; + } if (out[vfe_id].ispifIrqStatus0 & ISPIF_IRQ_STATUS_RDI0_SOF_MASK) { if (ispif->ispif_rdi0_debug < ISPIF_SOF_DEBUG_COUNT) @@ -1344,12 +1463,55 @@ static void ispif_process_irq(struct ispif_device *ispif, } } +static int msm_ispif_reconfig_3d_output(struct ispif_device *ispif, + enum msm_ispif_vfe_intf vfe_id) +{ + uint32_t reg_data; + + if (WARN_ON(!ispif)) + return -EINVAL; + + if (!((vfe_id == VFE0) || (vfe_id == VFE1))) { + pr_err("%s;%d Cannot reconfigure 3D mode for VFE%d", __func__, + __LINE__, vfe_id); + return -EINVAL; + } + pr_info("%s;%d Reconfiguring 3D mode for VFE%d", __func__, __LINE__, + vfe_id); + reg_data = 0xFFFCFFFC; + msm_camera_io_w_mb(reg_data, ispif->base + + ISPIF_VFE_m_INTF_CMD_0(vfe_id)); + msm_camera_io_w_mb(reg_data, ispif->base + + ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR); + + if (vfe_id == VFE0) { + reg_data = 0; + reg_data |= (PIX_0_VFE_RST_STB | PIX_1_VFE_RST_STB | + STROBED_RST_EN | PIX_0_CSID_RST_STB | + PIX_1_CSID_RST_STB | PIX_OUTPUT_0_MISR_RST_STB); + msm_camera_io_w_mb(reg_data, ispif->base + ISPIF_RST_CMD_ADDR); + } else { + reg_data = 0; + reg_data |= (PIX_0_VFE_RST_STB | PIX_1_VFE_RST_STB | + STROBED_RST_EN | PIX_0_CSID_RST_STB | + PIX_1_CSID_RST_STB | PIX_OUTPUT_0_MISR_RST_STB); + msm_camera_io_w_mb(reg_data, ispif->base + + ISPIF_RST_CMD_1_ADDR); + } + + reg_data = 0xFFFDFFFD; + msm_camera_io_w_mb(reg_data, ispif->base + + ISPIF_VFE_m_INTF_CMD_0(vfe_id)); + return 0; +} + static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out, void *data) { struct ispif_device *ispif = (struct ispif_device *)data; bool fatal_err = false; int i = 0; + uint32_t reg_data; BUG_ON(!ispif); BUG_ON(!out); @@ -1400,6 +1562,12 @@ static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out, fatal_err = true; } + if (out[VFE0].ispifIrqStatus1 & PIX_INTF_1_OVERFLOW_IRQ) { + pr_err_ratelimited("%s: VFE0 pix1 overflow.\n", + __func__); + fatal_err = true; + } + if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) { pr_err_ratelimited("%s: VFE0 rdi0 overflow.\n", __func__); @@ -1432,6 +1600,12 @@ static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out, fatal_err = true; } + if (out[VFE1].ispifIrqStatus1 & PIX_INTF_1_OVERFLOW_IRQ) { + pr_err_ratelimited("%s: VFE1 pix1 overflow.\n", + __func__); + fatal_err = true; + } + if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) { pr_err_ratelimited("%s: VFE1 rdi0 overflow.\n", __func__); @@ -1453,6 +1627,43 @@ static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out, 
ispif_process_irq(ispif, out, VFE1); } + if ((out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) || + (out[VFE0].ispifIrqStatus1 & PIX_INTF_0_OVERFLOW_IRQ) || + (out[VFE0].ispifIrqStatus2 & (L_R_SOF_MISMATCH_ERR_IRQ | + L_R_EOF_MISMATCH_ERR_IRQ | L_R_SOL_MISMATCH_ERR_IRQ))) { + reg_data = msm_camera_io_r(ispif->base + + ISPIF_VFE_m_OUTPUT_SEL(VFE0)); + if ((reg_data & 0x03) == VFE_PIX_INTF_SEL_3D) { + pix_overflow_error_count[VFE0]++; + if (pix_overflow_error_count[VFE0] >= + MAX_PIX_OVERFLOW_ERROR_COUNT) { + msm_ispif_reconfig_3d_output(ispif, VFE0); + pix_overflow_error_count[VFE0] = 0; + } + fatal_err = false; + } + } + + if (ispif->vfe_info.num_vfe > 1) { + if ((out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) || + (out[VFE1].ispifIrqStatus1 & PIX_INTF_0_OVERFLOW_IRQ) || + (out[VFE1].ispifIrqStatus2 & (L_R_SOF_MISMATCH_ERR_IRQ | + L_R_EOF_MISMATCH_ERR_IRQ | L_R_SOL_MISMATCH_ERR_IRQ))) { + reg_data = msm_camera_io_r(ispif->base + + ISPIF_VFE_m_OUTPUT_SEL(VFE1)); + if ((reg_data & 0x03) == VFE_PIX_INTF_SEL_3D) { + pix_overflow_error_count[VFE1]++; + if (pix_overflow_error_count[VFE1] >= + MAX_PIX_OVERFLOW_ERROR_COUNT) { + msm_ispif_reconfig_3d_output(ispif, + VFE1); + pix_overflow_error_count[VFE1] = 0; + } + } + fatal_err = false; + } + } + if (fatal_err == true) { pr_err_ratelimited("%s: fatal error, stop ispif immediately\n", __func__); @@ -1561,61 +1772,97 @@ static void msm_ispif_release(struct ispif_device *ispif) pr_err("%s: failed to remove vote for AHB\n", __func__); } -static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg) +static long msm_ispif_dispatch_cmd(enum ispif_cfg_type_t cmd, + struct ispif_device *ispif, + struct msm_ispif_param_data_ext *params) { long rc = 0; - struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg; - struct ispif_device *ispif = - (struct ispif_device *)v4l2_get_subdevdata(sd); - - BUG_ON(!sd); - BUG_ON(!pcdata); - mutex_lock(&ispif->mutex); - switch (pcdata->cfg_type) { - case ISPIF_ENABLE_REG_DUMP: - ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */ - break; - case ISPIF_INIT: - rc = msm_ispif_init(ispif, pcdata->csid_version); - msm_ispif_io_dump_reg(ispif); - break; + switch (cmd) { case ISPIF_CFG: - rc = msm_ispif_config(ispif, &pcdata->params); + rc = msm_ispif_config(ispif, params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_START_FRAME_BOUNDARY: - rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params); + rc = msm_ispif_start_frame_boundary(ispif, params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_RESTART_FRAME_BOUNDARY: - rc = msm_ispif_restart_frame_boundary(ispif, &pcdata->params); + rc = msm_ispif_restart_frame_boundary(ispif, params); msm_ispif_io_dump_reg(ispif); break; - case ISPIF_STOP_FRAME_BOUNDARY: - rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params); + rc = msm_ispif_stop_frame_boundary(ispif, params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_STOP_IMMEDIATELY: - rc = msm_ispif_stop_immediately(ispif, &pcdata->params); + rc = msm_ispif_stop_immediately(ispif, params); msm_ispif_io_dump_reg(ispif); break; case ISPIF_RELEASE: msm_ispif_reset(ispif); msm_ispif_reset_hw(ispif); break; - case ISPIF_SET_VFE_INFO: - rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info); + case ISPIF_CFG2: + rc = msm_ispif_config2(ispif, params); + msm_ispif_io_dump_reg(ispif); break; default: pr_err("%s: invalid cfg_type\n", __func__); rc = -EINVAL; break; } + return rc; +} + +static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg) +{ + long rc = 0; + struct ispif_cfg_data *pcdata = 
(struct ispif_cfg_data *)arg; + struct ispif_device *ispif = + (struct ispif_device *)v4l2_get_subdevdata(sd); + int i; + struct msm_ispif_param_data_ext params; + + if (WARN_ON(!sd) || WARN_ON(!pcdata)) + return -EINVAL; + + mutex_lock(&ispif->mutex); + switch (pcdata->cfg_type) { + case ISPIF_ENABLE_REG_DUMP: + /* save dump config */ + ispif->enb_dump_reg = pcdata->reg_dump; + break; + case ISPIF_INIT: + rc = msm_ispif_init(ispif, pcdata->csid_version); + msm_ispif_io_dump_reg(ispif); + break; + case ISPIF_SET_VFE_INFO: + rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info); + break; + default: + memset(¶ms, 0, sizeof(params)); + if (pcdata->params.num > MAX_PARAM_ENTRIES) { + pr_err("%s: invalid num entries %u\n", __func__, + pcdata->params.num); + rc = -EINVAL; + } else { + params.num = pcdata->params.num; + for (i = 0; i < pcdata->params.num; i++) + memcpy(¶ms.entries[i], + &pcdata->params.entries[i], + sizeof(struct msm_ispif_params_entry)); + params.stereo_enable = 0; + rc = msm_ispif_dispatch_cmd(pcdata->cfg_type, ispif, + ¶ms); + } + break; + } mutex_unlock(&ispif->mutex); + return rc; } + static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops; static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd, diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h index d488ca618537..49d7d0f7624e 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h @@ -87,6 +87,12 @@ #define MISC_LOGIC_RST_STB BIT(1) #define STROBED_RST_EN BIT(0) +#define VFE_PIX_INTF_SEL_3D 0x3 +#define PIX_OUTPUT_0_MISR_RST_STB BIT(16) +#define L_R_SOF_MISMATCH_ERR_IRQ BIT(16) +#define L_R_EOF_MISMATCH_ERR_IRQ BIT(17) +#define L_R_SOL_MISMATCH_ERR_IRQ BIT(18) + #define ISPIF_RST_CMD_MASK 0xFE1C77FF #define ISPIF_RST_CMD_1_MASK 0xFFFFFFFF /* undefined */ diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h index 8ae61dc2d4f6..9abf55efc46c 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h +++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h @@ -22,6 +22,7 @@ #define ISPIF_VFE(m) ((m) * 0x200) #define ISPIF_VFE_m_CTRL_0(m) (0x200 + ISPIF_VFE(m)) +#define ISPIF_VFE_m_CTRL_1(m) (0x204 + ISPIF_VFE(m)) #define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m)) #define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20C + ISPIF_VFE(m)) #define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + ISPIF_VFE(m)) @@ -71,6 +72,12 @@ #define MISC_LOGIC_RST_STB BIT(1) #define STROBED_RST_EN BIT(0) +#define VFE_PIX_INTF_SEL_3D 0x3 +#define PIX_OUTPUT_0_MISR_RST_STB BIT(16) +#define L_R_SOF_MISMATCH_ERR_IRQ BIT(16) +#define L_R_EOF_MISMATCH_ERR_IRQ BIT(17) +#define L_R_SOL_MISMATCH_ERR_IRQ BIT(18) + #define ISPIF_RST_CMD_MASK 0xFE0F1FFF #define ISPIF_RST_CMD_1_MASK 0xFC0F1FF9 @@ -78,6 +85,7 @@ #define ISPIF_RST_CMD_1_MASK_RESTART 0x00001FF9 #define PIX_INTF_0_OVERFLOW_IRQ BIT(12) +#define PIX_INTF_1_OVERFLOW_IRQ BIT(12) #define RAW_INTF_0_OVERFLOW_IRQ BIT(25) #define RAW_INTF_1_OVERFLOW_IRQ BIT(25) #define RAW_INTF_2_OVERFLOW_IRQ BIT(12) diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h index 94cc974441ee..5f2aa06f3e13 100644 --- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h +++ 
b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h @@ -74,6 +74,12 @@ #define MISC_LOGIC_RST_STB BIT(1) #define STROBED_RST_EN BIT(0) +#define VFE_PIX_INTF_SEL_3D 0x3 +#define PIX_OUTPUT_0_MISR_RST_STB BIT(16) +#define L_R_SOF_MISMATCH_ERR_IRQ BIT(16) +#define L_R_EOF_MISMATCH_ERR_IRQ BIT(17) +#define L_R_SOL_MISMATCH_ERR_IRQ BIT(18) + #define ISPIF_RST_CMD_MASK 0xFE7F1FFF #define ISPIF_RST_CMD_1_MASK 0xFC7F1FF9 @@ -81,6 +87,7 @@ #define ISPIF_RST_CMD_1_MASK_RESTART 0x7F1FF9 #define PIX_INTF_0_OVERFLOW_IRQ BIT(12) +#define PIX_INTF_1_OVERFLOW_IRQ BIT(12) #define RAW_INTF_0_OVERFLOW_IRQ BIT(25) #define RAW_INTF_1_OVERFLOW_IRQ BIT(25) #define RAW_INTF_2_OVERFLOW_IRQ BIT(12) diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c index b067c4916341..0ff270bb8410 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c +++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c @@ -201,6 +201,7 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev, enum cci_i2c_queue_t queue) { int32_t rc = 0; + unsigned long flags; uint32_t read_val = 0; uint32_t reg_offset = master * 0x200 + queue * 0x100; read_val = msm_camera_io_r_mb(cci_dev->base + @@ -223,6 +224,8 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev, CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset); reg_val = 1 << ((master * 2) + queue); CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__); + spin_lock_irqsave(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); atomic_set(&cci_dev->cci_master_info[master]. done_pending[queue], 1); msm_camera_io_w_mb(reg_val, cci_dev->base + @@ -230,6 +233,8 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev, CDBG("%s line %d wait_for_completion_timeout\n", __func__, __LINE__); atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1); + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); rc = wait_for_completion_timeout(&cci_dev-> cci_master_info[master].report_q[queue], CCI_TIMEOUT); if (rc <= 0) { @@ -438,10 +443,17 @@ static int32_t msm_cci_wait_report_cmd(struct cci_device *cci_dev, enum cci_i2c_master_t master, enum cci_i2c_queue_t queue) { + unsigned long flags; uint32_t reg_val = 1 << ((master * 2) + queue); msm_cci_load_report_cmd(cci_dev, master, queue); + + spin_lock_irqsave(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1); atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1); + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); + msm_camera_io_w_mb(reg_val, cci_dev->base + CCI_QUEUE_START_ADDR); return msm_cci_wait(cci_dev, master, queue); @@ -451,13 +463,19 @@ static void msm_cci_process_half_q(struct cci_device *cci_dev, enum cci_i2c_master_t master, enum cci_i2c_queue_t queue) { + unsigned long flags; uint32_t reg_val = 1 << ((master * 2) + queue); + + spin_lock_irqsave(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); if (0 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) { msm_cci_load_report_cmd(cci_dev, master, queue); atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1); msm_camera_io_w_mb(reg_val, cci_dev->base + CCI_QUEUE_START_ADDR); } + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. 
+ lock_q[queue], flags); } static int32_t msm_cci_process_full_q(struct cci_device *cci_dev, @@ -465,15 +483,23 @@ static int32_t msm_cci_process_full_q(struct cci_device *cci_dev, enum cci_i2c_queue_t queue) { int32_t rc = 0; + unsigned long flags; + + spin_lock_irqsave(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); if (1 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) { atomic_set(&cci_dev->cci_master_info[master]. done_pending[queue], 1); + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); rc = msm_cci_wait(cci_dev, master, queue); if (rc < 0) { pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc); return rc; } } else { + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); rc = msm_cci_wait_report_cmd(cci_dev, master, queue); if (rc < 0) { pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc); @@ -501,8 +527,13 @@ static int32_t msm_cci_transfer_end(struct cci_device *cci_dev, enum cci_i2c_queue_t queue) { int32_t rc = 0; + unsigned long flags; + spin_lock_irqsave(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); if (0 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) { + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); rc = msm_cci_lock_queue(cci_dev, master, queue, 0); if (rc < 0) { pr_err("%s failed line %d\n", __func__, __LINE__); @@ -516,6 +547,8 @@ static int32_t msm_cci_transfer_end(struct cci_device *cci_dev, } else { atomic_set(&cci_dev->cci_master_info[master]. done_pending[queue], 1); + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); rc = msm_cci_wait(cci_dev, master, queue); if (rc < 0) { pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc); @@ -570,6 +603,7 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev, uint32_t reg_offset; uint32_t val = 0; uint32_t max_queue_size; + unsigned long flags; if (i2c_cmd == NULL) { pr_err("%s:%d Failed line\n", __func__, @@ -613,7 +647,11 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev, msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR + reg_offset); + spin_lock_irqsave(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0); + spin_unlock_irqrestore(&cci_dev->cci_master_info[master]. + lock_q[queue], flags); max_queue_size = cci_dev->cci_i2c_queue_info[master][queue]. max_queue_size; @@ -1641,6 +1679,7 @@ static int32_t msm_cci_config(struct v4l2_subdev *sd, static irqreturn_t msm_cci_irq(int irq_num, void *data) { uint32_t irq; + unsigned long flags; struct cci_device *cci_dev = data; irq = msm_camera_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR); msm_camera_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR); @@ -1667,22 +1706,30 @@ static irqreturn_t msm_cci_irq(int irq_num, void *data) if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) { struct msm_camera_cci_master_info *cci_master_info; cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_0]. + lock_q[QUEUE_0], flags); atomic_set(&cci_master_info->q_free[QUEUE_0], 0); cci_master_info->status = 0; if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) { complete(&cci_master_info->report_q[QUEUE_0]); atomic_set(&cci_master_info->done_pending[QUEUE_0], 0); } + spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_0]. 
+ lock_q[QUEUE_0], flags); } if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) { struct msm_camera_cci_master_info *cci_master_info; cci_master_info = &cci_dev->cci_master_info[MASTER_0]; + spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_0]. + lock_q[QUEUE_1], flags); atomic_set(&cci_master_info->q_free[QUEUE_1], 0); cci_master_info->status = 0; if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) { complete(&cci_master_info->report_q[QUEUE_1]); atomic_set(&cci_master_info->done_pending[QUEUE_1], 0); } + spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_0]. + lock_q[QUEUE_1], flags); } if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) { cci_dev->cci_master_info[MASTER_1].status = 0; @@ -1691,22 +1738,30 @@ static irqreturn_t msm_cci_irq(int irq_num, void *data) if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) { struct msm_camera_cci_master_info *cci_master_info; cci_master_info = &cci_dev->cci_master_info[MASTER_1]; + spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_1]. + lock_q[QUEUE_0], flags); atomic_set(&cci_master_info->q_free[QUEUE_0], 0); cci_master_info->status = 0; if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) { complete(&cci_master_info->report_q[QUEUE_0]); atomic_set(&cci_master_info->done_pending[QUEUE_0], 0); } + spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_1]. + lock_q[QUEUE_0], flags); } if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) { struct msm_camera_cci_master_info *cci_master_info; cci_master_info = &cci_dev->cci_master_info[MASTER_1]; + spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_1]. + lock_q[QUEUE_1], flags); atomic_set(&cci_master_info->q_free[QUEUE_1], 0); cci_master_info->status = 0; if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) { complete(&cci_master_info->report_q[QUEUE_1]); atomic_set(&cci_master_info->done_pending[QUEUE_1], 0); } + spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_1]. + lock_q[QUEUE_1], flags); } if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) { cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE; @@ -1795,7 +1850,9 @@ static void msm_cci_init_cci_params(struct cci_device *new_cci_dev) mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]); init_completion(&new_cci_dev-> cci_master_info[i].report_q[j]); - } + spin_lock_init(&new_cci_dev-> + cci_master_info[i].lock_q[j]); + } } return; } diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h index 6e39d814bd73..eb615cc7a62c 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h +++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -125,6 +125,7 @@ struct msm_camera_cci_master_info { struct mutex mutex_q[NUM_QUEUES]; struct completion report_q[NUM_QUEUES]; atomic_t done_pending[NUM_QUEUES]; + spinlock_t lock_q[NUM_QUEUES]; }; struct msm_cci_clk_params_t { diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c index 55a743737c59..7ae071176ef4 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c +++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c @@ -227,16 +227,34 @@ static void msm_csid_set_sof_freeze_debug_reg( static int msm_csid_reset(struct csid_device *csid_dev) { int32_t rc = 0; + uint32_t irq = 0, irq_bitshift; + + irq_bitshift = csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift; msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all, csid_dev->base + csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr); rc = wait_for_completion_timeout(&csid_dev->reset_complete, CSID_TIMEOUT); - if (rc <= 0) { + if (rc < 0) { pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n", rc); + } else if (rc == 0) { + irq = msm_camera_io_r(csid_dev->base + + csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr); + pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n", + __func__, csid_dev->pdev->id, irq); + if (irq & (0x1 << irq_bitshift)) { + rc = 1; + CDBG("%s succeeded", __func__); + } else { + rc = 0; + pr_err("%s reset csid_irq_status failed = 0x%x\n", + __func__, irq); + } if (rc == 0) rc = -ETIMEDOUT; + } else { + CDBG("%s succeeded", __func__); } return rc; } diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h index 5d57ec8c28ff..8f55f453bf03 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h +++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h @@ -89,6 +89,7 @@ struct msm_sensor_ctrl_t { uint32_t set_mclk_23880000; uint8_t is_csid_tg_mode; uint32_t is_secure; + uint8_t bypass_video_node_creation; }; int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp); diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c index 1dd2b0d26007..344f1a6f8d92 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c @@ -86,11 +86,14 @@ static int32_t msm_sensor_driver_create_i2c_v4l_subdev struct i2c_client *client = s_ctrl->sensor_i2c_client->client; CDBG("%s %s I2c probe succeeded\n", __func__, client->name); - rc = camera_init_v4l2(&client->dev, &session_id); - if (rc < 0) { - pr_err("failed: camera_init_i2c_v4l2 rc %d", rc); - return rc; + if (s_ctrl->bypass_video_node_creation == 0) { + rc = camera_init_v4l2(&client->dev, &session_id); + if (rc < 0) { + pr_err("failed: camera_init_i2c_v4l2 rc %d", rc); + return rc; + } } + CDBG("%s rc %d session_id %d\n", __func__, rc, session_id); snprintf(s_ctrl->msm_sd.sd.name, sizeof(s_ctrl->msm_sd.sd.name), "%s", @@ -123,11 +126,14 @@ static int32_t msm_sensor_driver_create_v4l_subdev int32_t rc = 0; uint32_t session_id = 0; - rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id); - if (rc < 0) { - pr_err("failed: camera_init_v4l2 rc %d", rc); - return rc; + if (s_ctrl->bypass_video_node_creation == 0) { + rc = 
camera_init_v4l2(&s_ctrl->pdev->dev, &session_id); + if (rc < 0) { + pr_err("failed: camera_init_v4l2 rc %d", rc); + return rc; + } } + CDBG("rc %d session_id %d", rc, session_id); s_ctrl->sensordata->sensor_info->session_id = session_id; @@ -773,6 +779,8 @@ int32_t msm_sensor_driver_probe(void *setting, slave_info32->sensor_init_params; slave_info->output_format = slave_info32->output_format; + slave_info->bypass_video_node_creation = + !!slave_info32->bypass_video_node_creation; kfree(slave_info32); } else #endif @@ -800,7 +808,8 @@ int32_t msm_sensor_driver_probe(void *setting, slave_info->sensor_init_params.position); CDBG("mount %d", slave_info->sensor_init_params.sensor_mount_angle); - + CDBG("bypass video node creation %d", + slave_info->bypass_video_node_creation); /* Validate camera id */ if (slave_info->camera_id >= MAX_CAMERAS) { pr_err("failed: invalid camera id %d max %d", @@ -980,6 +989,9 @@ CSID_TG: */ s_ctrl->is_probe_succeed = 1; + s_ctrl->bypass_video_node_creation = + slave_info->bypass_video_node_creation; + /* * Create /dev/videoX node, comment for now until dummy /dev/videoX * node is created and used by HAL diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index c3a0cfb390c4..34ec6529d8ae 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -1245,6 +1245,33 @@ static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot) goto err_detach; } + ion_free(rot->iclient, handle); + + sde_smmu_ctrl(0); + + return rc; +err_detach: + dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment); +err_put: + dma_buf_put(data->srcp_dma_buf); + data->srcp_dma_buf = NULL; +imap_err: + ion_free(rot->iclient, handle); + + return rc; +} + +/* + * sde_hw_rotator_swts_map - map software timestamp buffer + * @rot: Pointer to rotator hw + * + */ +static int sde_hw_rotator_swts_map(struct sde_hw_rotator *rot) +{ + int rc = 0; + struct sde_mdp_img_data *data = &rot->swts_buf; + + sde_smmu_ctrl(1); rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr, &data->len, DMA_BIDIRECTIONAL); @@ -1264,35 +1291,25 @@ static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot) data->mapped = true; SDEROT_DBG("swts buffer mapped: %pad/%lx va:%p\n", &data->addr, - data->len, rot->swts_buffer); - - ion_free(rot->iclient, handle); - + data->len, rot->swts_buffer); sde_smmu_ctrl(0); - return rc; + kmap_err: sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE, DMA_FROM_DEVICE, data->srcp_dma_buf); err_unmap: dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, DMA_FROM_DEVICE); -err_detach: - dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment); -err_put: - dma_buf_put(data->srcp_dma_buf); - data->srcp_dma_buf = NULL; -imap_err: - ion_free(rot->iclient, handle); return rc; } /* - * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer + * sde_hw_rotator_swtc_unmap - unmap software timestamp buffer * @rot: Pointer to rotator hw */ -static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot) +static void sde_hw_rotator_swtc_unmap(struct sde_hw_rotator *rot) { struct sde_mdp_img_data *data; @@ -1301,9 +1318,29 @@ static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot) dma_buf_end_cpu_access(data->srcp_dma_buf, 0, data->len, DMA_FROM_DEVICE); dma_buf_kunmap(data->srcp_dma_buf, 0, rot->swts_buffer); + rot->swts_buffer = NULL; 
sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE, DMA_FROM_DEVICE, data->srcp_dma_buf); + data->addr = 0x0; + data->len = 0; + + data->mapped = false; +} + +/* + * sde_hw_rotator_swtc_destroy - destroy software timestamp buffer + * @rot: Pointer to rotator hw + */ +static void sde_hw_rotator_swtc_destroy(struct sde_hw_rotator *rot) +{ + struct sde_mdp_img_data *data; + + data = &rot->swts_buf; + + if (data->mapped) + sde_hw_rotator_swtc_unmap(rot); + dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, DMA_FROM_DEVICE); dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment); @@ -1436,6 +1473,7 @@ static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext( struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id) { struct sde_hw_rotator_resource_info *resinfo; + int ret = 0; if (!mgr || !mgr->hw_data) { SDEROT_ERR("null parameters\n"); @@ -1463,8 +1501,21 @@ static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext( else { resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1; - if (resinfo->rot->iclient == NULL) - sde_hw_rotator_swts_create(resinfo->rot); + if (resinfo->rot->iclient == NULL) { + ret = sde_hw_rotator_swts_create(resinfo->rot); + if (ret) { + SDEROT_ERR("swts buffer create failed\n"); + goto swts_fail; + } + } + + if (resinfo->rot->swts_buf.mapped == false) { + ret = sde_hw_rotator_swts_map(resinfo->rot); + if (ret) { + SDEROT_ERR("swts buffer map failed\n"); + goto swts_fail; + } + } } if (resinfo->rot->irq_num >= 0) @@ -1474,6 +1525,10 @@ static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext( resinfo, wb_id); return &resinfo->hw; + +swts_fail: + devm_kfree(&mgr->pdev->dev, resinfo); + return NULL; } /* @@ -1485,6 +1540,7 @@ static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr, struct sde_rot_hw_resource *hw) { struct sde_hw_rotator_resource_info *resinfo; + struct sde_rot_data_type *mdata = sde_rot_get_mdata(); if (!mgr || !mgr->hw_data) return; @@ -1499,6 +1555,18 @@ static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr, if (resinfo->rot->irq_num >= 0) sde_hw_rotator_disable_irq(resinfo->rot); + /* + * For SDM660 and SDM630, the IOMMU is shared between MDP and rotator. + * If IOMMU is detached from MDP driver, the timestamp buffer will be + * invalidated. It is safer to unmap the timestamp buffer when the + * rotator session ends, so that it will be mapped again when a fresh + * session starts. 
+ */ + if (((mdata->mdss_version == MDSS_MDP_HW_REV_320) || + (mdata->mdss_version == MDSS_MDP_HW_REV_330)) && + resinfo->rot->swts_buf.mapped) + sde_hw_rotator_swtc_unmap(resinfo->rot); + devm_kfree(&mgr->pdev->dev, resinfo); } diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c index a63279715de6..a8dc1d010d62 100644 --- a/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_v4l2_vidc.c @@ -766,6 +766,8 @@ static int __init msm_vidc_init(void) if (rc) { dprintk(VIDC_ERR, "Failed to register platform driver\n"); + msm_vidc_debugfs_deinit_drv(); + debugfs_remove_recursive(vidc_driver->debugfs_root); kfree(vidc_driver); vidc_driver = NULL; } @@ -776,6 +778,7 @@ static int __init msm_vidc_init(void) static void __exit msm_vidc_exit(void) { platform_driver_unregister(&msm_vidc_driver); + msm_vidc_debugfs_deinit_drv(); debugfs_remove_recursive(vidc_driver->debugfs_root); mutex_destroy(&vidc_driver->lock); kfree(vidc_driver); diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index 24eb8fff905b..953780e3c220 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -2587,6 +2587,12 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, int rc = 0, i = 0, fourcc = 0; struct v4l2_ext_control *ext_control; struct v4l2_control control; + u32 old_mode = 0; + bool mode_changed = false; + enum mode { + PRIMARY = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_PRIMARY, + SECONDARY = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_SECONDARY + }; if (!inst || !inst->core || !ctrl) { dprintk(VIDC_ERR, @@ -2595,19 +2601,21 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, } ext_control = ctrl->controls; - control.id = - V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE; + control.id = V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE; + old_mode = msm_comm_g_ctrl_for_id(inst, control.id); for (i = 0; i < ctrl->count; i++) { switch (ext_control[i].id) { case V4L2_CID_MPEG_VIDC_VIDEO_STREAM_OUTPUT_MODE: control.value = ext_control[i].value; - rc = msm_comm_s_ctrl(inst, &control); if (rc) dprintk(VIDC_ERR, "%s Failed setting stream output mode : %d\n", __func__, rc); + + if (old_mode == SECONDARY && control.value == PRIMARY) + mode_changed = true; break; case V4L2_CID_MPEG_VIDC_VIDEO_DPB_COLOR_FORMAT: switch (ext_control[i].value) { @@ -2620,6 +2628,24 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, "%s Release output buffers failed\n", __func__); } + /* Update buffer reqmt for split to comb mode */ + if (mode_changed) { + fourcc = + inst->fmts[CAPTURE_PORT].fourcc; + msm_comm_set_color_format(inst, + HAL_BUFFER_OUTPUT, fourcc); + if (rc) { + dprintk(VIDC_ERR, + "%s Failed setting output color format : %d\n", + __func__, rc); + break; + } + rc = msm_comm_try_get_bufreqs(inst); + if (rc) + dprintk(VIDC_ERR, + "%s Failed to get buffer requirements : %d\n", + __func__, rc); + } break; case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_UBWC: case V4L2_MPEG_VIDC_VIDEO_DPB_COLOR_FMT_TP10_UBWC: diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index ca9d7fba4ee3..30726354164b 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -3511,6 +3511,48 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) frameqp = ctrl->val; pdata = &frameqp; break; + case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP: + { + rc = 
msm_venc_validate_qp_value(inst, ctrl); + if (rc) { + dprintk(VIDC_ERR, "Invalid Initial I QP\n"); + break; + } + /* + * Defer sending property from here, set_ext_ctrl + * will send it based on the rc value. + */ + property_id = 0; + break; + } + case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP: + { + rc = msm_venc_validate_qp_value(inst, ctrl); + if (rc) { + dprintk(VIDC_ERR, "Invalid Initial B QP\n"); + break; + } + /* + * Defer sending property from here, set_ext_ctrl + * will send it based on the rc value. + */ + property_id = 0; + break; + } + case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP: + { + rc = msm_venc_validate_qp_value(inst, ctrl); + if (rc) { + dprintk(VIDC_ERR, "Invalid Initial P QP\n"); + break; + } + /* + * Defer sending property from here, set_ext_ctrl + * will send it based on the rc value. + */ + property_id = 0; + break; + } case V4L2_CID_MPEG_VIDC_VIDEO_VQZIP_SEI: property_id = HAL_PARAM_VENC_VQZIP_SEI; enable.enable = ctrl->val; @@ -3745,7 +3787,7 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, struct hal_aspect_ratio sar; struct hal_bitrate bitrate; struct hal_frame_size blur_res; - struct v4l2_ctrl *temp_ctrl; + struct v4l2_control temp_ctrl; if (!inst || !inst->core || !inst->core->device || !ctrl) { dprintk(VIDC_ERR, "%s invalid parameters\n", __func__); @@ -3812,12 +3854,15 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, /* Sanity check for the QP boundaries as we are using * same control to set Initial QP for all the codecs */ - temp_ctrl->id = + temp_ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_I_FRAME_QP; - temp_ctrl->val = control[i].value; - rc = msm_venc_validate_qp_value(inst, temp_ctrl); + temp_ctrl.value = control[i].value; + + rc = msm_comm_s_ctrl(inst, &temp_ctrl); if (rc) { - dprintk(VIDC_ERR, "Invalid Initial I QP\n"); + dprintk(VIDC_ERR, + "%s Failed setting Initial I Frame QP : %d\n", + __func__, rc); break; } quant.qpi = control[i].value; @@ -3825,12 +3870,14 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, pdata = &quant; break; case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP: - temp_ctrl->id = + temp_ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_P_FRAME_QP; - temp_ctrl->val = control[i].value; - rc = msm_venc_validate_qp_value(inst, temp_ctrl); + temp_ctrl.value = control[i].value; + rc = msm_comm_s_ctrl(inst, &temp_ctrl); if (rc) { - dprintk(VIDC_ERR, "Invalid Initial P QP\n"); + dprintk(VIDC_ERR, + "%s Failed setting Initial P Frame QP : %d\n", + __func__, rc); break; } quant.qpp = control[i].value; @@ -3838,12 +3885,14 @@ static int try_set_ext_ctrl(struct msm_vidc_inst *inst, pdata = &quant; break; case V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP: - temp_ctrl->id = + temp_ctrl.id = V4L2_CID_MPEG_VIDC_VIDEO_INITIAL_B_FRAME_QP; - temp_ctrl->val = control[i].value; - rc = msm_venc_validate_qp_value(inst, temp_ctrl); + temp_ctrl.value = control[i].value; + rc = msm_comm_s_ctrl(inst, &temp_ctrl); if (rc) { - dprintk(VIDC_ERR, "Invalid Initial B QP\n"); + dprintk(VIDC_ERR, + "%s Failed setting Initial B Frame QP : %d\n", + __func__, rc); break; } quant.qpb = control[i].value; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 8b459e4da618..7b28e80979f2 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -902,7 +902,7 @@ static int wait_for_sess_signal_receipt(struct msm_vidc_inst *inst, dprintk(VIDC_ERR, "sess resp timeout can potentially crash the system\n"); 
msm_comm_print_debug_info(inst); - BUG_ON(inst->core->resources.debug_timeout); + BUG_ON(msm_vidc_debug_timeout); msm_comm_kill_session(inst); rc = -EIO; } else { @@ -1748,7 +1748,7 @@ static void handle_sys_error(enum hal_command_response cmd, void *data) msm_comm_print_inst_info(inst); mutex_unlock(&core->lock); - BUG_ON(core->resources.debug_timeout); + BUG_ON(msm_vidc_debug_timeout); } void msm_comm_session_clean(struct msm_vidc_inst *inst) @@ -2543,7 +2543,7 @@ static int msm_comm_session_abort(struct msm_vidc_inst *inst) "ABORT timeout can potentially crash the system\n"); msm_comm_print_debug_info(inst); - BUG_ON(inst->core->resources.debug_timeout); + BUG_ON(msm_vidc_debug_timeout); rc = -EBUSY; } else { rc = 0; @@ -2645,7 +2645,7 @@ int msm_comm_check_core_init(struct msm_vidc_core *core) msm_comm_print_debug_info(inst); mutex_lock(&core->lock); - BUG_ON(core->resources.debug_timeout); + BUG_ON(msm_vidc_debug_timeout); rc = -EIO; goto exit; } else { @@ -4110,10 +4110,9 @@ int msm_comm_try_get_prop(struct msm_vidc_inst *inst, enum hal_property ptype, call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data); dprintk(VIDC_ERR, "SESS_PROP timeout can potentially crash the system\n"); - if (inst->core->resources.debug_timeout) - msm_comm_print_debug_info(inst); + msm_comm_print_debug_info(inst); - BUG_ON(inst->core->resources.debug_timeout); + BUG_ON(msm_vidc_debug_timeout); msm_comm_kill_session(inst); rc = -ETIMEDOUT; goto exit; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c index 1248a1c08103..a9b367d6fe93 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c @@ -38,6 +38,7 @@ bool msm_vidc_debug_timeout = false; #define MAX_DBG_BUF_SIZE 4096 struct debug_buffer { + struct mutex lock; char ptr[MAX_DBG_BUF_SIZE]; char *curr; u32 filled_size; @@ -64,8 +65,12 @@ static u32 write_str(struct debug_buffer *buffer, const char *fmt, ...) { va_list args; u32 size; + + char *curr = buffer->curr; + char *end = buffer->ptr + MAX_DBG_BUF_SIZE; + va_start(args, fmt); - size = vscnprintf(buffer->curr, MAX_DBG_BUF_SIZE - 1, fmt, args); + size = vscnprintf(curr, end - curr, fmt, args); va_end(args); buffer->curr += size; buffer->filled_size += size; @@ -79,12 +84,15 @@ static ssize_t core_info_read(struct file *file, char __user *buf, struct hfi_device *hdev; struct hal_fw_info fw_info = { {0} }; int i = 0, rc = 0; + ssize_t len = 0; if (!core || !core->device) { dprintk(VIDC_ERR, "Invalid params, core: %pK\n", core); return 0; } hdev = core->device; + + mutex_lock(&dbg_buf.lock); INIT_DBG_BUF(dbg_buf); write_str(&dbg_buf, "===============================\n"); write_str(&dbg_buf, "CORE %d: %pK\n", core->id, core); @@ -108,8 +116,11 @@ err_fw_info: completion_done(&core->completions[SYS_MSG_INDEX(i)]) ? 
"pending" : "done"); } - return simple_read_from_buffer(buf, count, ppos, + len = simple_read_from_buffer(buf, count, ppos, dbg_buf.ptr, dbg_buf.filled_size); + + mutex_unlock(&dbg_buf.lock); + return len; } static const struct file_operations core_info_fops = { @@ -147,7 +158,10 @@ static const struct file_operations ssr_fops = { struct dentry *msm_vidc_debugfs_init_drv(void) { bool ok = false; - struct dentry *dir = debugfs_create_dir("msm_vidc", NULL); + struct dentry *dir = NULL; + + mutex_init(&dbg_buf.lock); + dir = debugfs_create_dir("msm_vidc", NULL); if (IS_ERR_OR_NULL(dir)) { dir = NULL; goto failed_create_dir; @@ -216,6 +230,7 @@ struct dentry *msm_vidc_debugfs_init_core(struct msm_vidc_core *core, dprintk(VIDC_ERR, "Failed to create debugfs for msm_vidc\n"); goto failed_create_dir; } + if (!debugfs_create_file("info", S_IRUGO, dir, core, &core_info_fops)) { dprintk(VIDC_ERR, "debugfs_create_file: fail\n"); goto failed_create_dir; @@ -269,11 +284,14 @@ static ssize_t inst_info_read(struct file *file, char __user *buf, { struct msm_vidc_inst *inst = file->private_data; int i, j; + ssize_t len = 0; if (!inst) { dprintk(VIDC_ERR, "Invalid params, inst %pK\n", inst); return 0; } + + mutex_lock(&dbg_buf.lock); INIT_DBG_BUF(dbg_buf); write_str(&dbg_buf, "===============================\n"); write_str(&dbg_buf, "INSTANCE: %pK (%s)\n", inst, @@ -331,8 +349,10 @@ static ssize_t inst_info_read(struct file *file, char __user *buf, publish_unreleased_reference(inst); - return simple_read_from_buffer(buf, count, ppos, + len = simple_read_from_buffer(buf, count, ppos, dbg_buf.ptr, dbg_buf.filled_size); + mutex_unlock(&dbg_buf.lock); + return len; } static const struct file_operations inst_info_fops = { @@ -413,3 +433,8 @@ void msm_vidc_debugfs_update(struct msm_vidc_inst *inst, } } +void msm_vidc_debugfs_deinit_drv(void) +{ + mutex_destroy(&dbg_buf.lock); +} + diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.h b/drivers/media/platform/msm/vidc/msm_vidc_debug.h index 39ac6273f34e..853ce4b89f2b 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.h @@ -126,6 +126,7 @@ struct dentry *msm_vidc_debugfs_init_inst(struct msm_vidc_inst *inst, struct dentry *parent); void msm_vidc_debugfs_update(struct msm_vidc_inst *inst, enum msm_vidc_debugfs_event e); +void msm_vidc_debugfs_deinit_drv(void); static inline void tic(struct msm_vidc_inst *i, enum profiling_points p, char *b) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index a65e22c66e30..4cc977e568ee 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -1123,7 +1123,7 @@ int read_platform_resources_from_dt( res->debug_timeout = of_property_read_bool(pdev->dev.of_node, "qcom,debug-timeout"); - res->debug_timeout |= msm_vidc_debug_timeout; + msm_vidc_debug_timeout |= res->debug_timeout; of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency-us", &res->pm_qos_latency_us); diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c index e8ba1495de2b..27249eeec013 100644 --- a/drivers/mfd/wcd934x-regmap.c +++ b/drivers/mfd/wcd934x-regmap.c @@ -1937,7 +1937,6 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg) case WCD934X_BIAS_VBG_FINE_ADJ: case WCD934X_CODEC_CPR_SVS_CX_VDD: case WCD934X_CODEC_CPR_SVS2_CX_VDD: - case WCD934X_CDC_TOP_TOP_CFG1: case 
WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL: return true; } diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c index 9889d9c4723b..84761747f129 100644 --- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c +++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c @@ -148,6 +148,8 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, case AUDIO_START: { pr_debug("%s: AUDIO_START\n", __func__); + mutex_lock(&effects->lock); + rc = q6asm_open_read_write_v2(effects->ac, FORMAT_LINEAR_PCM, FORMAT_MULTI_CHANNEL_LINEAR_PCM, @@ -159,6 +161,7 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, pr_err("%s: Open failed for hw accelerated effects:rc=%d\n", __func__, rc); rc = -EINVAL; + mutex_unlock(&effects->lock); goto ioctl_fail; } effects->opened = 1; @@ -175,6 +178,7 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, pr_err("%s: Write buffer Allocation failed rc = %d\n", __func__, rc); rc = -ENOMEM; + mutex_unlock(&effects->lock); goto ioctl_fail; } atomic_set(&effects->in_count, effects->config.input.num_buf); @@ -185,6 +189,7 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, pr_err("%s: Read buffer Allocation failed rc = %d\n", __func__, rc); rc = -ENOMEM; + mutex_unlock(&effects->lock); goto readbuf_fail; } atomic_set(&effects->out_count, effects->config.output.num_buf); @@ -199,6 +204,7 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, if (rc < 0) { pr_err("%s: pcm read block config failed\n", __func__); rc = -EINVAL; + mutex_unlock(&effects->lock); goto cfg_fail; } pr_debug("%s: dec: sample_rate: %d, num_channels: %d, bit_width: %d\n", @@ -213,6 +219,7 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, pr_err("%s: pcm write format block config failed\n", __func__); rc = -EINVAL; + mutex_unlock(&effects->lock); goto cfg_fail; } @@ -225,6 +232,7 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, effects->started = 0; pr_err("%s: ASM run state failed\n", __func__); } + mutex_unlock(&effects->lock); break; } case AUDIO_EFFECTS_WRITE: { @@ -286,8 +294,11 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, uint32_t idx = 0; uint32_t size = 0; + mutex_lock(&effects->lock); + if (!effects->started) { rc = -EFAULT; + mutex_unlock(&effects->lock); goto ioctl_fail; } @@ -304,11 +315,13 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, if (!rc) { pr_err("%s: read wait_event_timeout\n", __func__); rc = -EFAULT; + mutex_unlock(&effects->lock); goto ioctl_fail; } if (!atomic_read(&effects->in_count)) { pr_err("%s: pcm stopped in_count 0\n", __func__); rc = -EFAULT; + mutex_unlock(&effects->lock); goto ioctl_fail; } @@ -316,15 +329,18 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, if (bufptr) { if (!((void *)arg)) { rc = -EFAULT; + mutex_unlock(&effects->lock); goto ioctl_fail; } if ((effects->config.buf_cfg.input_len > size) || copy_to_user((void *)arg, bufptr, effects->config.buf_cfg.input_len)) { rc = -EFAULT; + mutex_unlock(&effects->lock); goto ioctl_fail; } } + mutex_unlock(&effects->lock); break; } default: @@ -456,6 +472,7 @@ static long audio_effects_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case AUDIO_SET_EFFECTS_CONFIG: { pr_debug("%s: AUDIO_SET_EFFECTS_CONFIG\n", __func__); + mutex_lock(&effects->lock); memset(&effects->config, 0, sizeof(effects->config)); if (copy_from_user(&effects->config, (void 
*)arg, sizeof(effects->config))) { @@ -473,6 +490,7 @@ static long audio_effects_ioctl(struct file *file, unsigned int cmd, effects->config.input.num_buf, effects->config.input.sample_rate, effects->config.input.num_channels); + mutex_unlock(&effects->lock); break; } case AUDIO_EFFECTS_SET_BUF_LEN: { @@ -494,6 +512,7 @@ static long audio_effects_ioctl(struct file *file, unsigned int cmd, buf_avail.input_num_avail = atomic_read(&effects->in_count); buf_avail.output_num_avail = atomic_read(&effects->out_count); + mutex_lock(&effects->lock); pr_debug("%s: write buf avail: %d, read buf avail: %d\n", __func__, buf_avail.output_num_avail, buf_avail.input_num_avail); @@ -503,16 +522,20 @@ static long audio_effects_ioctl(struct file *file, unsigned int cmd, __func__); rc = -EFAULT; } + mutex_unlock(&effects->lock); break; } case AUDIO_EFFECTS_SET_PP_PARAMS: { + mutex_lock(&effects->lock); if (copy_from_user(argvalues, (void *)arg, MAX_PP_PARAMS_SZ*sizeof(long))) { pr_err("%s: copy from user for pp params failed\n", __func__); + mutex_unlock(&effects->lock); return -EFAULT; } rc = audio_effects_set_pp_param(effects, argvalues); + mutex_unlock(&effects->lock); break; } default: @@ -578,12 +601,14 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, case AUDIO_SET_EFFECTS_CONFIG32: { struct msm_hwacc_effects_config32 config32; struct msm_hwacc_effects_config *config = &effects->config; + mutex_lock(&effects->lock); memset(&effects->config, 0, sizeof(effects->config)); if (copy_from_user(&config32, (void *)arg, sizeof(config32))) { pr_err("%s: copy to user for AUDIO_SET_EFFECTS_CONFIG failed\n", __func__); rc = -EFAULT; + mutex_unlock(&effects->lock); break; } config->input.buf_size = config32.input.buf_size; @@ -620,16 +645,19 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, effects->config.input.num_buf, effects->config.input.sample_rate, effects->config.input.num_channels); + mutex_unlock(&effects->lock); break; } case AUDIO_EFFECTS_SET_BUF_LEN32: { struct msm_hwacc_buf_cfg32 buf_cfg32; struct msm_hwacc_effects_config *config = &effects->config; + mutex_lock(&effects->lock); if (copy_from_user(&buf_cfg32, (void *)arg, sizeof(buf_cfg32))) { pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n", __func__); rc = -EFAULT; + mutex_unlock(&effects->lock); break; } config->buf_cfg.input_len = buf_cfg32.input_len; @@ -637,6 +665,7 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, pr_debug("%s: write buf len: %d, read buf len: %d\n", __func__, effects->config.buf_cfg.output_len, effects->config.buf_cfg.input_len); + mutex_unlock(&effects->lock); break; } case AUDIO_EFFECTS_GET_BUF_AVAIL32: { @@ -644,6 +673,7 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, memset(&buf_avail, 0, sizeof(buf_avail)); + mutex_lock(&effects->lock); buf_avail.input_num_avail = atomic_read(&effects->in_count); buf_avail.output_num_avail = atomic_read(&effects->out_count); pr_debug("%s: write buf avail: %d, read buf avail: %d\n", @@ -655,22 +685,26 @@ static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, __func__); rc = -EFAULT; } + mutex_unlock(&effects->lock); break; } case AUDIO_EFFECTS_SET_PP_PARAMS32: { long argvalues[MAX_PP_PARAMS_SZ] = {0}; int argvalues32[MAX_PP_PARAMS_SZ] = {0}; + mutex_lock(&effects->lock); if (copy_from_user(argvalues32, (void *)arg, MAX_PP_PARAMS_SZ*sizeof(int))) { pr_err("%s: copy from user failed for pp params\n", __func__); + 
mutex_unlock(&effects->lock); return -EFAULT; } for (i = 0; i < MAX_PP_PARAMS_SZ; i++) argvalues[i] = argvalues32[i]; rc = audio_effects_set_pp_param(effects, argvalues); + mutex_unlock(&effects->lock); break; } case AUDIO_START32: { diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c index 3bb95f50bc13..52f7d3d2f268 100644 --- a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -179,7 +179,7 @@ static const int s_button_map[] = { }; /* The opened devices container */ -static int s_opened_devs[MAX_DEVS_NUMBER]; +static atomic_t s_opened_devs[MAX_DEVS_NUMBER]; static struct wakeup_source usf_wakeup_source; @@ -2338,14 +2338,11 @@ static uint16_t add_opened_dev(int minor) uint16_t ind = 0; for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) { - if (minor == s_opened_devs[ind]) { + if (minor == atomic_cmpxchg(&s_opened_devs[ind], 0, minor)) { pr_err("%s: device %d is already opened\n", __func__, minor); return USF_UNDEF_DEV_ID; - } - - if (s_opened_devs[ind] == 0) { - s_opened_devs[ind] = minor; + } else { pr_debug("%s: device %d is added; ind=%d\n", __func__, minor, ind); return ind; @@ -2401,7 +2398,7 @@ static int usf_release(struct inode *inode, struct file *file) usf_disable(&usf->usf_tx); usf_disable(&usf->usf_rx); - s_opened_devs[usf->dev_ind] = 0; + atomic_set(&s_opened_devs[usf->dev_ind], 0); wakeup_source_trash(&usf_wakeup_source); mutex_unlock(&usf->mutex); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 5d6c44b00bc2..e9f1a19dfe3f 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2189,6 +2189,17 @@ static int mmc_blk_err_check(struct mmc_card *card, int need_retune = card->host->need_retune; int ecc_err = 0, gen_err = 0; + if (card->host->sdr104_wa && mmc_card_sd(card) && + (card->host->ios.timing == MMC_TIMING_UHS_SDR104) && + !card->sdr104_blocked && + (brq->data.error == -EILSEQ || + brq->data.error == -EIO || + brq->data.error == -ETIMEDOUT || + brq->cmd.error == -EILSEQ || + brq->cmd.error == -EIO || + brq->cmd.error == -ETIMEDOUT)) + card->err_in_sdr104 = true; + /* * sbc.error indicates a problem with the set block count * command. No data will have been transferred. @@ -3645,6 +3656,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) struct mmc_async_req *areq; const u8 packed_nr = 2; u8 reqs = 0; + bool reset = false; #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED unsigned long waitfor = jiffies; #endif @@ -3690,6 +3702,26 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; mmc_queue_bounce_post(mq_rq); + if (card->err_in_sdr104) { + /* + * Data CRC/timeout errors will manifest as CMD/DATA + * ERR. But we'd like to retry these too. + * Moreover, no harm done if this fails too for multiple + * times, we anyway reduce the bus-speed and retry the + * same request. + * If that fails too, we don't override this status. 
+ */ + if (status == MMC_BLK_ABORT || + status == MMC_BLK_CMD_ERR || + status == MMC_BLK_DATA_ERR || + status == MMC_BLK_RETRY) + /* reset on all of these errors and retry */ + reset = true; + + status = MMC_BLK_RETRY; + card->err_in_sdr104 = false; + } + switch (status) { case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: @@ -3730,8 +3762,32 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) break; case MMC_BLK_RETRY: retune_retry_done = brq->retune_retry_done; - if (retry++ < MMC_BLK_MAX_RETRIES) + if (retry++ < MMC_BLK_MAX_RETRIES) { break; + } else if (reset) { + reset = false; + /* + * If we exhaust all the retries due to + * CRC/timeout errors in SDR140 mode with UHS SD + * cards, re-configure the card in SDR50 + * bus-speed mode. + * All subsequent re-init of this card will be + * in SDR50 mode, unless it is removed and + * re-inserted. When new UHS SD cards are + * inserted, it may start at SDR104 mode if + * supported by the card. + */ + pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n", + req->rq_disk->disk_name); + mmc_host_clear_sdr104(card->host); + mmc_suspend_clk_scaling(card->host); + mmc_blk_reset(md, card->host, type); + /* SDR104 mode is blocked from now on */ + card->sdr104_blocked = true; + /* retry 5 times again */ + retry = 0; + break; + } /* Fall through */ case MMC_BLK_ABORT: if (!mmc_blk_reset(md, card->host, type) && diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index c409f713d4f0..41f0935440fd 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -4040,6 +4040,10 @@ int _mmc_detect_card_removed(struct mmc_host *host) if (ret) { mmc_card_set_removed(host->card); + if (host->card->sdr104_blocked) { + mmc_host_set_sdr104(host); + host->card->sdr104_blocked = false; + } pr_debug("%s: card remove detected\n", mmc_hostname(host)); } diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 1116544eebc1..c66187299598 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -80,7 +80,6 @@ void mmc_init_context_info(struct mmc_host *host); extern bool mmc_can_scale_clk(struct mmc_host *host); extern int mmc_init_clk_scaling(struct mmc_host *host); -extern int mmc_suspend_clk_scaling(struct mmc_host *host); extern int mmc_resume_clk_scaling(struct mmc_host *host); extern int mmc_exit_clk_scaling(struct mmc_host *host); extern unsigned long mmc_get_max_frequency(struct mmc_host *host); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 5ab09b4ae868..3b79f514350e 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2031,7 +2031,7 @@ reinit: err = mmc_select_hs400(card); if (err) goto free_card; - } else { + } else if (!mmc_card_hs400(card)) { /* Select the desired bus width optionally */ err = mmc_select_bus_width(card); if (!IS_ERR_VALUE(err) && mmc_card_hs(card)) { diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 9e147a667edf..5b4d5d74fe55 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1285,6 +1285,8 @@ static int _mmc_sd_resume(struct mmc_host *host) #endif mmc_card_clr_suspended(host->card); + if (host->card->sdr104_blocked) + goto out; err = mmc_resume_clk_scaling(host); if (err) { pr_err("%s: %s: fail to resume clock scaling (%d)\n", diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 2eaac11ec8ba..987d61bdda2d 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1960,6 +1960,8 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, if 
(of_get_property(np, "qcom,core_3_0v_support", NULL)) pdata->core_3_0v_support = true; + pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa"); + return pdata; out: return NULL; @@ -4579,6 +4581,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) if (msm_host->pdata->nonhotplug) msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG; + msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa; /* Initialize ICE if present */ if (msm_host->ice.pdev) { diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index 2e4f2179378e..92f61708001e 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -153,6 +153,7 @@ struct sdhci_msm_pltfm_data { u32 ice_clk_min; struct sdhci_msm_pm_qos_data pm_qos_data; bool core_3_0v_support; + bool sdr104_wa; }; struct sdhci_msm_bus_vote { diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 44633dc5d2be..40a34c283955 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -3083,7 +3083,10 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) mmc_hostname(host->mmc), intmask, host->data->error, ktime_to_ms(ktime_sub( ktime_get(), host->data_start_time))); - sdhci_dumpregs(host); + + if (!host->mmc->sdr104_wa || + (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104)) + sdhci_dumpregs(host); } sdhci_finish_data(host); } else { diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c index bf6502e27bdd..015cb99d445b 100644 --- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c +++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c @@ -35,6 +35,7 @@ #define MHI_NAPI_WEIGHT_VALUE 12 #define WATCHDOG_TIMEOUT (30 * HZ) #define RMNET_IPC_LOG_PAGES (100) +#define IRQ_MASKED_BIT (0) enum DBG_LVL { MSG_VERBOSE = 0x1, @@ -100,14 +101,15 @@ struct rmnet_mhi_private { u32 mhi_enabled; struct platform_device *pdev; struct net_device *dev; - atomic_t irq_masked_cntr; + unsigned long flags; + int wake_count; spinlock_t out_chan_full_lock; /* tx queue lock */ - atomic_t pending_data; struct sk_buff *frag_skb; struct work_struct alloc_work; /* lock to queue hardware and internal queue */ spinlock_t alloc_lock; void *rmnet_ipc_log; + rwlock_t pm_lock; /* state change lock */ struct debug_params debug; struct dentry *dentry; }; @@ -130,12 +132,12 @@ static int rmnet_mhi_process_fragment(struct rmnet_mhi_private *rmnet_mhi_ptr, rmnet_mhi_ptr->frag_skb = NULL; return -ENOMEM; } - kfree_skb(rmnet_mhi_ptr->frag_skb); + dev_kfree_skb_any(rmnet_mhi_ptr->frag_skb); rmnet_mhi_ptr->frag_skb = temp_skb; memcpy(skb_put(rmnet_mhi_ptr->frag_skb, skb->len), skb->data, skb->len); - kfree_skb(skb); + dev_kfree_skb_any(skb); if (!frag) { /* Last fragmented piece was received, ship it */ netif_receive_skb(rmnet_mhi_ptr->frag_skb); @@ -196,7 +198,6 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr, { u32 cur_mru = rmnet_mhi_ptr->mru; struct mhi_skb_priv *skb_priv; - unsigned long flags; int ret; struct sk_buff *skb; @@ -215,7 +216,7 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr, skb_priv->dma_addr = 0; /* These steps must be in atomic context */ - spin_lock_irqsave(&rmnet_mhi_ptr->alloc_lock, flags); + spin_lock_bh(&rmnet_mhi_ptr->alloc_lock); /* It's possible by the time alloc_skb (GFP_KERNEL) * returns we already called rmnet_alloc_rx @@ -224,14 +225,22 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr, */ if (unlikely(atomic_read(&rmnet_mhi_ptr->rx_pool_len) >= rmnet_mhi_ptr->rx_buffers_max)) { - 
spin_unlock_irqrestore(&rmnet_mhi_ptr->alloc_lock, - flags); + spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock); dev_kfree_skb_any(skb); return 0; } - ret = mhi_queue_xfer( - rmnet_mhi_ptr->rx_client_handle, + read_lock_bh(&rmnet_mhi_ptr->pm_lock); + if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) { + rmnet_log(rmnet_mhi_ptr, MSG_INFO, + "!interface is disabled\n"); + dev_kfree_skb_any(skb); + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); + spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock); + return -EIO; + } + + ret = mhi_queue_xfer(rmnet_mhi_ptr->rx_client_handle, skb->data, skb_priv->dma_size, MHI_EOT); @@ -239,14 +248,15 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr, rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL, "mhi_queue_xfer failed, error %d", ret); - spin_unlock_irqrestore(&rmnet_mhi_ptr->alloc_lock, - flags); + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); + spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock); dev_kfree_skb_any(skb); return ret; } skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb); atomic_inc(&rmnet_mhi_ptr->rx_pool_len); - spin_unlock_irqrestore(&rmnet_mhi_ptr->alloc_lock, flags); + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); + spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock); } return 0; @@ -258,13 +268,25 @@ static void rmnet_mhi_alloc_work(struct work_struct *work) struct rmnet_mhi_private, alloc_work); int ret; + /* sleep about 1 sec and retry, that should be enough time + * for system to reclaim freed memory back. + */ + const int sleep_ms = 1000; + int retry = 60; rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n"); - ret = rmnet_alloc_rx(rmnet_mhi_ptr, - rmnet_mhi_ptr->allocation_flags); + do { + ret = rmnet_alloc_rx(rmnet_mhi_ptr, + rmnet_mhi_ptr->allocation_flags); + /* sleep and try again */ + if (ret == -ENOMEM) { + msleep(sleep_ms); + retry--; + } + } while (ret == -ENOMEM && retry); - WARN_ON(ret == -ENOMEM); - rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exit\n"); + rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exit with status:%d retry:%d\n", + ret, retry); } static int rmnet_mhi_poll(struct napi_struct *napi, int budget) @@ -281,6 +303,12 @@ static int rmnet_mhi_poll(struct napi_struct *napi, int budget) rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n"); + read_lock_bh(&rmnet_mhi_ptr->pm_lock); + if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) { + rmnet_log(rmnet_mhi_ptr, MSG_INFO, "interface is disabled!\n"); + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); + return 0; + } while (received_packets < budget) { struct mhi_result *result = mhi_poll(rmnet_mhi_ptr->rx_client_handle); @@ -338,77 +366,50 @@ static int rmnet_mhi_poll(struct napi_struct *napi, int budget) dev->stats.rx_bytes += result->bytes_xferd; } /* while (received_packets < budget) or any other error */ + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); /* Queue new buffers */ res = rmnet_alloc_rx(rmnet_mhi_ptr, GFP_ATOMIC); - if (res == -ENOMEM) { - rmnet_log(rmnet_mhi_ptr, - MSG_INFO, - "out of mem, queuing bg worker\n"); - rmnet_mhi_ptr->alloc_fail++; - schedule_work(&rmnet_mhi_ptr->alloc_work); - } - napi_complete(napi); + read_lock_bh(&rmnet_mhi_ptr->pm_lock); + if (likely(rmnet_mhi_ptr->mhi_enabled)) { + if (res == -ENOMEM) { + rmnet_log(rmnet_mhi_ptr, MSG_INFO, + "out of mem, queuing bg worker\n"); + rmnet_mhi_ptr->alloc_fail++; + schedule_work(&rmnet_mhi_ptr->alloc_work); + } + + napi_complete(napi); - /* We got a NULL descriptor back */ - if (should_reschedule == false) { - if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) { - atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr); - mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle); + /* We got a NULL 
descriptor back */ + if (!should_reschedule) { + if (test_and_clear_bit(IRQ_MASKED_BIT, + &rmnet_mhi_ptr->flags)) + mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle); mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, true); + rmnet_mhi_ptr->wake_count--; + } else { + if (received_packets == budget) + rmnet_mhi_ptr->debug.rx_napi_budget_overflow++; + napi_reschedule(napi); } - } else { - if (received_packets == budget) - rmnet_mhi_ptr->debug.rx_napi_budget_overflow++; - napi_reschedule(napi); - } - rmnet_mhi_ptr->debug.rx_napi_skb_burst_min = - min((u64)received_packets, - rmnet_mhi_ptr->debug.rx_napi_skb_burst_min); + rmnet_mhi_ptr->debug.rx_napi_skb_burst_min = + min((u64)received_packets, + rmnet_mhi_ptr->debug.rx_napi_skb_burst_min); - rmnet_mhi_ptr->debug.rx_napi_skb_burst_max = - max((u64)received_packets, - rmnet_mhi_ptr->debug.rx_napi_skb_burst_max); + rmnet_mhi_ptr->debug.rx_napi_skb_burst_max = + max((u64)received_packets, + rmnet_mhi_ptr->debug.rx_napi_skb_burst_max); + } + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited, polled %d pkts\n", received_packets); return received_packets; } -void rmnet_mhi_clean_buffers(struct net_device *dev) -{ - struct rmnet_mhi_private *rmnet_mhi_ptr = - *(struct rmnet_mhi_private **)netdev_priv(dev); - - rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n"); - /* Clean TX buffers */ - rmnet_mhi_internal_clean_unmap_buffers(dev, - &rmnet_mhi_ptr->tx_buffers, - DMA_TO_DEVICE); - - /* Clean RX buffers */ - rmnet_mhi_internal_clean_unmap_buffers(dev, - &rmnet_mhi_ptr->rx_buffers, - DMA_FROM_DEVICE); - rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited\n"); -} - -static int rmnet_mhi_disable_channels(struct rmnet_mhi_private *rmnet_mhi_ptr) -{ - rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Closing MHI TX channel\n"); - mhi_close_channel(rmnet_mhi_ptr->tx_client_handle); - rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Closing MHI RX channel\n"); - mhi_close_channel(rmnet_mhi_ptr->rx_client_handle); - rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Clearing Pending TX buffers.\n"); - rmnet_mhi_clean_buffers(rmnet_mhi_ptr->dev); - rmnet_mhi_ptr->tx_client_handle = NULL; - rmnet_mhi_ptr->rx_client_handle = NULL; - - return 0; -} - static int rmnet_mhi_init_inbound(struct rmnet_mhi_private *rmnet_mhi_ptr) { int res; @@ -431,7 +432,7 @@ static void rmnet_mhi_tx_cb(struct mhi_result *result) struct net_device *dev; struct rmnet_mhi_private *rmnet_mhi_ptr; unsigned long burst_counter = 0; - unsigned long flags; + unsigned long flags, pm_flags; rmnet_mhi_ptr = result->user_data; dev = rmnet_mhi_ptr->dev; @@ -451,10 +452,10 @@ static void rmnet_mhi_tx_cb(struct mhi_result *result) break; } else { if (skb->data == result->buf_addr) { - kfree_skb(skb); + dev_kfree_skb_any(skb); break; } - kfree_skb(skb); + dev_kfree_skb_any(skb); burst_counter++; /* Update statistics */ @@ -477,10 +478,15 @@ static void rmnet_mhi_tx_cb(struct mhi_result *result) rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_max); /* In case we couldn't write again, now we can! 
*/ - spin_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock, flags); - rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Waking up queue\n"); - netif_wake_queue(dev); - spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags); + read_lock_irqsave(&rmnet_mhi_ptr->pm_lock, pm_flags); + if (likely(rmnet_mhi_ptr->mhi_enabled)) { + spin_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock, flags); + rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Waking up queue\n"); + netif_wake_queue(dev); + spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, + flags); + } + read_unlock_irqrestore(&rmnet_mhi_ptr->pm_lock, pm_flags); rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n"); } @@ -488,20 +494,27 @@ static void rmnet_mhi_rx_cb(struct mhi_result *result) { struct net_device *dev; struct rmnet_mhi_private *rmnet_mhi_ptr; + unsigned long flags; + rmnet_mhi_ptr = result->user_data; dev = rmnet_mhi_ptr->dev; rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n"); rmnet_mhi_ptr->debug.rx_interrupts_count++; - - if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) { - mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle); - atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr); - mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false); - __napi_schedule(&(rmnet_mhi_ptr->napi)); - } else { - rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++; + read_lock_irqsave(&rmnet_mhi_ptr->pm_lock, flags); + if (likely(rmnet_mhi_ptr->mhi_enabled)) { + if (napi_schedule_prep(&rmnet_mhi_ptr->napi)) { + if (!test_and_set_bit(IRQ_MASKED_BIT, + &rmnet_mhi_ptr->flags)) + mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle); + mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false); + rmnet_mhi_ptr->wake_count++; + __napi_schedule(&rmnet_mhi_ptr->napi); + } else { + rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++; + } } + read_unlock_irqrestore(&rmnet_mhi_ptr->pm_lock, flags); rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n"); } @@ -510,8 +523,7 @@ static int rmnet_mhi_open(struct net_device *dev) struct rmnet_mhi_private *rmnet_mhi_ptr = *(struct rmnet_mhi_private **)netdev_priv(dev); - rmnet_log(rmnet_mhi_ptr, - MSG_INFO, + rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Opened net dev interface for MHI chans %d and %d\n", rmnet_mhi_ptr->tx_channel, rmnet_mhi_ptr->rx_channel); @@ -527,43 +539,35 @@ static int rmnet_mhi_open(struct net_device *dev) /* Poll to check if any buffers are accumulated in the * transport buffers */ - if (napi_schedule_prep(&(rmnet_mhi_ptr->napi))) { - mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle); - atomic_inc(&rmnet_mhi_ptr->irq_masked_cntr); - mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false); - __napi_schedule(&(rmnet_mhi_ptr->napi)); - } else { - rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++; + read_lock_bh(&rmnet_mhi_ptr->pm_lock); + if (likely(rmnet_mhi_ptr->mhi_enabled)) { + if (napi_schedule_prep(&rmnet_mhi_ptr->napi)) { + if (!test_and_set_bit(IRQ_MASKED_BIT, + &rmnet_mhi_ptr->flags)) { + mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle); + } + mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false); + rmnet_mhi_ptr->wake_count++; + __napi_schedule(&rmnet_mhi_ptr->napi); + } else { + rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++; + } } + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); return 0; } -static int rmnet_mhi_disable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr) -{ - rmnet_mhi_ptr->rx_enabled = 0; - rmnet_mhi_ptr->tx_enabled = 0; - rmnet_mhi_ptr->mhi_enabled = 0; - if (rmnet_mhi_ptr->dev != 0) { - netif_stop_queue(rmnet_mhi_ptr->dev); - netif_napi_del(&(rmnet_mhi_ptr->napi)); - rmnet_mhi_disable_channels(rmnet_mhi_ptr); - 
unregister_netdev(rmnet_mhi_ptr->dev); - free_netdev(rmnet_mhi_ptr->dev); - rmnet_mhi_ptr->dev = 0; - } - return 0; -} - static int rmnet_mhi_disable(struct rmnet_mhi_private *rmnet_mhi_ptr) { - rmnet_mhi_ptr->mhi_enabled = 0; - rmnet_mhi_disable_iface(rmnet_mhi_ptr); napi_disable(&(rmnet_mhi_ptr->napi)); - if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) { + rmnet_mhi_ptr->rx_enabled = 0; + rmnet_mhi_internal_clean_unmap_buffers(rmnet_mhi_ptr->dev, + &rmnet_mhi_ptr->rx_buffers, + DMA_FROM_DEVICE); + if (test_and_clear_bit(IRQ_MASKED_BIT, &rmnet_mhi_ptr->flags)) mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle); - atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr); - } + return 0; } @@ -574,11 +578,9 @@ static int rmnet_mhi_stop(struct net_device *dev) netif_stop_queue(dev); rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n"); - if (atomic_read(&rmnet_mhi_ptr->irq_masked_cntr)) { + if (test_and_clear_bit(IRQ_MASKED_BIT, &rmnet_mhi_ptr->flags)) { mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle); - atomic_dec(&rmnet_mhi_ptr->irq_masked_cntr); - rmnet_log(rmnet_mhi_ptr, - MSG_ERROR, + rmnet_log(rmnet_mhi_ptr, MSG_ERROR, "IRQ was masked, unmasking...\n"); } rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n"); @@ -605,14 +607,23 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; struct mhi_skb_priv *tx_priv; - rmnet_log(rmnet_mhi_ptr, - MSG_VERBOSE, - "Entered chan %d\n", - rmnet_mhi_ptr->tx_channel); + rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, + "Entered chan %d\n", rmnet_mhi_ptr->tx_channel); tx_priv = (struct mhi_skb_priv *)(skb->cb); tx_priv->dma_size = skb->len; tx_priv->dma_addr = 0; + read_lock_bh(&rmnet_mhi_ptr->pm_lock); + if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) { + /* Only reason interface could be disabled and we get data + * is due to an SSR. We do not want to stop the queue and + * return error. instead we will flush all the uplink packets + * and return successful + */ + res = NETDEV_TX_OK; + dev_kfree_skb_any(skb); + goto mhi_xmit_exit; + } if (mhi_get_free_desc(rmnet_mhi_ptr->tx_client_handle) <= 0) { rmnet_log(rmnet_mhi_ptr, @@ -624,7 +635,8 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags); - return NETDEV_TX_BUSY; + res = NETDEV_TX_BUSY; + goto mhi_xmit_exit; } res = mhi_queue_xfer(rmnet_mhi_ptr->tx_client_handle, skb->data, @@ -641,15 +653,17 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags); - return NETDEV_TX_BUSY; + res = NETDEV_TX_BUSY; + goto mhi_xmit_exit; } - + res = NETDEV_TX_OK; skb_queue_tail(&(rmnet_mhi_ptr->tx_buffers), skb); dev->trans_start = jiffies; rmnet_mhi_ptr->debug.tx_queued_packets_count++; - +mhi_xmit_exit: + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n"); - return NETDEV_TX_OK; + return res; } static int rmnet_mhi_ioctl_extended(struct net_device *dev, struct ifreq *ifr) @@ -698,16 +712,19 @@ static int rmnet_mhi_ioctl_extended(struct net_device *dev, struct ifreq *ifr) sizeof(ext_cmd.u.if_name)); break; case RMNET_IOCTL_SET_SLEEP_STATE: + read_lock_bh(&rmnet_mhi_ptr->pm_lock); if (rmnet_mhi_ptr->mhi_enabled && rmnet_mhi_ptr->tx_client_handle != NULL) { + rmnet_mhi_ptr->wake_count += (ext_cmd.u.data) ? 
-1 : 1; mhi_set_lpm(rmnet_mhi_ptr->tx_client_handle, ext_cmd.u.data); } else { - rmnet_log(rmnet_mhi_ptr, - MSG_ERROR, + rmnet_log(rmnet_mhi_ptr, MSG_ERROR, "Cannot set LPM value, MHI is not up.\n"); + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); return -ENODEV; } + read_unlock_bh(&rmnet_mhi_ptr->pm_lock); break; default: rc = -EINVAL; @@ -832,9 +849,8 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr) "Failed to start TX chan ret %d\n", r); goto mhi_tx_chan_start_fail; - } else { - rmnet_mhi_ptr->tx_enabled = 1; } + client_handle = rmnet_mhi_ptr->tx_client_handle; } if (rmnet_mhi_ptr->rx_client_handle != NULL) { @@ -848,8 +864,6 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr) "Failed to start RX chan ret %d\n", r); goto mhi_rx_chan_start_fail; - } else { - rmnet_mhi_ptr->rx_enabled = 1; } /* Both tx & rx client handle contain same device info */ client_handle = rmnet_mhi_ptr->rx_client_handle; @@ -860,62 +874,64 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr) goto net_dev_alloc_fail; } - snprintf(ifalias, - sizeof(ifalias), - "%s_%04x_%02u.%02u.%02u_%u", - rmnet_mhi_ptr->interface_name, - client_handle->dev_id, - client_handle->domain, - client_handle->bus, - client_handle->slot, - rmnet_mhi_ptr->dev_id); - - snprintf(ifname, sizeof(ifname), "%s%%d", - rmnet_mhi_ptr->interface_name); - rtnl_lock(); - rmnet_mhi_ptr->dev = - alloc_netdev(sizeof(struct rmnet_mhi_private *), - ifname, NET_NAME_PREDICTABLE, rmnet_mhi_setup); if (!rmnet_mhi_ptr->dev) { - rmnet_log(rmnet_mhi_ptr, - MSG_CRITICAL, - "Network device allocation failed\n"); - ret = -ENOMEM; - goto net_dev_alloc_fail; + snprintf(ifalias, sizeof(ifalias), + "%s_%04x_%02u.%02u.%02u_%u", + rmnet_mhi_ptr->interface_name, + client_handle->dev_id, + client_handle->domain, + client_handle->bus, + client_handle->slot, + rmnet_mhi_ptr->dev_id); + + snprintf(ifname, sizeof(ifname), "%s%%d", + rmnet_mhi_ptr->interface_name); + + rtnl_lock(); + rmnet_mhi_ptr->dev = alloc_netdev( + sizeof(struct rmnet_mhi_private *), + ifname, NET_NAME_PREDICTABLE, rmnet_mhi_setup); + + if (!rmnet_mhi_ptr->dev) { + rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL, + "Network device allocation failed\n"); + ret = -ENOMEM; + goto net_dev_alloc_fail; + } + SET_NETDEV_DEV(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->pdev->dev); + dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias)); + rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev); + rtnl_unlock(); + *rmnet_mhi_ctxt = rmnet_mhi_ptr; + + ret = dma_set_mask(&rmnet_mhi_ptr->dev->dev, MHI_DMA_MASK); + if (ret) + rmnet_mhi_ptr->allocation_flags = GFP_KERNEL; + else + rmnet_mhi_ptr->allocation_flags = GFP_DMA; + + netif_napi_add(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->napi, + rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE); + + ret = register_netdev(rmnet_mhi_ptr->dev); + if (ret) { + rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL, + "Network device registration failed\n"); + goto net_dev_reg_fail; + } } - SET_NETDEV_DEV(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->pdev->dev); - dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias)); - rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev); - rtnl_unlock(); - *rmnet_mhi_ctxt = rmnet_mhi_ptr; - - ret = dma_set_mask(&(rmnet_mhi_ptr->dev->dev), - MHI_DMA_MASK); - if (ret) - rmnet_mhi_ptr->allocation_flags = GFP_KERNEL; - else - rmnet_mhi_ptr->allocation_flags = GFP_DMA; + + write_lock_irq(&rmnet_mhi_ptr->pm_lock); + rmnet_mhi_ptr->mhi_enabled = 1; + write_unlock_irq(&rmnet_mhi_ptr->pm_lock); r = rmnet_mhi_init_inbound(rmnet_mhi_ptr); if 
(r) { - rmnet_log(rmnet_mhi_ptr, - MSG_CRITICAL, - "Failed to init inbound ret %d\n", - r); + rmnet_log(rmnet_mhi_ptr, MSG_INFO, + "Failed to init inbound ret %d\n", r); } - netif_napi_add(rmnet_mhi_ptr->dev, &(rmnet_mhi_ptr->napi), - rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE); - - rmnet_mhi_ptr->mhi_enabled = 1; - ret = register_netdev(rmnet_mhi_ptr->dev); - if (ret) { - rmnet_log(rmnet_mhi_ptr, - MSG_CRITICAL, - "Network device registration failed\n"); - goto net_dev_reg_fail; - } napi_enable(&(rmnet_mhi_ptr->napi)); rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited.\n"); @@ -951,25 +967,47 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info) switch (cb_info->cb_reason) { case MHI_CB_MHI_DISABLED: - rmnet_log(rmnet_mhi_ptr, - MSG_CRITICAL, - "Got MHI_DISABLED notification. Stopping stack\n"); - if (rmnet_mhi_ptr->mhi_enabled) { - rmnet_mhi_ptr->mhi_enabled = 0; - /* Ensure MHI is disabled before other mem ops */ - wmb(); - while (atomic_read(&rmnet_mhi_ptr->pending_data)) { - rmnet_log(rmnet_mhi_ptr, - MSG_CRITICAL, - "Waiting for channels to stop.\n"); - msleep(25); - } + case MHI_CB_MHI_SHUTDOWN: + case MHI_CB_SYS_ERROR: + rmnet_log(rmnet_mhi_ptr, MSG_INFO, + "Got MHI_SYS_ERROR notification. Stopping stack\n"); + + /* Disable interface on first notification. Long + * as we set mhi_enabled = 0, we gurantee rest of + * driver will not touch any critical data. + */ + write_lock_irq(&rmnet_mhi_ptr->pm_lock); + rmnet_mhi_ptr->mhi_enabled = 0; + write_unlock_irq(&rmnet_mhi_ptr->pm_lock); + + if (cb_info->chan == rmnet_mhi_ptr->rx_channel) { + rmnet_log(rmnet_mhi_ptr, MSG_INFO, + "Receive MHI_DISABLE notification for rx path\n"); rmnet_mhi_disable(rmnet_mhi_ptr); + } else { + rmnet_log(rmnet_mhi_ptr, MSG_INFO, + "Receive MHI_DISABLE notification for tx path\n"); + rmnet_mhi_ptr->tx_enabled = 0; + rmnet_mhi_internal_clean_unmap_buffers + (rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->tx_buffers, + DMA_TO_DEVICE); + } + + /* Remove all votes disabling low power mode */ + if (!rmnet_mhi_ptr->tx_enabled && !rmnet_mhi_ptr->rx_enabled) { + struct mhi_client_handle *handle = + rmnet_mhi_ptr->rx_client_handle; + + if (!handle) + handle = rmnet_mhi_ptr->tx_client_handle; + while (rmnet_mhi_ptr->wake_count) { + mhi_set_lpm(handle, true); + rmnet_mhi_ptr->wake_count--; + } } break; case MHI_CB_MHI_ENABLED: - rmnet_log(rmnet_mhi_ptr, - MSG_CRITICAL, + rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Got MHI_ENABLED notification. 
Starting stack\n"); if (cb_info->chan == rmnet_mhi_ptr->rx_channel) rmnet_mhi_ptr->rx_enabled = 1; @@ -998,16 +1036,10 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info) } break; case MHI_CB_XFER: - atomic_inc(&rmnet_mhi_ptr->pending_data); - /* Flush pending data is set before any other mem operations */ - wmb(); - if (rmnet_mhi_ptr->mhi_enabled) { - if (cb_info->chan == rmnet_mhi_ptr->rx_channel) - rmnet_mhi_rx_cb(cb_info->result); - else - rmnet_mhi_tx_cb(cb_info->result); - } - atomic_dec(&rmnet_mhi_ptr->pending_data); + if (cb_info->chan == rmnet_mhi_ptr->rx_channel) + rmnet_mhi_rx_cb(cb_info->result); + else + rmnet_mhi_tx_cb(cb_info->result); break; default: break; @@ -1172,6 +1204,7 @@ static int rmnet_mhi_probe(struct platform_device *pdev) return -ENOMEM; rmnet_mhi_ptr->pdev = pdev; spin_lock_init(&rmnet_mhi_ptr->out_chan_full_lock); + rwlock_init(&rmnet_mhi_ptr->pm_lock); rc = of_property_read_u32(pdev->dev.of_node, "qcom,mhi-mru", diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 35e5d980ed49..d1775748a7cd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4671,7 +4671,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) lockdep_assert_held(&ar->conf_mutex); list_for_each_entry(arvif, &ar->arvifs, list) { - WARN_ON(arvif->txpower < 0); + if (arvif->txpower <= 0) + continue; if (txpower == -1) txpower = arvif->txpower; @@ -4679,8 +4680,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar) txpower = min(txpower, arvif->txpower); } - if (WARN_ON(txpower == -1)) - return -EINVAL; + if (txpower == -1) + return 0; ret = ath10k_mac_txpower_setup(ar, txpower); if (ret) { @@ -5190,6 +5191,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); } + ret = ath10k_mac_txpower_recalc(ar); + if (ret) + ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); + spin_lock_bh(&ar->htt.tx_lock); ath10k_mac_vif_tx_unlock_all(arvif); spin_unlock_bh(&ar->htt.tx_lock); diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 7d20f087da71..84a9b1a9577c 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -29,21 +29,34 @@ ath10k_snoc_service_notifier_notify(struct notifier_block *nb, service_notifier_nb); enum pd_subsys_state *state = data; struct ath10k *ar = ar_snoc->ar; + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; + int ret; switch (notification) { case SERVREG_NOTIF_SERVICE_STATE_DOWN_V01: ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service down, data: 0x%pK\n", data); - if (!state || *state != ROOT_PD_SHUTDOWN) + if (!state || *state != ROOT_PD_SHUTDOWN) { atomic_set(&ar_snoc->fw_crashed, 1); + atomic_set(&qmi_cfg->fw_ready, 0); + } ath10k_dbg(ar, ATH10K_DBG_SNOC, "PD went down %d\n", atomic_read(&ar_snoc->fw_crashed)); break; case SERVREG_NOTIF_SERVICE_STATE_UP_V01: ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service up\n"); - queue_work(ar->workqueue, &ar->restart_work); + ret = wait_event_timeout(ath10k_fw_ready_wait_event, + (atomic_read(&qmi_cfg->fw_ready) && + atomic_read(&qmi_cfg->server_connected)), + msecs_to_jiffies(ATH10K_SNOC_WLAN_FW_READY_TIMEOUT)); + if (ret) { + queue_work(ar->workqueue, &ar->restart_work); + } else { + ath10k_err(ar, "restart failed, fw_ready timed out\n"); + return NOTIFY_OK; + } break; default: ath10k_dbg(ar, ATH10K_DBG_SNOC, @@ -188,17 +201,18 @@ static int ath10k_snoc_modem_notifier_nb(struct 
notifier_block *nb, struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, modem_ssr_nb); struct ath10k *ar = ar_snoc->ar; + struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; if (code != SUBSYS_BEFORE_SHUTDOWN) return NOTIFY_OK; - if (notif->crashed) + if (notif->crashed) { atomic_set(&ar_snoc->fw_crashed, 1); + atomic_set(&qmi_cfg->fw_ready, 0); + } ath10k_dbg(ar, ATH10K_DBG_SNOC, "Modem went down %d\n", atomic_read(&ar_snoc->fw_crashed)); - if (notif->crashed) - queue_work(ar->workqueue, &ar->restart_work); return NOTIFY_OK; } @@ -255,6 +269,7 @@ ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type, { int ret = 0; int i = 0; + unsigned long irq_flags; struct ath10k *ar = (struct ath10k *)data; struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); struct ath10k_snoc_qmi_config *qmi_cfg = &ar_snoc->qmi_cfg; @@ -267,7 +282,7 @@ ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type, return -EINVAL; } - spin_lock_bh(&qmi_cfg->event_lock); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); for (i = 0; i < ATH10K_SNOC_DRIVER_EVENT_MAX; i++) { if (atomic_read(&qmi_cfg->qmi_ev_list[i].event_handled)) { @@ -288,7 +303,7 @@ ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type, if (i >= ATH10K_SNOC_DRIVER_EVENT_MAX) i = ATH10K_SNOC_DRIVER_EVENT_SERVER_ARRIVE; - spin_unlock_bh(&qmi_cfg->event_lock); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); queue_work(qmi_cfg->event_wq, &qmi_cfg->event_work); @@ -304,16 +319,16 @@ ath10k_snoc_driver_event_post(enum ath10k_snoc_driver_event_type type, ath10k_dbg(ar, ATH10K_DBG_SNOC, "Completed event: %s(%d)\n", ath10k_snoc_driver_event_to_str(type), type); - spin_lock_bh(&qmi_cfg->event_lock); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); if (ret == -ERESTARTSYS && qmi_cfg->qmi_ev_list[i].ret == ATH10K_SNOC_EVENT_PENDING) { qmi_cfg->qmi_ev_list[i].sync = false; atomic_set(&qmi_cfg->qmi_ev_list[i].event_handled, 1); - spin_unlock_bh(&qmi_cfg->event_lock); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); ret = -EINTR; goto out; } - spin_unlock_bh(&qmi_cfg->event_lock); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); out: return ret; @@ -714,22 +729,23 @@ static int ath10k_snoc_driver_event_fw_ready_ind(struct ath10k *ar) static void ath10k_snoc_driver_event_work(struct work_struct *work) { - struct ath10k_snoc_qmi_driver_event *event; int ret; + unsigned long irq_flags; + struct ath10k_snoc_qmi_driver_event *event; struct ath10k_snoc_qmi_config *qmi_cfg = container_of(work, struct ath10k_snoc_qmi_config, event_work); struct ath10k_snoc *ar_snoc = container_of(qmi_cfg, struct ath10k_snoc, qmi_cfg); struct ath10k *ar = ar_snoc->ar; - spin_lock_bh(&qmi_cfg->event_lock); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); while (!list_empty(&qmi_cfg->event_list)) { event = list_first_entry(&qmi_cfg->event_list, struct ath10k_snoc_qmi_driver_event, list); list_del(&event->list); - spin_unlock_bh(&qmi_cfg->event_lock); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); ath10k_dbg(ar, ATH10K_DBG_SNOC, "Processing event: %s%s(%d)\n", ath10k_snoc_driver_event_to_str(event->type), @@ -756,17 +772,17 @@ static void ath10k_snoc_driver_event_work(struct work_struct *work) "Event Processed: %s%s(%d), ret: %d\n", ath10k_snoc_driver_event_to_str(event->type), event->sync ? 
"-sync" : "", event->type, ret); - spin_lock_bh(&qmi_cfg->event_lock); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); if (event->sync) { event->ret = ret; complete(&event->complete); continue; } - spin_unlock_bh(&qmi_cfg->event_lock); - spin_lock_bh(&qmi_cfg->event_lock); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); + spin_lock_irqsave(&qmi_cfg->event_lock, irq_flags); } - spin_unlock_bh(&qmi_cfg->event_lock); + spin_unlock_irqrestore(&qmi_cfg->event_lock, irq_flags); } static int @@ -838,7 +854,6 @@ int ath10k_snoc_start_qmi_service(struct ath10k *ar) goto out_destroy_wq; } - atomic_set(&qmi_cfg->fw_ready, 1); ath10k_dbg(ar, ATH10K_DBG_SNOC, "QMI service started successfully\n"); return 0; diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h index c8bc26bb96b2..29ad5acdf414 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.h +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -18,7 +18,7 @@ #define ATH10K_SNOC_WLAN_FW_READY_TIMEOUT 8000 #define WLFW_SERVICE_INS_ID_V01 0 -#define WLFW_CLIENT_ID 0x4b4e454c +#define WLFW_CLIENT_ID 0x41544851 #define WLFW_TIMEOUT_MS 20000 enum ath10k_snoc_driver_event_type { diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index add0a7cd9edb..2cbc8ee9abf9 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -650,6 +650,9 @@ static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (!ar_snoc) return -EINVAL; + if (atomic_read(&ar_snoc->fw_crashed)) + return -ESHUTDOWN; + snoc_pipe = &ar_snoc->pipe_info[pipe_id]; ce_pipe = snoc_pipe->ce_hdl; src_ring = ce_pipe->src_ring; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 923fe470360c..37898146f01d 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -501,9 +501,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, for (i = 0; i < request->n_ssids; i++) { wil_dbg_misc(wil, "SSID[%d]", i); - print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, - request->ssids[i].ssid, - request->ssids[i].ssid_len); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + request->ssids[i].ssid, + request->ssids[i].ssid_len, true); } if (request->n_ssids) @@ -540,8 +540,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, } if (request->ie_len) - print_hex_dump_bytes("Scan IE ", DUMP_PREFIX_OFFSET, - request->ie, request->ie_len); + wil_hex_dump_misc("Scan IE ", DUMP_PREFIX_OFFSET, 16, 1, + request->ie, request->ie_len, true); else wil_dbg_misc(wil, "Scan has no IE's\n"); @@ -766,6 +766,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, if (rc == 0) { netif_carrier_on(ndev); wil6210_bus_request(wil, WIL_MAX_BUS_REQUEST_KBPS); + wil->bss = bss; /* Connect can take lots of time */ mod_timer(&wil->connect_timer, jiffies + msecs_to_jiffies(2000)); @@ -794,6 +795,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, return 0; } + wil->locally_generated_disc = true; rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, WMI_DISCONNECT_EVENTID, NULL, 0, WIL6210_DISCONNECT_TO_MS); @@ -847,7 +849,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, */ wil_dbg_misc(wil, "mgmt_tx\n"); - print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len); + wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, + len, true); cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { @@ -1180,18 +1183,18 @@ static int 
_wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, static void wil_print_bcon_data(struct cfg80211_beacon_data *b) { - print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, - b->head, b->head_len); - print_hex_dump_bytes("tail ", DUMP_PREFIX_OFFSET, - b->tail, b->tail_len); - print_hex_dump_bytes("BCON IE ", DUMP_PREFIX_OFFSET, - b->beacon_ies, b->beacon_ies_len); - print_hex_dump_bytes("PROBE ", DUMP_PREFIX_OFFSET, - b->probe_resp, b->probe_resp_len); - print_hex_dump_bytes("PROBE IE ", DUMP_PREFIX_OFFSET, - b->proberesp_ies, b->proberesp_ies_len); - print_hex_dump_bytes("ASSOC IE ", DUMP_PREFIX_OFFSET, - b->assocresp_ies, b->assocresp_ies_len); + wil_hex_dump_misc("head ", DUMP_PREFIX_OFFSET, 16, 1, + b->head, b->head_len, true); + wil_hex_dump_misc("tail ", DUMP_PREFIX_OFFSET, 16, 1, + b->tail, b->tail_len, true); + wil_hex_dump_misc("BCON IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->beacon_ies, b->beacon_ies_len, true); + wil_hex_dump_misc("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, + b->probe_resp, b->probe_resp_len, true); + wil_hex_dump_misc("PROBE IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->proberesp_ies, b->proberesp_ies_len, true); + wil_hex_dump_misc("ASSOC IE ", DUMP_PREFIX_OFFSET, 16, 1, + b->assocresp_ies, b->assocresp_ies_len, true); } /* internal functions for device reset and starting AP */ @@ -1387,8 +1390,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval, info->dtim_period); wil_dbg_misc(wil, "PBSS %d\n", info->pbss); - print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET, - info->ssid, info->ssid_len); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + info->ssid, info->ssid_len, true); wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); @@ -1411,6 +1414,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS); wil_set_recovery_state(wil, fw_recovery_idle); + set_bit(wil_status_resetting, wil->status); + mutex_lock(&wil->mutex); wmi_pcp_stop(wil); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 54d978d884ff..fca8acffeed5 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -30,8 +30,8 @@ bool debug_fw; /* = false; */ module_param(debug_fw, bool, 0444); MODULE_PARM_DESC(debug_fw, " do not perform card reset. 
For FW debug"); -static bool oob_mode; -module_param(oob_mode, bool, 0444); +static u8 oob_mode; +module_param(oob_mode, byte, 0444); MODULE_PARM_DESC(oob_mode, " enable out of the box (OOB) mode in FW, for diagnostics and certification"); @@ -190,6 +190,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) break; } sta->status = wil_sta_unused; + sta->fst_link_loss = false; } /* reorder buffers */ for (i = 0; i < WIL_STA_TID_NUM; i++) { @@ -276,11 +277,15 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, if (test_bit(wil_status_fwconnected, wil->status)) { clear_bit(wil_status_fwconnected, wil->status); cfg80211_disconnected(ndev, reason_code, - NULL, 0, false, GFP_KERNEL); + NULL, 0, + wil->locally_generated_disc, + GFP_KERNEL); + wil->locally_generated_disc = false; } else if (test_bit(wil_status_fwconnecting, wil->status)) { cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); + wil->bss = NULL; } clear_bit(wil_status_fwconnecting, wil->status); break; @@ -302,10 +307,34 @@ static void wil_disconnect_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, disconnect_worker); + struct net_device *ndev = wil_to_ndev(wil); + int rc; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_disconnect_event evt; + } __packed reply; - mutex_lock(&wil->mutex); - _wil6210_disconnect(wil, NULL, WLAN_REASON_UNSPECIFIED, false); - mutex_unlock(&wil->mutex); + if (test_bit(wil_status_fwconnected, wil->status)) + /* connect succeeded after all */ + return; + + if (!test_bit(wil_status_fwconnecting, wil->status)) + /* already disconnected */ + return; + + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), + WIL6210_DISCONNECT_TO_MS); + if (rc) { + wil_err(wil, "disconnect error %d\n", rc); + return; + } + + wil_update_net_queues_bh(wil, NULL, true); + netif_carrier_off(ndev); + cfg80211_connect_result(ndev, NULL, NULL, 0, NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); + clear_bit(wil_status_fwconnecting, wil->status); } static void wil_connect_timer_fn(ulong x) @@ -614,13 +643,25 @@ static inline void wil_release_cpu(struct wil6210_priv *wil) wil_w(wil, RGF_USER_USER_CPU_0, 1); } -static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable) +static void wil_set_oob_mode(struct wil6210_priv *wil, u8 mode) { - wil_info(wil, "enable=%d\n", enable); - if (enable) + wil_info(wil, "oob_mode to %d\n", mode); + switch (mode) { + case 0: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE | + BIT_USER_OOB_R2_MODE); + break; + case 1: + wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE); wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); - else + break; + case 2: wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE); + wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_R2_MODE); + break; + default: + wil_err(wil, "invalid oob_mode: %d\n", mode); + } } static int wil_target_reset(struct wil6210_priv *wil) @@ -1164,6 +1205,7 @@ void wil_halp_vote(struct wil6210_priv *wil) wil->halp.ref_cnt); if (++wil->halp.ref_cnt == 1) { + reinit_completion(&wil->halp.comp); wil6210_set_halp(wil); rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); if (!rc) { diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index d472e13fb9d9..2a515e848820 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -26,6 +26,10 @@ static bool use_msi = true; 
module_param(use_msi, bool, 0444); MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); +static bool ftm_mode; +module_param(ftm_mode, bool, 0444); +MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false"); + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP static int wil6210_pm_notify(struct notifier_block *notify_block, @@ -36,13 +40,15 @@ static int wil6210_pm_notify(struct notifier_block *notify_block, static void wil_set_capabilities(struct wil6210_priv *wil) { + const char *wil_fw_name; u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID); u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) & RGF_USER_REVISION_ID_MASK); bitmap_zero(wil->hw_capabilities, hw_capability_last); bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX); - wil->wil_fw_name = WIL_FW_NAME_DEFAULT; + wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT : + WIL_FW_NAME_DEFAULT; wil->chip_revision = chip_revision; switch (jtag_id) { @@ -51,9 +57,11 @@ void wil_set_capabilities(struct wil6210_priv *wil) case REVISION_ID_SPARROW_D0: wil->hw_name = "Sparrow D0"; wil->hw_version = HW_VER_SPARROW_D0; - if (wil_fw_verify_file_exists(wil, - WIL_FW_NAME_SPARROW_PLUS)) - wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS; + wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS : + WIL_FW_NAME_SPARROW_PLUS; + + if (wil_fw_verify_file_exists(wil, wil_fw_name)) + wil->wil_fw_name = wil_fw_name; break; case REVISION_ID_SPARROW_B0: wil->hw_name = "Sparrow B0"; diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 7260bef314a4..2ae4fe85cc8c 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -71,6 +71,11 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system"); + if (test_bit(wil_status_suspended, wil->status)) { + wil_dbg_pm(wil, "trying to suspend while suspended\n"); + return 0; + } + /* if netif up, hardware is alive, shut it down */ if (ndev->flags & IFF_UP) { rc = wil_down(wil); @@ -86,10 +91,14 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) if (wil->platform_ops.suspend) { rc = wil->platform_ops.suspend(wil->platform_handle); - if (rc) + if (rc) { wil_enable_irq(wil); + goto out; + } } + set_bit(wil_status_suspended, wil->status); + out: wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); @@ -117,10 +126,13 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) /* if netif up, bring hardware up * During open(), IFF_UP set after actual device method - * invocation. This prevent recursive call to wil_up() + * invocation. This prevent recursive call to wil_up(). 
+ * wil_status_suspended will be cleared in wil_reset */ if (ndev->flags & IFF_UP) rc = wil_up(wil); + else + clear_bit(wil_status_suspended, wil->status); out: wil_dbg_pm(wil, "resume: %s => %d\n", diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c index a3689738a070..b4c4d09011b4 100644 --- a/drivers/net/wireless/ath/wil6210/sysfs.c +++ b/drivers/net/wireless/ath/wil6210/sysfs.c @@ -204,9 +204,74 @@ out: static DEVICE_ATTR(thermal_throttling, 0644, wil_tt_sysfs_show, wil_tt_sysfs_store); +static ssize_t +wil_fst_link_loss_sysfs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + ssize_t len = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(wil->sta); i++) + if (wil->sta[i].status == wil_sta_connected) + len += snprintf(buf + len, PAGE_SIZE - len, + "[%d] %pM %s\n", i, wil->sta[i].addr, + wil->sta[i].fst_link_loss ? + "On" : "Off"); + + return len; +} + +static ssize_t +wil_fst_link_loss_sysfs_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct wil6210_priv *wil = dev_get_drvdata(dev); + u8 addr[ETH_ALEN]; + char *token, *dupbuf, *tmp; + int rc = -EINVAL; + bool fst_link_loss; + + tmp = kmemdup(buf, count + 1, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + tmp[count] = '\0'; + dupbuf = tmp; + + token = strsep(&dupbuf, " "); + if (!token) + goto out; + + /* mac address */ + if (sscanf(token, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &addr[0], &addr[1], &addr[2], + &addr[3], &addr[4], &addr[5]) != 6) + goto out; + + /* On/Off */ + if (strtobool(dupbuf, &fst_link_loss)) + goto out; + + wil_dbg_misc(wil, "set [%pM] with %d\n", addr, fst_link_loss); + + rc = wmi_link_maintain_cfg_write(wil, addr, fst_link_loss); + if (!rc) + rc = count; + +out: + kfree(tmp); + return rc; +} + +static DEVICE_ATTR(fst_link_loss, 0644, + wil_fst_link_loss_sysfs_show, + wil_fst_link_loss_sysfs_store); + static struct attribute *wil6210_sysfs_entries[] = { &dev_attr_ftm_txrx_offset.attr, &dev_attr_thermal_throttling.attr, + &dev_attr_fst_link_loss.attr, NULL }; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 6111ef6408ea..0529d10a8268 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -37,8 +37,13 @@ extern bool debug_fw; extern bool disable_ap_sme; #define WIL_NAME "wil6210" -#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */ -#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */ + +#define WIL_FW_NAME_DEFAULT "wil6210.fw" +#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw" + +#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" +#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw" + #define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */ #define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */ @@ -141,6 +146,7 @@ struct RGF_ICR { #define RGF_USER_USAGE_1 (0x880004) #define RGF_USER_USAGE_6 (0x880018) #define BIT_USER_OOB_MODE BIT(31) + #define BIT_USER_OOB_R2_MODE BIT(30) #define RGF_USER_USAGE_8 (0x880020) #define BIT_USER_PREVENT_DEEP_SLEEP BIT(0) #define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1) @@ -417,6 +423,7 @@ enum { /* for wil6210_priv.status */ wil_status_irqen, /* FIXME: interrupts enabled - for debug */ wil_status_napi_en, /* NAPI enabled protected by wil->mutex */ wil_status_resetting, /* reset in progress */ + wil_status_suspended, /* suspend completed, device is 
suspended */ wil_status_last /* keep last */ }; @@ -532,6 +539,7 @@ struct wil_sta_info { struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM]; struct wil_tid_crypto_rx group_crypto_rx; u8 aid; /* 1-254; 0 if unknown/not reported */ + bool fst_link_loss; }; enum { @@ -620,6 +628,8 @@ struct wil6210_priv { u16 channel; /* relevant in AP mode */ int sinfo_gen; u32 ap_isolate; /* no intra-BSS communication */ + struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ + int locally_generated_disc; /* relevant in STA mode */ /* interrupt moderation */ u32 tx_max_burst_duration; u32 tx_interframe_timeout; @@ -777,6 +787,12 @@ static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val) print_hex_dump_debug("DBG[ WMI]" prefix_str,\ prefix_type, rowsize, \ groupsize, buf, len, ascii) + +#define wil_hex_dump_misc(prefix_str, prefix_type, rowsize, \ + groupsize, buf, len, ascii) \ + print_hex_dump_debug("DBG[MISC]" prefix_str,\ + prefix_type, rowsize, \ + groupsize, buf, len, ascii) #else /* defined(CONFIG_DYNAMIC_DEBUG) */ static inline void wil_hex_dump_txrx(const char *prefix_str, int prefix_type, int rowsize, @@ -789,6 +805,12 @@ void wil_hex_dump_wmi(const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, bool ascii) { } + +static inline +void wil_hex_dump_misc(const char *prefix_str, int prefix_type, int rowsize, + int groupsize, const void *buf, size_t len, bool ascii) +{ +} #endif /* defined(CONFIG_DYNAMIC_DEBUG) */ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, @@ -974,5 +996,9 @@ void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil, void wil_aoa_evt_meas(struct wil6210_priv *wil, struct wmi_aoa_meas_event *evt, int len); +/* link loss */ +int wmi_link_maintain_cfg_write(struct wil6210_priv *wil, + const u8 *addr, + bool fst_link_loss); #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6a6ba02beba0..41afbdc34c18 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -518,16 +518,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) assoc_resp_ielen = 0; } - mutex_lock(&wil->mutex); if (test_bit(wil_status_resetting, wil->status) || !test_bit(wil_status_fwready, wil->status)) { wil_err(wil, "status_resetting, cancel connect event, CID %d\n", evt->cid); - mutex_unlock(&wil->mutex); /* no need for cleanup, wil_reset will do that */ return; } + mutex_lock(&wil->mutex); + if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (!test_bit(wil_status_fwconnecting, wil->status)) { @@ -573,12 +573,16 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) GFP_KERNEL); goto out; } else { - cfg80211_connect_result(ndev, evt->bssid, - assoc_req_ie, assoc_req_ielen, - assoc_resp_ie, assoc_resp_ielen, - WLAN_STATUS_SUCCESS, - GFP_KERNEL); + struct wiphy *wiphy = wil_to_wiphy(wil); + + cfg80211_ref_bss(wiphy, wil->bss); + cfg80211_connect_bss(ndev, evt->bssid, wil->bss, + assoc_req_ie, assoc_req_ielen, + assoc_resp_ie, assoc_resp_ielen, + WLAN_STATUS_SUCCESS, GFP_KERNEL, + NL80211_TIMEOUT_UNSPECIFIED); } + wil->bss = NULL; } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { if (rc) { @@ -627,6 +631,13 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, wil->sinfo_gen++; + if (test_bit(wil_status_resetting, wil->status) || + 
!test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "status_resetting, cancel disconnect event\n"); + /* no need for cleanup, wil_reset will do that */ + return; + } + mutex_lock(&wil->mutex); wil6210_disconnect(wil, evt->bssid, reason_code, true); mutex_unlock(&wil->mutex); @@ -1524,6 +1535,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason); + wil->locally_generated_disc = true; if (del_sta) { ether_addr_copy(del_sta_cmd.dst_mac, mac); rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd, @@ -1841,6 +1853,61 @@ void wmi_event_flush(struct wil6210_priv *wil) spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); } +int wmi_link_maintain_cfg_write(struct wil6210_priv *wil, + const u8 *addr, + bool fst_link_loss) +{ + int rc; + int cid = wil_find_cid(wil, addr); + u32 cfg_type; + struct wmi_link_maintain_cfg_write_cmd cmd; + struct { + struct wmi_cmd_hdr wmi; + struct wmi_link_maintain_cfg_write_done_event evt; + } __packed reply; + + if (cid < 0) + return cid; + + switch (wil->wdev->iftype) { + case NL80211_IFTYPE_STATION: + cfg_type = fst_link_loss ? + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_STA : + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_STA; + break; + case NL80211_IFTYPE_AP: + cfg_type = fst_link_loss ? + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_FST_AP : + WMI_LINK_MAINTAIN_CFG_TYPE_DEFAULT_NORMAL_AP; + break; + default: + wil_err(wil, "Unsupported for iftype %d", wil->wdev->iftype); + return -EINVAL; + } + + wil_dbg_misc(wil, "Setting cid:%d with cfg_type:%d\n", cid, cfg_type); + + cmd.cfg_type = cpu_to_le32(cfg_type); + cmd.cid = cpu_to_le32(cid); + + reply.evt.status = cpu_to_le32(WMI_FW_STATUS_FAILURE); + + rc = wmi_call(wil, WMI_LINK_MAINTAIN_CFG_WRITE_CMDID, &cmd, sizeof(cmd), + WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENTID, &reply, + sizeof(reply), 250); + if (rc) { + wil_err(wil, "Failed to %s FST link loss", + fst_link_loss ? "enable" : "disable"); + } else if (reply.evt.status == WMI_FW_STATUS_SUCCESS) { + wil->sta[cid].fst_link_loss = fst_link_loss; + } else { + wil_err(wil, "WMI_LINK_MAINTAIN_CFG_WRITE_CMDID returned status %d", + reply.evt.status); + rc = -EINVAL; + } + return rc; +} + static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, void *d, int len) { diff --git a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c index e93416ebd343..09c37c2383c6 100644 --- a/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c +++ b/drivers/net/wireless/cnss_prealloc/cnss_prealloc.c @@ -181,7 +181,6 @@ void *wcnss_prealloc_get(unsigned int size) pr_err("wcnss: %s: prealloc not available for size: %d\n", __func__, size); - WARN_ON(1); return NULL; } diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c index 3f9eeabc5464..9db2871e8150 100644 --- a/drivers/net/wireless/wcnss/wcnss_wlan.c +++ b/drivers/net/wireless/wcnss/wcnss_wlan.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -192,6 +192,8 @@ static DEFINE_SPINLOCK(reg_spinlock); #define WCNSS_USR_WLAN_MAC_ADDR (WCNSS_USR_CTRL_MSG_START + 3) #define MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x" +#define SHOW_MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x\n" +#define WCNSS_USER_MAC_ADDR_LENGTH 18 /* message types */ #define WCNSS_CTRL_MSG_START 0x01000000 @@ -427,23 +429,28 @@ static struct { static ssize_t wcnss_wlan_macaddr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - char macAddr[WLAN_MAC_ADDR_SIZE]; + int index; + int macAddr[WLAN_MAC_ADDR_SIZE]; if (!penv) return -ENODEV; - pr_debug("%s: Receive MAC Addr From user space: %s\n", __func__, buf); + if (strlen(buf) != WCNSS_USER_MAC_ADDR_LENGTH) { + dev_err(dev, "%s: Invalid MAC addr length\n", __func__); + return -EINVAL; + } if (WLAN_MAC_ADDR_SIZE != sscanf(buf, MAC_ADDRESS_STR, - (int *)&macAddr[0], (int *)&macAddr[1], - (int *)&macAddr[2], (int *)&macAddr[3], - (int *)&macAddr[4], (int *)&macAddr[5])) { - + &macAddr[0], &macAddr[1], &macAddr[2], + &macAddr[3], &macAddr[4], &macAddr[5])) { pr_err("%s: Failed to Copy MAC\n", __func__); return -EINVAL; } - memcpy(penv->wlan_nv_macAddr, macAddr, sizeof(penv->wlan_nv_macAddr)); + for (index = 0; index < WLAN_MAC_ADDR_SIZE; index++) { + memcpy(&penv->wlan_nv_macAddr[index], + (char *)&macAddr[index], sizeof(char)); + } pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__, penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1], @@ -459,7 +466,7 @@ static ssize_t wcnss_wlan_macaddr_show(struct device *dev, if (!penv) return -ENODEV; - return scnprintf(buf, PAGE_SIZE, MAC_ADDRESS_STR, + return scnprintf(buf, PAGE_SIZE, SHOW_MAC_ADDRESS_STR, penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1], penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3], penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]); diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index eade4f85632a..f364882943e1 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -24,6 +24,7 @@ #include <linux/kernel.h> #include <linux/of_pci.h> #include <linux/pci.h> +#include <linux/iommu.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/regulator/rpm-smd-regulator.h> @@ -480,6 +481,11 @@ enum msm_pcie_link_status { MSM_PCIE_LINK_DISABLED }; +enum msm_pcie_boot_option { + MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0), + MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1) +}; + /* gpio info structure */ struct msm_pcie_gpio_info_t { char *name; @@ -628,7 +634,7 @@ struct msm_pcie_dev_t { uint32_t perst_delay_us_max; uint32_t tlp_rd_size; bool linkdown_panic; - bool ep_wakeirq; + uint32_t boot_option; uint32_t rc_idx; uint32_t phy_ver; @@ -1946,8 +1952,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev) dev->aer_enable ? 
"" : "not"); PCIE_DBG_FS(dev, "ext_ref_clk is %d\n", dev->ext_ref_clk); - PCIE_DBG_FS(dev, "ep_wakeirq is %d\n", - dev->ep_wakeirq); + PCIE_DBG_FS(dev, "boot_option is 0x%x\n", + dev->boot_option); PCIE_DBG_FS(dev, "phy_ver is %d\n", dev->phy_ver); PCIE_DBG_FS(dev, "drv_ready is %d\n", @@ -2562,7 +2568,7 @@ static struct dentry *dfile_linkdown_panic; static struct dentry *dfile_wr_offset; static struct dentry *dfile_wr_mask; static struct dentry *dfile_wr_value; -static struct dentry *dfile_ep_wakeirq; +static struct dentry *dfile_boot_option; static struct dentry *dfile_aer_enable; static struct dentry *dfile_corr_counter_limit; @@ -2831,13 +2837,13 @@ const struct file_operations msm_pcie_wr_value_ops = { .write = msm_pcie_set_wr_value, }; -static ssize_t msm_pcie_set_ep_wakeirq(struct file *file, +static ssize_t msm_pcie_set_boot_option(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long ret; char str[MAX_MSG_LEN]; - u32 new_ep_wakeirq = 0; + u32 new_boot_option = 0; int i; memset(str, 0, sizeof(str)); @@ -2846,33 +2852,33 @@ static ssize_t msm_pcie_set_ep_wakeirq(struct file *file, return -EFAULT; for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i) - new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0'); + new_boot_option = (new_boot_option * 10) + (str[i] - '0'); - if (new_ep_wakeirq <= 1) { + if (new_boot_option <= 1) { for (i = 0; i < MAX_RC_NUM; i++) { if (!rc_sel) { - msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq; + msm_pcie_dev[0].boot_option = new_boot_option; PCIE_DBG_FS(&msm_pcie_dev[0], - "PCIe: RC0: ep_wakeirq is now %d\n", - msm_pcie_dev[0].ep_wakeirq); + "PCIe: RC0: boot_option is now 0x%x\n", + msm_pcie_dev[0].boot_option); break; } else if (rc_sel & (1 << i)) { - msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq; + msm_pcie_dev[i].boot_option = new_boot_option; PCIE_DBG_FS(&msm_pcie_dev[i], - "PCIe: RC%d: ep_wakeirq is now %d\n", - i, msm_pcie_dev[i].ep_wakeirq); + "PCIe: RC%d: boot_option is now 0x%x\n", + i, msm_pcie_dev[i].boot_option); } } } else { - pr_err("PCIe: Invalid input for ep_wakeirq: %d. 
Please enter 0 or 1.\n", - new_ep_wakeirq); + pr_err("PCIe: Invalid input for boot_option: 0x%x.\n", + new_boot_option); } return count; } -const struct file_operations msm_pcie_ep_wakeirq_ops = { - .write = msm_pcie_set_ep_wakeirq, +const struct file_operations msm_pcie_boot_option_ops = { + .write = msm_pcie_set_boot_option, }; static ssize_t msm_pcie_set_aer_enable(struct file *file, @@ -3025,12 +3031,12 @@ static void msm_pcie_debugfs_init(void) goto wr_value_error; } - dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664, + dfile_boot_option = debugfs_create_file("boot_option", 0664, dent_msm_pcie, 0, - &msm_pcie_ep_wakeirq_ops); - if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) { - pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n"); - goto ep_wakeirq_error; + &msm_pcie_boot_option_ops); + if (!dfile_boot_option || IS_ERR(dfile_boot_option)) { + pr_err("PCIe: fail to create the file for debug_fs boot_option.\n"); + goto boot_option_error; } dfile_aer_enable = debugfs_create_file("aer_enable", 0664, @@ -3053,8 +3059,8 @@ static void msm_pcie_debugfs_init(void) corr_counter_limit_error: debugfs_remove(dfile_aer_enable); aer_enable_error: - debugfs_remove(dfile_ep_wakeirq); -ep_wakeirq_error: + debugfs_remove(dfile_boot_option); +boot_option_error: debugfs_remove(dfile_wr_value); wr_value_error: debugfs_remove(dfile_wr_mask); @@ -3081,7 +3087,7 @@ static void msm_pcie_debugfs_exit(void) debugfs_remove(dfile_wr_offset); debugfs_remove(dfile_wr_mask); debugfs_remove(dfile_wr_value); - debugfs_remove(dfile_ep_wakeirq); + debugfs_remove(dfile_boot_option); debugfs_remove(dfile_aer_enable); debugfs_remove(dfile_corr_counter_limit); } @@ -3312,7 +3318,7 @@ static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper, word_offset = where & ~0x3; byte_offset = where & 0x3; - mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset); + mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset); if (rc || !dev->enumerated) { config_base = rc ? 
dev->dm_core : dev->conf; @@ -3347,12 +3353,17 @@ static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper, writel_relaxed(wr_val, config_base + word_offset); wmb(); /* ensure config data is written to hardware register */ - if (rd_val == PCIE_LINK_DOWN) - PCIE_ERR(dev, - "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n", - rc_idx, bus->number, devfn, where, size); - else if (dev->shadow_en) - msm_pcie_save_shadow(dev, word_offset, wr_val, bdf, rc); + if (dev->shadow_en) { + if (rd_val == PCIE_LINK_DOWN && + (readl_relaxed(config_base) == PCIE_LINK_DOWN)) + PCIE_ERR(dev, + "Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n", + rc_idx, bus->number, devfn, + where, size); + else + msm_pcie_save_shadow(dev, word_offset, wr_val, + bdf, rc); + } PCIE_DBG3(dev, "RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n", @@ -5410,14 +5421,10 @@ static irqreturn_t handle_wake_irq(int irq, void *data) PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n", dev->rc_idx); - if (!dev->enumerated) { - PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx); - if (dev->ep_wakeirq) - schedule_work(&dev->handle_wake_work); - else - PCIE_DBG(dev, - "wake irq is received but ep_wakeirq is not supported for RC%d.\n", - dev->rc_idx); + if (!dev->enumerated && !(dev->boot_option & + MSM_PCIE_NO_WAKE_ENUMERATION)) { + PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx); + schedule_work(&dev->handle_wake_work); } else { PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx); __pm_stay_awake(&dev->ws); @@ -5561,7 +5568,7 @@ static irqreturn_t handle_global_irq(int irq, void *data) handle_aer_irq(irq, data); break; default: - PCIE_ERR(dev, + PCIE_DUMP(dev, "PCIe: RC%d: Unexpected event %d is caught!\n", dev->rc_idx, i); } @@ -5573,34 +5580,84 @@ static irqreturn_t handle_global_irq(int irq, void *data) return IRQ_HANDLED; } -void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev) +static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev, + struct pci_dev *pdev) { - int pos, i; + struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev); + int bypass_en = 0; + + if (!domain) { + PCIE_DBG(dev, + "PCIe: RC%d: client does not have an iommu domain\n", + dev->rc_idx); + return; + } + + iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en); + if (!bypass_en) { + int ret; + phys_addr_t pcie_base_addr = + dev->res[MSM_PCIE_RES_DM_CORE].resource->start; + dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE); + + ret = iommu_unmap(domain, iova, PAGE_SIZE); + if (ret != PAGE_SIZE) + PCIE_ERR(dev, + "PCIe: RC%d: failed to unmap QGIC address. ret = %d\n", + dev->rc_idx, ret); + } +} + +void msm_pcie_destroy_irq(unsigned int irq) +{ + int pos; + struct pci_dev *pdev = irq_get_chip_data(irq); + struct msi_desc *entry = irq_get_msi_desc(irq); + struct msi_desc *firstentry; struct msm_pcie_dev_t *dev; + u32 nvec; + int firstirq; - if (pcie_dev) - dev = pcie_dev; - else - dev = irq_get_chip_data(irq); + if (!pdev) { + pr_err("PCIe: pci device is null. IRQ:%d\n", irq); + return; + } + dev = PCIE_BUS_PRIV_DATA(pdev->bus); if (!dev) { - pr_err("PCIe: device is null. IRQ:%d\n", irq); + pr_err("PCIe: could not find RC. IRQ:%d\n", irq); + return; + } + + if (!entry) { + PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n", + dev->rc_idx, irq); + return; + } + + firstentry = first_pci_msi_entry(pdev); + if (!firstentry) { + PCIE_ERR(dev, + "PCIe: RC%d: firstentry msi desc is null. 
IRQ:%d\n", + dev->rc_idx, irq); return; } + firstirq = firstentry->irq; + nvec = (1 << entry->msi_attrib.multiple); + if (dev->msi_gicm_addr) { PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq); - for (i = 0; i < MSM_PCIE_MAX_MSI; i++) - if (irq == dev->msi[i].num) - break; - if (i == MSM_PCIE_MAX_MSI) { + if (irq < firstirq || irq > firstirq + nvec - 1) { PCIE_ERR(dev, "Could not find irq: %d in RC%d MSI table\n", irq, dev->rc_idx); return; } else { - pos = i; + if (irq == firstirq + nvec - 1) + msm_pcie_unmap_qgic_addr(dev, pdev); + pos = irq - firstirq; } } else { PCIE_DBG(dev, "destroy default MSI irq %d\n", irq); @@ -5620,7 +5677,7 @@ void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev) void arch_teardown_msi_irq(unsigned int irq) { PCIE_GEN_DBG("irq %d deallocated\n", irq); - msm_pcie_destroy_irq(irq, NULL); + msm_pcie_destroy_irq(irq); } void arch_teardown_msi_irqs(struct pci_dev *dev) @@ -5639,7 +5696,7 @@ void arch_teardown_msi_irqs(struct pci_dev *dev) continue; nvec = 1 << entry->msi_attrib.multiple; for (i = 0; i < nvec; i++) - msm_pcie_destroy_irq(entry->irq + i, pcie_dev); + arch_teardown_msi_irq(entry->irq + i); } } @@ -5701,6 +5758,7 @@ static int arch_setup_msi_irq_default(struct pci_dev *pdev, PCIE_DBG(dev, "irq %d allocated\n", irq); + irq_set_chip_data(irq, pdev); irq_set_msi_desc(irq, desc); /* write msi vector and data */ @@ -5748,10 +5806,76 @@ again: return irq; } +static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev, + struct pci_dev *pdev, + struct msi_msg *msg) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev); + struct iommu_domain_geometry geometry; + int ret, fastmap_en = 0, bypass_en = 0; + dma_addr_t iova; + phys_addr_t gicm_db_offset; + + msg->address_hi = 0; + msg->address_lo = dev->msi_gicm_addr; + + if (!domain) { + PCIE_DBG(dev, + "PCIe: RC%d: client does not have an iommu domain\n", + dev->rc_idx); + return 0; + } + + iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en); + + PCIE_DBG(dev, + "PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n", + dev->rc_idx, bypass_en ? "bypass" : "enabled", + pdev->bus->number, pdev->devfn); + + if (bypass_en) + return 0; + + iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap_en); + if (fastmap_en) { + iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geometry); + iova = geometry.aperture_start; + PCIE_DBG(dev, + "PCIe: RC%d: Use client's IOVA 0x%llx to map QGIC MSI address\n", + dev->rc_idx, iova); + } else { + phys_addr_t pcie_base_addr; + + /* + * Use PCIe DBI address as the IOVA since client cannot + * use this address for their IOMMU mapping. This will + * prevent any conflicts between PCIe host and + * client's mapping. 
+ */ + pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start; + iova = rounddown(pcie_base_addr, PAGE_SIZE); + } + + ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE), + PAGE_SIZE, IOMMU_READ | IOMMU_WRITE); + if (ret < 0) { + PCIE_ERR(dev, + "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n", + dev->rc_idx, ret); + return -ENOMEM; + } + + gicm_db_offset = dev->msi_gicm_addr - + rounddown(dev->msi_gicm_addr, PAGE_SIZE); + msg->address_lo = iova + gicm_db_offset; + + return 0; +} + static int arch_setup_msi_irq_qgic(struct pci_dev *pdev, struct msi_desc *desc, int nvec) { - int irq, index, firstirq = 0; + int irq, index, ret, firstirq = 0; struct msi_msg msg; struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus); @@ -5768,12 +5892,16 @@ static int arch_setup_msi_irq_qgic(struct pci_dev *pdev, firstirq = irq; irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING); + irq_set_chip_data(irq, pdev); } /* write msi vector and data */ irq_set_msi_desc(firstirq, desc); - msg.address_hi = 0; - msg.address_lo = dev->msi_gicm_addr; + + ret = msm_pcie_map_qgic_addr(dev, pdev, &msg); + if (ret) + return ret; + msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num); write_msi_msg(firstirq, &msg); @@ -5845,7 +5973,6 @@ static int msm_pcie_msi_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); return 0; } @@ -6091,12 +6218,12 @@ static int msm_pcie_probe(struct platform_device *pdev) msm_pcie_dev[rc_idx].rc_idx, msm_pcie_dev[rc_idx].smmu_sid_base); - msm_pcie_dev[rc_idx].ep_wakeirq = - of_property_read_bool((&pdev->dev)->of_node, - "qcom,ep-wakeirq"); + msm_pcie_dev[rc_idx].boot_option = 0; + ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option", + &msm_pcie_dev[rc_idx].boot_option); PCIE_DBG(&msm_pcie_dev[rc_idx], - "PCIe: EP of RC%d does %s assert wake when it is up.\n", - rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? 
"" : "not"); + "PCIe: RC%d boot option is 0x%x.\n", + rc_idx, msm_pcie_dev[rc_idx].boot_option); msm_pcie_dev[rc_idx].phy_ver = 1; ret = of_property_read_u32((&pdev->dev)->of_node, @@ -6375,9 +6502,10 @@ static int msm_pcie_probe(struct platform_device *pdev) msm_pcie_dev[rc_idx].drv_ready = true; - if (msm_pcie_dev[rc_idx].ep_wakeirq) { + if (msm_pcie_dev[rc_idx].boot_option & + MSM_PCIE_NO_PROBE_ENUMERATION) { PCIE_DBG(&msm_pcie_dev[rc_idx], - "PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n", + "PCIe: RC%d will be enumerated by client or endpoint.\n", rc_idx); mutex_unlock(&pcie_drv.drv_lock); return 0; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 4c75b4d392c6..39400dda27c2 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -367,6 +367,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) return err; } + armpmu->pmu_state = ARM_PMU_STATE_RUNNING; + return 0; } @@ -601,10 +603,12 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) struct platform_device *pmu_device = cpu_pmu->plat_device; struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events; + cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN; + irqs = min(pmu_device->num_resources, num_possible_cpus()); irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { + if (irq > 0 && irq_is_percpu(irq)) { on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1); free_percpu_irq(irq, &hw_events->percpu_pmu); } else { @@ -617,10 +621,11 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu) if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); - if (irq >= 0) + if (irq > 0) free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); } } + cpu_pmu->pmu_state = ARM_PMU_STATE_OFF; } static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) @@ -639,7 +644,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) } irq = platform_get_irq(pmu_device, 0); - if (irq >= 0 && irq_is_percpu(irq)) { + if (irq > 0 && irq_is_percpu(irq)) { err = request_percpu_irq(irq, handler, "arm-pmu", &hw_events->percpu_pmu); if (err) { @@ -648,6 +653,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return err; } on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1); + cpu_pmu->percpu_irq = irq; } else { for (i = 0; i < irqs; ++i) { int cpu = i; @@ -754,13 +760,6 @@ static void cpu_pm_pmu_common(void *info) return; } - /* - * Always reset the PMU registers on power-up even if - * there are no events running. - */ - if (cmd == CPU_PM_EXIT && armpmu->reset) - armpmu->reset(armpmu); - if (!enabled) { data->ret = NOTIFY_OK; return; @@ -795,6 +794,13 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd, .cpu = smp_processor_id(), }; + /* + * Always reset the PMU registers on power-up even if + * there are no events running. 
+ */ + if (cmd == CPU_PM_EXIT && data.armpmu->reset) + data.armpmu->reset(data.armpmu); + cpu_pm_pmu_common(&data); return data.ret; } @@ -824,6 +830,7 @@ static inline void cpu_pm_pmu_common(void *info) { } static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, void *hcpu) { + int irq = -1; unsigned long masked_action = (action & ~CPU_TASKS_FROZEN); struct cpu_pm_pmu_args data = { .armpmu = container_of(b, struct arm_pmu, hotplug_nb), @@ -835,37 +842,37 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, switch (masked_action) { case CPU_STARTING: - data.cmd = CPU_PM_EXIT; - break; - case CPU_DYING: - data.cmd = CPU_PM_ENTER; - break; case CPU_DOWN_FAILED: - data.cmd = CPU_PM_ENTER_FAILED; - break; - case CPU_ONLINE: - if (data.armpmu->plat_device) { - struct platform_device *pmu_device = - data.armpmu->plat_device; - int irq = platform_get_irq(pmu_device, 0); - - if (irq >= 0 && irq_is_percpu(irq)) { - smp_call_function_single(data.cpu, - cpu_pmu_enable_percpu_irq, &irq, 1); - } + /* + * Always reset the PMU registers on power-up even if + * there are no events running. + */ + if (data.armpmu->reset) + data.armpmu->reset(data.armpmu); + if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) { + if (data.armpmu->plat_device) + irq = data.armpmu->percpu_irq; + /* Arm the PMU IRQ before appearing. */ + if (irq > 0 && irq_is_percpu(irq)) + cpu_pmu_enable_percpu_irq(&irq); + data.cmd = CPU_PM_EXIT; + cpu_pm_pmu_common(&data); } - return NOTIFY_DONE; + return NOTIFY_OK; + case CPU_DYING: + if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) { + data.cmd = CPU_PM_ENTER; + cpu_pm_pmu_common(&data); + /* Disarm the PMU IRQ before disappearing. */ + if (data.armpmu->plat_device) + irq = data.armpmu->percpu_irq; + if (irq > 0 && irq_is_percpu(irq)) + cpu_pmu_disable_percpu_irq(&irq); + } + return NOTIFY_OK; default: return NOTIFY_DONE; } - - if (smp_processor_id() == data.cpu) - cpu_pm_pmu_common(&data); - else - smp_call_function_single(data.cpu, - cpu_pm_pmu_common, &data, 1); - - return data.ret; } static int cpu_pmu_init(struct arm_pmu *cpu_pmu) @@ -966,7 +973,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) /* Check the IRQ type and prohibit a mix of PPIs and SPIs */ irq = platform_get_irq(pdev, i); - if (irq >= 0) { + if (irq > 0) { bool spi = !irq_is_percpu(irq); if (i > 0 && spi != using_spi) { @@ -1085,6 +1092,9 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_destroy; + pmu->pmu_state = ARM_PMU_STATE_OFF; + pmu->percpu_irq = -1; + pr_info("enabled with %s PMU driver, %d counters available\n", pmu->name, pmu->num_events); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 553480660722..f01743d04e84 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -1584,6 +1584,7 @@ static int ipa_init_smem_region(int memory_region_size, struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; struct ipa_desc desc; struct ipa_mem_buffer mem; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); int rc; if (memory_region_size == 0) @@ -1603,7 +1604,7 @@ static int ipa_init_smem_region(int memory_region_size, memset(mem.base, 0, mem.size); cmd = kzalloc(sizeof(*cmd), - GFP_KERNEL); + flag); if (cmd == NULL) { IPAERR("Failed to alloc immediate command object\n"); rc = -ENOMEM; @@ -2166,6 +2167,7 @@ int _ipa_init_sram_v2(void) struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL; struct ipa_desc desc = {0}; struct ipa_mem_buffer mem; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); int rc = 0; phys_addr = ipa_ctx->ipa_wrapper_base + @@ -2203,7 +2205,7 @@ int _ipa_init_sram_v2(void) } memset(mem.base, 0, mem.size); - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), flag); if (cmd == NULL) { IPAERR("Failed to alloc immediate command object\n"); rc = -ENOMEM; @@ -2314,6 +2316,7 @@ int _ipa_init_hdr_v2(void) struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; struct ipa_hdr_init_local *cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); int rc = 0; mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); @@ -2325,7 +2328,7 @@ int _ipa_init_hdr_v2(void) } memset(mem.base, 0, mem.size); - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), flag); if (cmd == NULL) { IPAERR("Failed to alloc header init command object\n"); rc = -ENOMEM; @@ -2360,6 +2363,7 @@ int _ipa_init_hdr_v2_5(void) struct ipa_mem_buffer mem; struct ipa_hdr_init_local *cmd = NULL; struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size); mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base, @@ -2370,7 +2374,7 @@ int _ipa_init_hdr_v2_5(void) } memset(mem.base, 0, mem.size); - cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + cmd = kzalloc(sizeof(*cmd), flag); if (cmd == NULL) { IPAERR("Failed to alloc header init command object\n"); dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, @@ -2411,7 +2415,7 @@ int _ipa_init_hdr_v2_5(void) memset(mem.base, 0, mem.size); memset(&desc, 0, sizeof(desc)); - dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL); + dma_cmd = kzalloc(sizeof(*dma_cmd), flag); if (dma_cmd == NULL) { IPAERR("Failed to alloc immediate command object\n"); dma_free_coherent(ipa_ctx->pdev, @@ -2462,6 +2466,7 @@ int _ipa_init_rt4_v2(void) struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; struct ipa_ip_v4_routing_init *v4_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); u32 *entry; int i; int rc = 0; @@ -2486,7 +2491,7 @@ int _ipa_init_rt4_v2(void) entry++; } - v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL); + v4_cmd = kzalloc(sizeof(*v4_cmd), flag); if (v4_cmd == NULL) { IPAERR("Failed to alloc v4 routing init command object\n"); rc = -ENOMEM; @@ -2522,6 +2527,7 @@ int _ipa_init_rt6_v2(void) struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; struct ipa_ip_v6_routing_init *v6_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); u32 *entry; int i; int rc = 0; @@ -2546,7 +2552,7 @@ int _ipa_init_rt6_v2(void) entry++; } - v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL); + v6_cmd = kzalloc(sizeof(*v6_cmd), flag); if (v6_cmd == NULL) { IPAERR("Failed to alloc v6 routing init command object\n"); rc = -ENOMEM; @@ -2582,6 +2588,7 @@ int _ipa_init_flt4_v2(void) struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; struct ipa_ip_v4_filter_init *v4_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); u32 *entry; int i; int rc = 0; @@ -2604,7 +2611,7 @@ int _ipa_init_flt4_v2(void) entry++; } - v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL); + v4_cmd = kzalloc(sizeof(*v4_cmd), flag); if (v4_cmd == NULL) { IPAERR("Failed to alloc v4 fliter init command object\n"); rc = -ENOMEM; @@ -2640,6 +2647,7 @@ int _ipa_init_flt6_v2(void) struct ipa_desc desc = { 0 }; struct ipa_mem_buffer mem; struct ipa_ip_v6_filter_init *v6_cmd = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); u32 *entry; int i; int rc = 0; @@ -2662,7 +2670,7 @@ int _ipa_init_flt6_v2(void) entry++; } - v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL); + v6_cmd = kzalloc(sizeof(*v6_cmd), flag); if (v6_cmd == NULL) { IPAERR("Failed to alloc v6 fliter init command object\n"); rc = -ENOMEM; @@ -3848,11 +3856,8 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p, } ipa_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0); - if (ipa_ctx->logbuf == NULL) { - IPAERR("failed to get logbuf\n"); - result = -ENOMEM; - goto fail_logbuf; - } + if (ipa_ctx->logbuf == NULL) + IPAERR("failed to create IPC log, continue...\n"); ipa_ctx->pdev = ipa_dev; ipa_ctx->uc_pdev = ipa_dev; @@ -4390,8 +4395,8 @@ fail_bus_reg: fail_bind: kfree(ipa_ctx->ctrl); fail_mem_ctrl: - ipc_log_context_destroy(ipa_ctx->logbuf); -fail_logbuf: + if (ipa_ctx->logbuf) + ipc_log_context_destroy(ipa_ctx->logbuf); kfree(ipa_ctx); ipa_ctx = NULL; fail_mem_ctx: diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c index 25364e8efa38..2fdb20d99ce2 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c @@ -322,8 +322,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc, dma_address = desc->dma_address; tx_pkt->no_unmap_dma = true; } - if (!dma_address) { - IPAERR("failed to DMA wrap\n"); + if (dma_mapping_error(ipa_ctx->pdev, dma_address)) { + IPAERR("dma_map_single failed\n"); goto fail_dma_map; } @@ -445,7 +445,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc, } dma_addr = dma_map_single(ipa_ctx->pdev, transfer.iovec, size, DMA_TO_DEVICE); - if (!dma_addr) { + if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) { IPAERR("dma_map_single failed for sps xfr buff\n"); kfree(transfer.iovec); return -EFAULT; @@ -493,6 +493,15 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc, tx_pkt->mem.base, tx_pkt->mem.size, DMA_TO_DEVICE); + + if (dma_mapping_error(ipa_ctx->pdev, + tx_pkt->mem.phys_base)) { + IPAERR("dma_map_single "); + IPAERR("failed\n"); + fail_dma_wrap = 1; + goto failure; + } + } else { tx_pkt->mem.phys_base = desc[i].dma_address; tx_pkt->no_unmap_dma = true; @@ -1874,8 +1883,8 @@ begin: rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); - if (rx_pkt->data.dma_addr == 0 || - rx_pkt->data.dma_addr == ~0) { + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { pr_err_ratelimited("%s dma map fail %p for %p 
sys=%p\n", __func__, (void *)rx_pkt->data.dma_addr, ptr, sys); @@ -2030,8 +2039,8 @@ static void ipa_alloc_wlan_rx_common_cache(u32 size) ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ); rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE); - if (rx_pkt->data.dma_addr == 0 || - rx_pkt->data.dma_addr == ~0) { + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { IPAERR("dma_map_single failure %p for %p\n", (void *)rx_pkt->data.dma_addr, ptr); goto fail_dma_mapping; @@ -2102,8 +2111,8 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys) rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); - if (rx_pkt->data.dma_addr == 0 || - rx_pkt->data.dma_addr == ~0) { + if (dma_mapping_error(ipa_ctx->pdev, + rx_pkt->data.dma_addr)) { IPAERR("dma_map_single failure %p for %p\n", (void *)rx_pkt->data.dma_addr, ptr); goto fail_dma_mapping; @@ -2160,9 +2169,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys) ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz); rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr, sys->rx_buff_sz, DMA_FROM_DEVICE); - if (rx_pkt->data.dma_addr == 0 || - rx_pkt->data.dma_addr == ~0) + if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) { + IPAERR("dma_map_single failure for rx_pkt\n"); goto fail_dma_mapping; + } list_add_tail(&rx_pkt->link, &sys->head_desc_list); rx_len_cached = ++sys->len; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c index e23de3f26613..f43981f15c31 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -268,6 +268,7 @@ int __ipa_commit_hdr_v2(void) struct ipa_mem_buffer mem; struct ipa_hdr_init_system *cmd = NULL; struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0); int rc = -EFAULT; if (ipa_generate_hdr_hw_tbl(&mem)) { @@ -281,7 +282,7 @@ int __ipa_commit_hdr_v2(void) IPA_MEM_PART(apps_hdr_size)); goto fail_send_cmd; } else { - dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC); + dma_cmd = kzalloc(sizeof(*dma_cmd), flag); if (dma_cmd == NULL) { IPAERR("fail to alloc immediate cmd\n"); rc = -ENOMEM; @@ -303,7 +304,7 @@ int __ipa_commit_hdr_v2(void) IPA_MEM_PART(apps_hdr_size_ddr)); goto fail_send_cmd; } else { - cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); + cmd = kzalloc(sizeof(*cmd), flag); if (cmd == NULL) { IPAERR("fail to alloc hdr init cmd\n"); rc = -ENOMEM; @@ -359,6 +360,7 @@ int __ipa_commit_hdr_v2_5(void) struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL; struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL; struct ipa_register_write *reg_write_cmd = NULL; + gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); int rc = -EFAULT; u32 proc_ctx_size; u32 proc_ctx_ofst; @@ -383,7 +385,7 @@ int __ipa_commit_hdr_v2_5(void) IPA_MEM_PART(apps_hdr_size)); goto fail_send_cmd1; } else { - dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC); + dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag); if (dma_cmd_hdr == NULL) { IPAERR("fail to alloc immediate cmd\n"); rc = -ENOMEM; @@ -406,7 +408,7 @@ int __ipa_commit_hdr_v2_5(void) goto fail_send_cmd1; } else { hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd), - GFP_ATOMIC); + flag); if (hdr_init_cmd == NULL) { IPAERR("fail to alloc immediate cmd\n"); rc = -ENOMEM; @@ -431,7 +433,7 @@ int __ipa_commit_hdr_v2_5(void) goto fail_send_cmd1; } else { dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx), - GFP_ATOMIC); + flag); if (dma_cmd_ctx == NULL) { IPAERR("fail to alloc immediate cmd\n"); rc = -ENOMEM; @@ -456,7 +458,7 @@ int __ipa_commit_hdr_v2_5(void) goto fail_send_cmd1; } else { reg_write_cmd = kzalloc(sizeof(*reg_write_cmd), - GFP_ATOMIC); + flag); if (reg_write_cmd == NULL) { IPAERR("fail to alloc immediate cmd\n"); rc = -ENOMEM; @@ -722,6 +724,11 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->hdr, entry->hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(ipa_ctx->pdev, + entry->phys_base)) { + IPAERR("dma_map_single failure for entry\n"); + goto fail_dma_mapping; + } } } else { entry->is_hdr_proc_ctx = false; @@ -798,6 +805,8 @@ fail_add_proc_ctx: list_del(&entry->link); dma_unmap_single(ipa_ctx->pdev, entry->phys_base, entry->hdr_len, DMA_TO_DEVICE); +fail_dma_mapping: + entry->is_hdr_proc_ctx = false; bad_hdr_len: entry->cookie = 0; kmem_cache_free(ipa_ctx->hdr_cache, entry); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c index f8f8fd12161a..5c07bc7d43b5 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -562,6 +562,8 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, mutex_unlock(&ipa_ctx->msg_lock); if (copy_to_user(buf, &msg->meta, sizeof(struct ipa_msg_meta))) { + kfree(msg); + msg = NULL; ret = -EFAULT; break; } @@ -570,6 +572,8 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, if (msg->buff) { if (copy_to_user(buf, msg->buff, msg->meta.msg_len)) { + kfree(msg); + msg = NULL; ret = -EFAULT; break; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index f5dea76764f8..11c77934e04f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -698,6 +698,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip) struct ipa_mem_buffer head; struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL; struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL; + gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? 
GFP_DMA : 0); u16 avail; u32 num_modem_rt_index; int rc = 0; @@ -748,7 +749,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip) } cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), - GFP_KERNEL); + flag); if (cmd1 == NULL) { IPAERR("Failed to alloc immediate command object\n"); rc = -ENOMEM; @@ -765,7 +766,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip) if (lcl) { cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem), - GFP_KERNEL); + flag); if (cmd2 == NULL) { IPAERR("Failed to alloc immediate command object\n"); rc = -ENOMEM; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 5b706b6f493b..5ee6e5d2d9e3 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -2276,6 +2276,36 @@ static int ipa3_q6_set_ex_path_to_apps(void) desc[num_descs].len = cmd_pyld->len; num_descs++; } + + /* disable statuses for modem producers */ + if (IPA_CLIENT_IS_Q6_PROD(client_idx)) { + ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes); + + reg_write.skip_pipeline_clear = false; + reg_write.pipeline_clear_options = + IPAHAL_HPS_CLEAR; + reg_write.offset = + ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n, + ep_idx); + reg_write.value = 0; + reg_write.value_mask = ~0; + cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_REGISTER_WRITE, ®_write, false); + if (!cmd_pyld) { + IPAERR("fail construct register_write cmd\n"); + ipa_assert(); + return -EFAULT; + } + + desc[num_descs].opcode = ipahal_imm_cmd_get_opcode( + IPA_IMM_CMD_REGISTER_WRITE); + desc[num_descs].type = IPA_IMM_CMD_DESC; + desc[num_descs].callback = ipa3_destroy_imm; + desc[num_descs].user1 = cmd_pyld; + desc[num_descs].pyld = cmd_pyld->data; + desc[num_descs].len = cmd_pyld->len; + num_descs++; + } } /* Will wait 500msecs for IPA tag process completion */ @@ -4036,6 +4066,7 @@ fail_register_device: unregister_chrdev_region(ipa3_ctx->dev_num, 1); if (ipa3_ctx->pipe_mem_pool) gen_pool_destroy(ipa3_ctx->pipe_mem_pool); + ipa3_free_dma_task_for_gsi(); ipa3_destroy_flt_tbl_idrs(); idr_destroy(&ipa3_ctx->ipa_idr); kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache); @@ -4264,11 +4295,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, } ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0); - if (ipa3_ctx->logbuf == NULL) { - IPAERR("failed to get logbuf\n"); - result = -ENOMEM; - goto fail_logbuf; - } + if (ipa3_ctx->logbuf == NULL) + IPAERR("failed to create IPC log, continue...\n"); ipa3_ctx->pdev = ipa_dev; ipa3_ctx->uc_pdev = ipa_dev; @@ -4554,6 +4582,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p, goto fail_dma_pool; } + /* allocate memory for DMA_TASK workaround */ + result = ipa3_allocate_dma_task_for_gsi(); + if (result) { + IPAERR("failed to allocate dma task\n"); + goto fail_dma_task; + } + /* init the various list heads */ INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list); for (i = 0; i < IPA_HDR_BIN_MAX; i++) { @@ -4726,6 +4761,8 @@ fail_cdev_add: fail_device_create: unregister_chrdev_region(ipa3_ctx->dev_num, 1); fail_alloc_chrdev_region: + ipa3_free_dma_task_for_gsi(); +fail_dma_task: if (ipa3_ctx->pipe_mem_pool) gen_pool_destroy(ipa3_ctx->pipe_mem_pool); ipa3_destroy_flt_tbl_idrs(); @@ -4769,8 +4806,8 @@ fail_bind: fail_mem_ctrl: kfree(ipa3_ctx->ipa_tz_unlock_reg); fail_tz_unlock_reg: - ipc_log_context_destroy(ipa3_ctx->logbuf); -fail_logbuf: + if (ipa3_ctx->logbuf) + ipc_log_context_destroy(ipa3_ctx->logbuf); kfree(ipa3_ctx); ipa3_ctx = NULL; fail_mem_ctx: diff --git 
a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 0cf77bbde496..ac7ef6a21952 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -1027,6 +1027,11 @@ struct ipa_tz_unlock_reg_info { u32 size; }; +struct ipa_dma_task_info { + struct ipa_mem_buffer mem; + struct ipahal_imm_cmd_pyld *cmd_pyld; +}; + /** * struct ipa3_context - IPA context * @class: pointer to the struct class @@ -1246,6 +1251,7 @@ struct ipa3_context { struct ipa3_smp2p_info smp2p_info; u32 ipa_tz_unlock_reg_num; struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg; + struct ipa_dma_task_info dma_task_info; }; /** @@ -2053,4 +2059,6 @@ int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats); struct dentry *ipa_debugfs_get_root(void); bool ipa3_is_msm_device(void); struct device *ipa3_get_pdev(void); +int ipa3_allocate_dma_task_for_gsi(void); +void ipa3_free_dma_task_for_gsi(void); #endif /* _IPA3_I_H_ */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c index b687b711dc20..16a567644f79 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -572,6 +572,8 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, if (copy_to_user(buf, &msg->meta, sizeof(struct ipa_msg_meta))) { ret = -EFAULT; + kfree(msg); + msg = NULL; break; } buf += sizeof(struct ipa_msg_meta); @@ -580,6 +582,8 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, if (copy_to_user(buf, msg->buff, msg->meta.msg_len)) { ret = -EFAULT; + kfree(msg); + msg = NULL; break; } buf += msg->meta.msg_len; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index c8ff06ddda87..f4a7319ca290 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -3430,6 +3430,7 @@ static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep) /* queue a work to start polling if don't have one */ atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1); if (!atomic_read(&ep->sys->curr_polling_state)) { + ipa3_inc_acquire_wakelock(); atomic_set(&ep->sys->curr_polling_state, 1); queue_work(ep->sys->wq, &ep->sys->work); } @@ -3482,6 +3483,51 @@ void ipa3_suspend_apps_pipes(bool suspend) } } +int ipa3_allocate_dma_task_for_gsi(void) +{ + struct ipahal_imm_cmd_dma_task_32b_addr cmd = { 0 }; + + IPADBG("Allocate mem\n"); + ipa3_ctx->dma_task_info.mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE; + ipa3_ctx->dma_task_info.mem.base = dma_alloc_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + &ipa3_ctx->dma_task_info.mem.phys_base, + GFP_KERNEL); + if (!ipa3_ctx->dma_task_info.mem.base) { + IPAERR("no mem\n"); + return -EFAULT; + } + + cmd.flsh = 1; + cmd.size1 = ipa3_ctx->dma_task_info.mem.size; + cmd.addr1 = ipa3_ctx->dma_task_info.mem.phys_base; + cmd.packet_size = ipa3_ctx->dma_task_info.mem.size; + ipa3_ctx->dma_task_info.cmd_pyld = ipahal_construct_imm_cmd( + IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false); + if (!ipa3_ctx->dma_task_info.cmd_pyld) { + IPAERR("failed to construct dma_task_32b_addr cmd\n"); + dma_free_coherent(ipa3_ctx->pdev, + ipa3_ctx->dma_task_info.mem.size, + ipa3_ctx->dma_task_info.mem.base, + ipa3_ctx->dma_task_info.mem.phys_base); + memset(&ipa3_ctx->dma_task_info, 0, + sizeof(ipa3_ctx->dma_task_info)); + return -EFAULT; + } + + return 0; +} + +void ipa3_free_dma_task_for_gsi(void) +{ + dma_free_coherent(ipa3_ctx->pdev, + 
ipa3_ctx->dma_task_info.mem.size, + ipa3_ctx->dma_task_info.mem.base, + ipa3_ctx->dma_task_info.mem.phys_base); + ipahal_destroy_imm_cmd(ipa3_ctx->dma_task_info.cmd_pyld); + memset(&ipa3_ctx->dma_task_info, 0, sizeof(ipa3_ctx->dma_task_info)); +} + /** * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel * @@ -3490,41 +3536,12 @@ void ipa3_suspend_apps_pipes(bool suspend) */ int ipa3_inject_dma_task_for_gsi(void) { - static struct ipa_mem_buffer mem = {0}; - struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0}; - static struct ipahal_imm_cmd_pyld *cmd_pyld; struct ipa3_desc desc = {0}; - /* allocate the memory only for the very first time */ - if (!mem.base) { - IPADBG("Allocate mem\n"); - mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE; - mem.base = dma_alloc_coherent(ipa3_ctx->pdev, - mem.size, - &mem.phys_base, - GFP_KERNEL); - if (!mem.base) { - IPAERR("no mem\n"); - return -EFAULT; - } - } - if (!cmd_pyld) { - cmd.flsh = 1; - cmd.size1 = mem.size; - cmd.addr1 = mem.phys_base; - cmd.packet_size = mem.size; - cmd_pyld = ipahal_construct_imm_cmd( - IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false); - if (!cmd_pyld) { - IPAERR("failed to construct dma_task_32b_addr cmd\n"); - return -EFAULT; - } - } - desc.opcode = ipahal_imm_cmd_get_opcode_param( IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1); - desc.pyld = cmd_pyld->data; - desc.len = cmd_pyld->len; + desc.pyld = ipa3_ctx->dma_task_info.cmd_pyld->data; + desc.len = ipa3_ctx->dma_task_info.cmd_pyld->len; desc.type = IPA_IMM_CMD_DESC; IPADBG("sending 1B packet to IPA\n"); diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 6731150ce4e7..e3f8f4c0be46 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -2365,32 +2365,41 @@ static int rmnet_ipa_ap_suspend(struct device *dev) { struct net_device *netdev = IPA_NETDEV(); struct ipa3_wwan_private *wwan_ptr; + int ret; + + IPAWANDBG("Enter...\n"); - IPAWANDBG_LOW("Enter...\n"); if (netdev == NULL) { IPAWANERR("netdev is NULL.\n"); - return 0; + ret = 0; + goto bail; } + netif_tx_lock_bh(netdev); wwan_ptr = netdev_priv(netdev); if (wwan_ptr == NULL) { IPAWANERR("wwan_ptr is NULL.\n"); - return 0; + ret = 0; + goto unlock_and_bail; } /* Do not allow A7 to suspend in case there are oustanding packets */ if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) { IPAWANDBG("Outstanding packets, postponing AP suspend.\n"); - return -EAGAIN; + ret = -EAGAIN; + goto unlock_and_bail; } /* Make sure that there is no Tx operation ongoing */ - netif_tx_lock_bh(netdev); + netif_stop_queue(netdev); ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD); - netif_tx_unlock_bh(netdev); - IPAWANDBG_LOW("Exit\n"); + ret = 0; - return 0; +unlock_and_bail: + netif_tx_unlock_bh(netdev); +bail: + IPAWANDBG("Exit with %d\n", ret); + return ret; } /** @@ -2407,10 +2416,10 @@ static int rmnet_ipa_ap_resume(struct device *dev) { struct net_device *netdev = IPA_NETDEV(); - IPAWANDBG_LOW("Enter...\n"); + IPAWANDBG("Enter...\n"); if (netdev) netif_wake_queue(netdev); - IPAWANDBG_LOW("Exit\n"); + IPAWANDBG("Exit\n"); return 0; } diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c index ab3c3503c2fc..3191ec065a95 100644 --- a/drivers/platform/msm/mhi_uci/mhi_uci.c +++ b/drivers/platform/msm/mhi_uci/mhi_uci.c @@ -1185,9 +1185,43 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info) mutex_unlock(&chan_attr->chan_lock); wake_up(&chan_attr->wq); break; + case MHI_CB_SYS_ERROR: + case 
MHI_CB_MHI_SHUTDOWN: case MHI_CB_MHI_DISABLED: uci_log(uci_handle->uci_ipc_log, UCI_DBG_INFO, - "MHI disabled CB received\n"); + "MHI disabled CB received 0x%x for chan:%d\n", + cb_info->cb_reason, cb_info->chan); + + chan_attr = (cb_info->chan % 2) ? &uci_handle->in_attr : + &uci_handle->out_attr; + mutex_lock(&chan_attr->chan_lock); + chan_attr->enabled = false; + /* we disable entire handler by grabbing only one lock */ + uci_handle->enabled = false; + mutex_unlock(&chan_attr->chan_lock); + wake_up(&chan_attr->wq); + + /* + * if it's ctrl channel clear the resource now + * otherwise during file close we will release the + * resources + */ + if (uci_handle == uci_handle->uci_ctxt->ctrl_client && + chan_attr == &uci_handle->out_attr) { + struct uci_buf *itr, *tmp; + + mutex_lock(&chan_attr->chan_lock); + atomic_set(&uci_handle->out_attr.avail_pkts, 0); + atomic_set(&uci_handle->out_pkt_pend_ack, 0); + list_for_each_entry_safe(itr, tmp, &chan_attr->buf_head, + node) { + list_del(&itr->node); + kfree(itr->data); + } + atomic_set(&uci_handle->completion_ack, 0); + INIT_LIST_HEAD(&uci_handle->out_attr.buf_head); + mutex_unlock(&chan_attr->chan_lock); + } break; case MHI_CB_XFER: if (!cb_info->result) { diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c index c6009d767db5..e88f3d8c14d2 100644 --- a/drivers/platform/msm/msm_11ad/msm_11ad.c +++ b/drivers/platform/msm/msm_11ad/msm_11ad.c @@ -643,6 +643,9 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx) int rc; int force_pt_coherent = 1; int smmu_bypass = !ctx->smmu_s1_en; + dma_addr_t iova_base = 0; + dma_addr_t iova_end = ctx->smmu_base + ctx->smmu_size - 1; + struct iommu_domain_geometry geometry; if (!ctx->use_smmu) return 0; @@ -700,6 +703,17 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx) rc); goto release_mapping; } + memset(&geometry, 0, sizeof(geometry)); + geometry.aperture_start = iova_base; + geometry.aperture_end = iova_end; + rc = iommu_domain_set_attr(ctx->mapping->domain, + DOMAIN_ATTR_GEOMETRY, + &geometry); + if (rc) { + dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n", + rc); + goto release_mapping; + } } } @@ -864,6 +878,8 @@ static int msm_11ad_ssr_init(struct msm11ad_ctx *ctx) ctx->dump_data.addr = virt_to_phys(ctx->ramdump_addr); ctx->dump_data.len = WIGIG_RAMDUMP_SIZE; + strlcpy(ctx->dump_data.name, "KWIGIG", + sizeof(ctx->dump_data.name)); dump_entry.id = MSM_DUMP_DATA_WIGIG; dump_entry.addr = virt_to_phys(&ctx->dump_data); diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c index 9b6096485c39..d0f21943cb0f 100644 --- a/drivers/platform/msm/seemp_core/seemp_logk.c +++ b/drivers/platform/msm/seemp_core/seemp_logk.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -184,6 +184,8 @@ static int seemp_logk_usr_record(const char __user *buf, size_t count) if (copy_from_user(&usr_blk.payload, &local_blk->payload, sizeof(struct blk_payload)) != 0) return -EFAULT; + } else { + return -EFAULT; } idx = ret = 0; now = current_kernel_time(); @@ -283,7 +285,12 @@ static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index) { unsigned int byte_num = index/8; unsigned int bit_num = index%8; - unsigned char byte = pVec[byte_num]; + unsigned char byte; + + if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE) + return false; + + byte = pVec[byte_num]; return !(byte & (1 << bit_num)); } diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 438da2c51dd6..723b9eaf658a 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -293,6 +293,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(die_health), POWER_SUPPLY_ATTR(connector_health), POWER_SUPPLY_ATTR(ctm_current_max), + POWER_SUPPLY_ATTR(hw_current_max), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c index 3ac4611da9bd..43ad33ea234b 100644 --- a/drivers/power/qcom/msm-core.c +++ b/drivers/power/qcom/msm-core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1069,6 +1069,7 @@ static int msm_core_dev_probe(struct platform_device *pdev) if (ret) goto failed; + INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle); ret = msm_core_task_init(&pdev->dev); if (ret) goto failed; @@ -1076,7 +1077,6 @@ static int msm_core_dev_probe(struct platform_device *pdev) for_each_possible_cpu(cpu) set_threshold(&activity[cpu]); - INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle); schedule_delayed_work(&sampling_work, msecs_to_jiffies(0)); cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER); pm_notifier(system_suspend_handler, 0); diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c index a30ed90d6e92..8d038ba0770d 100644 --- a/drivers/power/reset/msm-poweroff.c +++ b/drivers/power/reset/msm-poweroff.c @@ -33,6 +33,7 @@ #include <soc/qcom/scm.h> #include <soc/qcom/restart.h> #include <soc/qcom/watchdog.h> +#include <soc/qcom/minidump.h> #define EMERGENCY_DLOAD_MAGIC1 0x322A4F99 #define EMERGENCY_DLOAD_MAGIC2 0xC67E4350 @@ -42,9 +43,10 @@ #define SCM_IO_DISABLE_PMIC_ARBITER 1 #define SCM_IO_DEASSERT_PS_HOLD 2 #define SCM_WDOG_DEBUG_BOOT_PART 0x9 -#define SCM_DLOAD_MODE 0X10 +#define SCM_DLOAD_FULLDUMP 0X10 #define SCM_EDLOAD_MODE 0X01 #define SCM_DLOAD_CMD 0x10 +#define SCM_DLOAD_MINIDUMP 0X20 static int restart_mode; @@ -69,6 +71,7 @@ static void scm_disable_sdi(void); #endif static int in_panic; +static int dload_type = SCM_DLOAD_FULLDUMP; static int download_mode = 1; static struct kobject dload_kobj; static void *dload_mode_addr, *dload_type_addr; @@ -142,7 +145,7 @@ static void set_dload_mode(int on) mb(); } - ret = scm_set_dload_mode(on ? SCM_DLOAD_MODE : 0, 0); + ret = scm_set_dload_mode(on ? 
dload_type : 0, 0); if (ret) pr_err("Failed to set secure DLOAD mode: %d\n", ret); @@ -185,7 +188,6 @@ static int dload_set(const char *val, struct kernel_param *kp) int old_val = download_mode; ret = param_set_int(val, kp); - if (ret) return ret; @@ -454,7 +456,7 @@ static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr, else show_val = 0; - return snprintf(buf, sizeof(show_val), "%u\n", show_val); + return scnprintf(buf, sizeof(show_val), "%u\n", show_val); } static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr, @@ -477,10 +479,50 @@ static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr, return count; } + +#ifdef CONFIG_QCOM_MINIDUMP + +static DEFINE_MUTEX(tcsr_lock); + +static ssize_t show_dload_mode(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "DLOAD dump type: %s\n", + (dload_type == SCM_DLOAD_MINIDUMP) ? "mini" : "full"); +} + +static size_t store_dload_mode(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + if (sysfs_streq(buf, "full")) { + dload_type = SCM_DLOAD_FULLDUMP; + } else if (sysfs_streq(buf, "mini")) { + if (!msm_minidump_enabled()) { + pr_info("Minidump is not enabled\n"); + return -ENODEV; + } + dload_type = SCM_DLOAD_MINIDUMP; + } else { + pr_info("Invalid value. Use 'full' or 'mini'\n"); + return -EINVAL; + } + + mutex_lock(&tcsr_lock); + /*Overwrite TCSR reg*/ + set_dload_mode(dload_type); + mutex_unlock(&tcsr_lock); + return count; +} +RESET_ATTR(dload_mode, 0644, show_dload_mode, store_dload_mode); +#endif + RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload); static struct attribute *reset_attrs[] = { &reset_attr_emmc_dload.attr, +#ifdef CONFIG_QCOM_MINIDUMP + &reset_attr_dload_mode.attr, +#endif NULL }; diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c index 0c80c8be7909..539e757d3e99 100644 --- a/drivers/power/supply/qcom/battery.c +++ b/drivers/power/supply/qcom/battery.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__ #include <linux/device.h> +#include <linux/delay.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -36,6 +37,7 @@ #define PL_HW_ABSENT_VOTER "PL_HW_ABSENT_VOTER" #define PL_VOTER "PL_VOTER" #define RESTRICT_CHG_VOTER "RESTRICT_CHG_VOTER" +#define ICL_CHANGE_VOTER "ICL_CHANGE_VOTER" struct pl_data { int pl_mode; @@ -49,14 +51,16 @@ struct pl_data { struct votable *pl_disable_votable; struct votable *pl_awake_votable; struct votable *hvdcp_hw_inov_dis_votable; + struct votable *usb_icl_votable; struct work_struct status_change_work; struct work_struct pl_disable_forever_work; struct delayed_work pl_taper_work; struct power_supply *main_psy; struct power_supply *pl_psy; struct power_supply *batt_psy; + struct power_supply *usb_psy; int charge_type; - int main_settled_ua; + int total_settled_ua; int pl_settled_ua; struct class qcom_batt_class; struct wakeup_source *pl_ws; @@ -92,15 +96,10 @@ enum { ********/ static void split_settled(struct pl_data *chip) { - int slave_icl_pct; + int slave_icl_pct, total_current_ua; int slave_ua = 0, main_settled_ua = 0; union power_supply_propval pval = {0, }; - int rc; - - /* TODO some parallel chargers do not have a fine ICL resolution. 
For - * them implement a psy interface which returns the closest lower ICL - * for desired split - */ + int rc, total_settled_ua = 0; if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN) && (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT)) @@ -122,12 +121,31 @@ static void split_settled(struct pl_data *chip) slave_icl_pct = max(0, chip->slave_pct - 10); slave_ua = ((main_settled_ua + chip->pl_settled_ua) * slave_icl_pct) / 100; + total_settled_ua = main_settled_ua + chip->pl_settled_ua; } - /* ICL_REDUCTION on main could be 0mA when pl is disabled */ - pval.intval = slave_ua; + total_current_ua = get_effective_result_locked(chip->usb_icl_votable); + if (total_current_ua < 0) { + if (!chip->usb_psy) + chip->usb_psy = power_supply_get_by_name("usb"); + if (!chip->usb_psy) { + pr_err("Couldn't get usbpsy while splitting settled\n"); + return; + } + /* no client is voting, so get the total current from charger */ + rc = power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval); + if (rc < 0) { + pr_err("Couldn't get max current rc=%d\n", rc); + return; + } + total_current_ua = pval.intval; + } + + pval.intval = total_current_ua - slave_ua; + /* Set ICL on main charger */ rc = power_supply_set_property(chip->main_psy, - POWER_SUPPLY_PROP_ICL_REDUCTION, &pval); + POWER_SUPPLY_PROP_CURRENT_MAX, &pval); if (rc < 0) { pr_err("Couldn't change slave suspend state rc=%d\n", rc); return; @@ -142,10 +160,12 @@ static void split_settled(struct pl_data *chip) return; } - /* main_settled_ua represents the total capability of adapter */ - if (!chip->main_settled_ua) - chip->main_settled_ua = main_settled_ua; + chip->total_settled_ua = total_settled_ua; chip->pl_settled_ua = slave_ua; + + pl_dbg(chip, PR_PARALLEL, + "Split total_current_ua=%d main_settled_ua=%d slave_ua=%d\n", + total_current_ua, main_settled_ua, slave_ua); } static ssize_t version_show(struct class *c, struct class_attribute *attr, @@ -213,6 +233,10 @@ static ssize_t restrict_chg_store(struct class *c, struct class_attribute *attr, chip->restricted_charging_enabled = !!val; + /* disable parallel charger in case of restricted charging */ + vote(chip->pl_disable_votable, RESTRICT_CHG_VOTER, + chip->restricted_charging_enabled, 0); + vote(chip->fcc_votable, RESTRICT_CHG_VOTER, chip->restricted_charging_enabled, chip->restricted_current); @@ -410,23 +434,28 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data, return rc; } - split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua); - pval.intval = slave_fcc_ua; - rc = power_supply_set_property(chip->pl_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - if (rc < 0) { - pr_err("Couldn't set parallel fcc, rc=%d\n", rc); - return rc; - } + if (chip->pl_mode != POWER_SUPPLY_PL_NONE) { + split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua); - chip->slave_fcc_ua = slave_fcc_ua; + pval.intval = slave_fcc_ua; + rc = power_supply_set_property(chip->pl_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + &pval); + if (rc < 0) { + pr_err("Couldn't set parallel fcc, rc=%d\n", rc); + return rc; + } - pval.intval = master_fcc_ua; - rc = power_supply_set_property(chip->main_psy, - POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval); - if (rc < 0) { - pr_err("Could not set main fcc, rc=%d\n", rc); - return rc; + chip->slave_fcc_ua = slave_fcc_ua; + + pval.intval = master_fcc_ua; + rc = power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + &pval); + if (rc < 0) { + pr_err("Could not set main fcc, 
rc=%d\n", rc); + return rc; + } } pl_dbg(chip, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n", @@ -487,6 +516,66 @@ static int pl_fv_vote_callback(struct votable *votable, void *data, return 0; } +#define ICL_STEP_UA 25000 +static int usb_icl_vote_callback(struct votable *votable, void *data, + int icl_ua, const char *client) +{ + int rc; + struct pl_data *chip = data; + union power_supply_propval pval = {0, }; + bool rerun_aicl = false; + + if (!chip->main_psy) + return 0; + + if (client == NULL) + icl_ua = INT_MAX; + + /* + * Disable parallel for new ICL vote - the call to split_settled will + * ensure that all the input current limit gets assigned to the main + * charger. + */ + vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0); + + /* rerun AICL */ + /* get the settled current */ + rc = power_supply_get_property(chip->main_psy, + POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, + &pval); + if (rc < 0) { + pr_err("Couldn't get aicl settled value rc=%d\n", rc); + return rc; + } + + /* rerun AICL if new ICL is above settled ICL */ + if (icl_ua > pval.intval) + rerun_aicl = true; + + if (rerun_aicl) { + /* set a lower ICL */ + pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA); + power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CURRENT_MAX, + &pval); + /* wait for ICL change */ + msleep(100); + } + + /* set the effective ICL */ + pval.intval = icl_ua; + power_supply_set_property(chip->main_psy, + POWER_SUPPLY_PROP_CURRENT_MAX, + &pval); + if (rerun_aicl) + /* wait for ICL change */ + msleep(100); + + vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0); + + return 0; +} + static void pl_disable_forever_work(struct work_struct *work) { struct pl_data *chip = container_of(work, @@ -508,7 +597,7 @@ static int pl_disable_vote_callback(struct votable *votable, int rc; chip->taper_pct = 100; - chip->main_settled_ua = 0; + chip->total_settled_ua = 0; chip->pl_settled_ua = 0; if (!pl_disable) { /* enable */ @@ -596,13 +685,12 @@ static int pl_awake_vote_callback(struct votable *votable, static bool is_main_available(struct pl_data *chip) { - if (!chip->main_psy) - chip->main_psy = power_supply_get_by_name("main"); + if (chip->main_psy) + return true; - if (!chip->main_psy) - return false; + chip->main_psy = power_supply_get_by_name("main"); - return true; + return !!chip->main_psy; } static bool is_batt_available(struct pl_data *chip) @@ -711,6 +799,7 @@ static void handle_main_charge_type(struct pl_data *chip) static void handle_settled_icl_change(struct pl_data *chip) { union power_supply_propval pval = {0, }; + int new_total_settled_ua; int rc; if (get_effective_result(chip->pl_disable_votable)) @@ -730,9 +819,15 @@ static void handle_settled_icl_change(struct pl_data *chip) return; } + new_total_settled_ua = pval.intval + chip->pl_settled_ua; + pl_dbg(chip, PR_PARALLEL, + "total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n", + chip->total_settled_ua, pval.intval, + new_total_settled_ua); + /* If ICL change is small skip splitting */ - if (abs((chip->main_settled_ua - chip->pl_settled_ua) - - pval.intval) > MIN_ICL_CHANGE_DELTA_UA) + if (abs(new_total_settled_ua - chip->total_settled_ua) + > MIN_ICL_CHANGE_DELTA_UA) split_settled(chip); } else { rerun_election(chip->fcc_votable); @@ -773,7 +868,18 @@ static void status_change_work(struct work_struct *work) struct pl_data *chip = container_of(work, struct pl_data, status_change_work); - if (!is_main_available(chip)) + if (!chip->main_psy && is_main_available(chip)) { + /* + * re-run election 
for FCC/FV/ICL once main_psy + * is available to ensure all votes are reflected + * on hardware + */ + rerun_election(chip->usb_icl_votable); + rerun_election(chip->fcc_votable); + rerun_election(chip->fv_votable); + } + + if (!chip->main_psy) return; if (!is_batt_available(chip)) @@ -855,6 +961,14 @@ static int pl_init(void) goto destroy_votable; } + chip->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN, + usb_icl_vote_callback, + chip); + if (IS_ERR(chip->usb_icl_votable)) { + rc = PTR_ERR(chip->usb_icl_votable); + goto destroy_votable; + } + chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY, pl_disable_vote_callback, chip); @@ -909,6 +1023,7 @@ destroy_votable: destroy_votable(chip->pl_disable_votable); destroy_votable(chip->fv_votable); destroy_votable(chip->fcc_votable); + destroy_votable(chip->usb_icl_votable); release_wakeup_source: wakeup_source_unregister(chip->pl_ws); cleanup: diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index f2047592a94b..25c9d0251cf1 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -128,11 +128,6 @@ enum fg_irq_index { FG_IRQ_MAX, }; -/* WA flags */ -enum { - DELTA_SOC_IRQ_WA = BIT(0), -}; - /* * List of FG_SRAM parameters. Please add a parameter only if it is an entry * that will be used either to configure an entity (e.g. termination current) @@ -152,6 +147,7 @@ enum fg_sram_param_id { FG_SRAM_CC_SOC, FG_SRAM_CC_SOC_SW, FG_SRAM_ACT_BATT_CAP, + FG_SRAM_TIMEBASE, /* Entries below here are configurable during initialization */ FG_SRAM_CUTOFF_VOLT, FG_SRAM_EMPTY_VOLT, @@ -162,14 +158,17 @@ enum fg_sram_param_id { FG_SRAM_ESR_TIMER_DISCHG_INIT, FG_SRAM_ESR_TIMER_CHG_MAX, FG_SRAM_ESR_TIMER_CHG_INIT, + FG_SRAM_ESR_PULSE_THRESH, FG_SRAM_SYS_TERM_CURR, FG_SRAM_CHG_TERM_CURR, + FG_SRAM_CHG_TERM_BASE_CURR, FG_SRAM_DELTA_MSOC_THR, FG_SRAM_DELTA_BSOC_THR, FG_SRAM_RECHARGE_SOC_THR, FG_SRAM_RECHARGE_VBATT_THR, FG_SRAM_KI_COEFF_MED_DISCHG, FG_SRAM_KI_COEFF_HI_DISCHG, + FG_SRAM_KI_COEFF_FULL_SOC, FG_SRAM_ESR_TIGHT_FILTER, FG_SRAM_ESR_BROAD_FILTER, FG_SRAM_SLOPE_LIMIT, @@ -209,6 +208,7 @@ struct fg_alg_flag { enum wa_flags { PMI8998_V1_REV_WA = BIT(0), + PM660_TSMC_OSC_WA = BIT(1), }; enum slope_limit_status { @@ -228,6 +228,7 @@ struct fg_dt_props { int empty_volt_mv; int vbatt_low_thr_mv; int chg_term_curr_ma; + int chg_term_base_curr_ma; int sys_term_curr_ma; int delta_soc_thr; int recharge_soc_thr; @@ -253,6 +254,8 @@ struct fg_dt_props { int esr_tight_lt_flt_upct; int esr_broad_lt_flt_upct; int slope_limit_temp; + int esr_pulse_thresh_ma; + int esr_meas_curr_ma; int jeita_thresholds[NUM_JEITA_LEVELS]; int ki_coeff_soc[KI_COEFF_SOC_LEVELS]; int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS]; @@ -319,6 +322,23 @@ static const struct fg_pt fg_ln_table[] = { { 128000, 4852 }, }; +/* each tuple is - <temperature in degC, Timebase> */ +static const struct fg_pt fg_tsmc_osc_table[] = { + { -20, 395064 }, + { -10, 398114 }, + { 0, 401669 }, + { 10, 404641 }, + { 20, 408856 }, + { 25, 412449 }, + { 30, 416532 }, + { 40, 420289 }, + { 50, 425020 }, + { 60, 430160 }, + { 70, 434175 }, + { 80, 439475 }, + { 90, 444992 }, +}; + struct fg_chip { struct device *dev; struct pmic_revid_data *pmic_rev_id; @@ -330,6 +350,7 @@ struct fg_chip { struct power_supply *dc_psy; struct power_supply *parallel_psy; struct iio_channel *batt_id_chan; + struct iio_channel *die_temp_chan; struct fg_memif *sram; struct fg_irq_info *irqs; struct votable *awake_votable; @@ -353,6 +374,7 @@ struct 
fg_chip { u32 rradc_base; u32 wa_flags; int batt_id_ohms; + int ki_coeff_full_soc; int charge_status; int prev_charge_status; int charge_done; diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h index bf2827fb550d..cd0b2fb4391f 100644 --- a/drivers/power/supply/qcom/fg-reg.h +++ b/drivers/power/supply/qcom/fg-reg.h @@ -167,6 +167,7 @@ /* BATT_INFO_ESR_PULL_DN_CFG */ #define ESR_PULL_DOWN_IVAL_MASK GENMASK(3, 2) +#define ESR_PULL_DOWN_IVAL_SHIFT 2 #define ESR_MEAS_CUR_60MA 0x0 #define ESR_MEAS_CUR_120MA 0x1 #define ESR_MEAS_CUR_180MA 0x2 diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 59216a567662..a12183f5f387 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -31,6 +31,8 @@ #define FG_MEM_INFO_PMI8998 0x0D /* SRAM address and offset in ascending order */ +#define ESR_PULSE_THRESH_WORD 2 +#define ESR_PULSE_THRESH_OFFSET 3 #define SLOPE_LIMIT_WORD 3 #define SLOPE_LIMIT_OFFSET 0 #define CUTOFF_VOLT_WORD 5 @@ -45,11 +47,14 @@ #define ESR_UPD_TIGHT_LOW_TEMP_OFFSET 2 #define ESR_UPD_BROAD_LOW_TEMP_OFFSET 3 #define KI_COEFF_MED_DISCHG_WORD 9 +#define TIMEBASE_OFFSET 1 #define KI_COEFF_MED_DISCHG_OFFSET 3 #define KI_COEFF_HI_DISCHG_WORD 10 #define KI_COEFF_HI_DISCHG_OFFSET 0 #define KI_COEFF_LOW_DISCHG_WORD 10 #define KI_COEFF_LOW_DISCHG_OFFSET 2 +#define KI_COEFF_FULL_SOC_WORD 12 +#define KI_COEFF_FULL_SOC_OFFSET 2 #define DELTA_MSOC_THR_WORD 12 #define DELTA_MSOC_THR_OFFSET 3 #define DELTA_BSOC_THR_WORD 13 @@ -126,6 +131,7 @@ #define RECHARGE_SOC_THR_v2_WORD 14 #define RECHARGE_SOC_THR_v2_OFFSET 1 #define CHG_TERM_CURR_v2_WORD 15 +#define CHG_TERM_BASE_CURR_v2_OFFSET 0 #define CHG_TERM_CURR_v2_OFFSET 1 #define EMPTY_VOLT_v2_WORD 15 #define EMPTY_VOLT_v2_OFFSET 3 @@ -216,12 +222,17 @@ static struct fg_sram_param pmi8998_v1_sram_params[] = { ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL), PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD, ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET, + 1, 100000, 390625, 0, fg_encode_default, NULL), PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD, KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0, fg_encode_default, NULL), PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD, KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_WORD, + KI_COEFF_FULL_SOC_OFFSET, 1, 1000, 244141, 0, + fg_encode_default, NULL), PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET, 1, 512, 1000000, 0, fg_encode_default, NULL), PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET, @@ -251,6 +262,8 @@ static struct fg_sram_param pmi8998_v2_sram_params[] = { fg_decode_cc_soc), PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2, 1, 1, 0, NULL, fg_decode_default), + PARAM(TIMEBASE, KI_COEFF_MED_DISCHG_WORD, TIMEBASE_OFFSET, 2, 1000, + 61000, 0, fg_encode_default, NULL), /* Entries below here are configurable during initialization */ PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000, 244141, 0, fg_encode_voltage, NULL), @@ -266,6 +279,9 @@ static struct fg_sram_param pmi8998_v2_sram_params[] = { 1000000, 122070, 0, fg_encode_current, NULL), PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1, 100000, 390625, 0, fg_encode_current, NULL), + PARAM(CHG_TERM_BASE_CURR, CHG_TERM_CURR_v2_WORD, + 
CHG_TERM_BASE_CURR_v2_OFFSET, 1, 1024, 1000, 0, + fg_encode_current, NULL), PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET, 1, 2048, 100, 0, fg_encode_default, NULL), PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET, @@ -286,12 +302,17 @@ static struct fg_sram_param pmi8998_v2_sram_params[] = { ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL), PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD, ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL), + PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET, + 1, 100000, 390625, 0, fg_encode_default, NULL), PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD, KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0, fg_encode_default, NULL), PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD, KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0, fg_encode_default, NULL), + PARAM(KI_COEFF_FULL_SOC, KI_COEFF_FULL_SOC_WORD, + KI_COEFF_FULL_SOC_OFFSET, 1, 1000, 244141, 0, + fg_encode_default, NULL), PARAM(ESR_TIGHT_FILTER, ESR_FILTER_WORD, ESR_UPD_TIGHT_OFFSET, 1, 512, 1000000, 0, fg_encode_default, NULL), PARAM(ESR_BROAD_FILTER, ESR_FILTER_WORD, ESR_UPD_BROAD_OFFSET, @@ -981,6 +1002,29 @@ static inline void get_batt_temp_delta(int delta, u8 *val) }; } +static inline void get_esr_meas_current(int curr_ma, u8 *val) +{ + switch (curr_ma) { + case 60: + *val = ESR_MEAS_CUR_60MA; + break; + case 120: + *val = ESR_MEAS_CUR_120MA; + break; + case 180: + *val = ESR_MEAS_CUR_180MA; + break; + case 240: + *val = ESR_MEAS_CUR_240MA; + break; + default: + *val = ESR_MEAS_CUR_120MA; + break; + }; + + *val <<= ESR_PULL_DOWN_IVAL_SHIFT; +} + static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging, int flags) { @@ -1453,6 +1497,37 @@ static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip) return 0; } +#define KI_COEFF_FULL_SOC_DEFAULT 733 +static int fg_adjust_ki_coeff_full_soc(struct fg_chip *chip, int batt_temp) +{ + int rc, ki_coeff_full_soc; + u8 val; + + if (batt_temp < 0) + ki_coeff_full_soc = 0; + else + ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT; + + if (chip->ki_coeff_full_soc == ki_coeff_full_soc) + return 0; + + fg_encode(chip->sp, FG_SRAM_KI_COEFF_FULL_SOC, ki_coeff_full_soc, &val); + rc = fg_sram_write(chip, + chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].addr_word, + chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].addr_byte, &val, + chip->sp[FG_SRAM_KI_COEFF_FULL_SOC].len, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing ki_coeff_full_soc, rc=%d\n", rc); + return rc; + } + + chip->ki_coeff_full_soc = ki_coeff_full_soc; + fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_full_soc %d\n", + ki_coeff_full_soc); + return 0; +} + static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv) { u8 buf; @@ -2037,6 +2112,11 @@ static void status_change_work(struct work_struct *work) if (rc < 0) pr_err("Error in configuring slope limiter rc:%d\n", rc); + + rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp); + if (rc < 0) + pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", + rc); } fg_batt_avg_update(chip); @@ -2189,6 +2269,35 @@ static int fg_get_cycle_count(struct fg_chip *chip) return count; } +static int fg_bp_params_config(struct fg_chip *chip) +{ + int rc = 0; + u8 buf; + + /* This SRAM register is only present in v2.0 and above */ + if (!(chip->wa_flags & PMI8998_V1_REV_WA) && + chip->bp.float_volt_uv > 0) { + fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT, + chip->bp.float_volt_uv / 1000, &buf); + rc = fg_sram_write(chip, 
chip->sp[FG_SRAM_FLOAT_VOLT].addr_word, + chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, &buf, + chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing float_volt, rc=%d\n", rc); + return rc; + } + } + + if (chip->bp.vbatt_full_mv > 0) { + rc = fg_set_constant_chg_voltage(chip, + chip->bp.vbatt_full_mv * 1000); + if (rc < 0) + return rc; + } + + return rc; +} + #define PROFILE_LOAD_BIT BIT(0) #define BOOTLOADER_LOAD_BIT BIT(1) #define BOOTLOADER_RESTART_BIT BIT(2) @@ -2367,6 +2476,11 @@ static void profile_load_work(struct work_struct *work) } done: + rc = fg_bp_params_config(chip); + if (rc < 0) + pr_err("Error in configuring battery profile params, rc:%d\n", + rc); + rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2, FG_IMA_DEFAULT); if (rc < 0) { @@ -3018,27 +3132,6 @@ static int fg_hw_init(struct fg_chip *chip) return rc; } - /* This SRAM register is only present in v2.0 and above */ - if (!(chip->wa_flags & PMI8998_V1_REV_WA) && - chip->bp.float_volt_uv > 0) { - fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT, - chip->bp.float_volt_uv / 1000, buf); - rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word, - chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf, - chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT); - if (rc < 0) { - pr_err("Error in writing float_volt, rc=%d\n", rc); - return rc; - } - } - - if (chip->bp.vbatt_full_mv > 0) { - rc = fg_set_constant_chg_voltage(chip, - chip->bp.vbatt_full_mv * 1000); - if (rc < 0) - return rc; - } - fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma, buf); rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word, @@ -3059,6 +3152,21 @@ static int fg_hw_init(struct fg_chip *chip) return rc; } + if (!(chip->wa_flags & PMI8998_V1_REV_WA)) { + fg_encode(chip->sp, FG_SRAM_CHG_TERM_BASE_CURR, + chip->dt.chg_term_base_curr_ma, buf); + rc = fg_sram_write(chip, + chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].addr_word, + chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].addr_byte, + buf, chip->sp[FG_SRAM_CHG_TERM_BASE_CURR].len, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing chg_term_base_curr, rc=%d\n", + rc); + return rc; + } + } + if (chip->dt.vbatt_low_thr_mv > 0) { fg_encode(chip->sp, FG_SRAM_VBATT_LOW, chip->dt.vbatt_low_thr_mv, buf); @@ -3234,6 +3342,24 @@ static int fg_hw_init(struct fg_chip *chip) return rc; } + fg_encode(chip->sp, FG_SRAM_ESR_PULSE_THRESH, + chip->dt.esr_pulse_thresh_ma, buf); + rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_word, + chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_byte, buf, + chip->sp[FG_SRAM_ESR_PULSE_THRESH].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing esr_pulse_thresh_ma, rc=%d\n", rc); + return rc; + } + + get_esr_meas_current(chip->dt.esr_meas_curr_ma, &val); + rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip), + ESR_PULL_DOWN_IVAL_MASK, val); + if (rc < 0) { + pr_err("Error in writing esr_meas_curr_ma, rc=%d\n", rc); + return rc; + } + return 0; } @@ -3242,6 +3368,40 @@ static int fg_memif_init(struct fg_chip *chip) return fg_ima_init(chip); } +static int fg_adjust_timebase(struct fg_chip *chip) +{ + int rc = 0, die_temp; + s32 time_base = 0; + u8 buf[2] = {0}; + + if ((chip->wa_flags & PM660_TSMC_OSC_WA) && chip->die_temp_chan) { + rc = iio_read_channel_processed(chip->die_temp_chan, &die_temp); + if (rc < 0) { + pr_err("Error in reading die_temp, rc:%d\n", rc); + return rc; + } + + rc = fg_lerp(fg_tsmc_osc_table, ARRAY_SIZE(fg_tsmc_osc_table), + die_temp / 1000, &time_base); + if (rc < 0) { + 
pr_err("Error to lookup fg_tsmc_osc_table rc=%d\n", rc); + return rc; + } + + fg_encode(chip->sp, FG_SRAM_TIMEBASE, time_base, buf); + rc = fg_sram_write(chip, + chip->sp[FG_SRAM_TIMEBASE].addr_word, + chip->sp[FG_SRAM_TIMEBASE].addr_byte, buf, + chip->sp[FG_SRAM_TIMEBASE].len, FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("Error in writing timebase, rc=%d\n", rc); + return rc; + } + } + + return 0; +} + /* INTERRUPT HANDLERS STAY HERE */ static irqreturn_t fg_mem_xcp_irq_handler(int irq, void *data) @@ -3349,6 +3509,10 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in configuring slope limiter rc:%d\n", rc); + rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp); + if (rc < 0) + pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", rc); + if (!batt_psy_initialized(chip)) { chip->last_batt_temp = batt_temp; return IRQ_HANDLED; @@ -3359,6 +3523,10 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data) chip->health = prop.intval; if (chip->last_batt_temp != batt_temp) { + rc = fg_adjust_timebase(chip); + if (rc < 0) + pr_err("Error in adjusting timebase, rc=%d\n", rc); + chip->last_batt_temp = batt_temp; power_supply_changed(chip->batt_psy); } @@ -3428,6 +3596,10 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data) if (rc < 0) pr_err("Error in validating ESR, rc=%d\n", rc); + rc = fg_adjust_timebase(chip); + if (rc < 0) + pr_err("Error in adjusting timebase, rc=%d\n", rc); + if (batt_psy_initialized(chip)) power_supply_changed(chip->batt_psy); @@ -3695,6 +3867,7 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip) #define DEFAULT_EMPTY_VOLT_MV 2800 #define DEFAULT_RECHARGE_VOLT_MV 4250 #define DEFAULT_CHG_TERM_CURR_MA 100 +#define DEFAULT_CHG_TERM_BASE_CURR_MA 75 #define DEFAULT_SYS_TERM_CURR_MA -125 #define DEFAULT_DELTA_SOC_THR 1 #define DEFAULT_RECHARGE_SOC_THR 95 @@ -3717,6 +3890,8 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip) #define DEFAULT_ESR_TIGHT_LT_FLT_UPCT 48829 #define DEFAULT_ESR_BROAD_LT_FLT_UPCT 148438 #define DEFAULT_ESR_CLAMP_MOHMS 20 +#define DEFAULT_ESR_PULSE_THRESH_MA 110 +#define DEFAULT_ESR_MEAS_CURR_MA 120 static int fg_parse_dt(struct fg_chip *chip) { struct device_node *child, *revid_node, *node = chip->dev->of_node; @@ -3767,6 +3942,8 @@ static int fg_parse_dt(struct fg_chip *chip) chip->sp = pmi8998_v2_sram_params; chip->alg_flags = pmi8998_v2_alg_flags; chip->use_ima_single_mode = true; + if (chip->pmic_rev_id->fab_id == PM660_FAB_ID_TSMC) + chip->wa_flags |= PM660_TSMC_OSC_WA; break; default: return -EINVAL; @@ -3847,6 +4024,12 @@ static int fg_parse_dt(struct fg_chip *chip) else chip->dt.sys_term_curr_ma = temp; + rc = of_property_read_u32(node, "qcom,fg-chg-term-base-current", &temp); + if (rc < 0) + chip->dt.chg_term_base_curr_ma = DEFAULT_CHG_TERM_BASE_CURR_MA; + else + chip->dt.chg_term_base_curr_ma = temp; + rc = of_property_read_u32(node, "qcom,fg-delta-soc-thr", &temp); if (rc < 0) chip->dt.delta_soc_thr = DEFAULT_DELTA_SOC_THR; @@ -4035,6 +4218,22 @@ static int fg_parse_dt(struct fg_chip *chip) else chip->dt.esr_clamp_mohms = temp; + chip->dt.esr_pulse_thresh_ma = DEFAULT_ESR_PULSE_THRESH_MA; + rc = of_property_read_u32(node, "qcom,fg-esr-pulse-thresh-ma", &temp); + if (!rc) { + /* ESR pulse qualification threshold range is 1-997 mA */ + if (temp > 0 && temp < 997) + chip->dt.esr_pulse_thresh_ma = temp; + } + + chip->dt.esr_meas_curr_ma = DEFAULT_ESR_MEAS_CURR_MA; + rc = of_property_read_u32(node, "qcom,fg-esr-meas-curr-ma", &temp); + if (!rc) { + 
/* ESR measurement current range is 60-240 mA */ + if (temp >= 60 || temp <= 240) + chip->dt.esr_meas_curr_ma = temp; + } + return 0; } @@ -4069,6 +4268,7 @@ static int fg_gen3_probe(struct platform_device *pdev) chip->irqs = fg_irqs; chip->charge_status = -EINVAL; chip->prev_charge_status = -EINVAL; + chip->ki_coeff_full_soc = -EINVAL; chip->regmap = dev_get_regmap(chip->dev->parent, NULL); if (!chip->regmap) { dev_err(chip->dev, "Parent regmap is unavailable\n"); @@ -4085,6 +4285,21 @@ static int fg_gen3_probe(struct platform_device *pdev) return rc; } + rc = of_property_match_string(chip->dev->of_node, + "io-channel-names", "rradc_die_temp"); + if (rc >= 0) { + chip->die_temp_chan = iio_channel_get(chip->dev, + "rradc_die_temp"); + if (IS_ERR(chip->die_temp_chan)) { + if (PTR_ERR(chip->die_temp_chan) != -EPROBE_DEFER) + pr_err("rradc_die_temp unavailable %ld\n", + PTR_ERR(chip->die_temp_chan)); + rc = PTR_ERR(chip->die_temp_chan); + chip->die_temp_chan = NULL; + return rc; + } + } + chip->awake_votable = create_votable("FG_WS", VOTE_SET_ANY, fg_awake_cb, chip); if (IS_ERR(chip->awake_votable)) { diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c index c74dc8989821..eb97eb0ff2ac 100644 --- a/drivers/power/supply/qcom/qpnp-qnovo.c +++ b/drivers/power/supply/qcom/qpnp-qnovo.c @@ -89,7 +89,16 @@ #define QNOVO_STRM_CTRL 0xA8 #define QNOVO_IADC_OFFSET_OVR_VAL 0xA9 #define QNOVO_IADC_OFFSET_OVR 0xAA + #define QNOVO_DISABLE_CHARGING 0xAB +#define ERR_SWITCHER_DISABLED BIT(7) +#define ERR_JEITA_SOFT_CONDITION BIT(6) +#define ERR_BAT_OV BIT(5) +#define ERR_CV_MODE BIT(4) +#define ERR_BATTERY_MISSING BIT(3) +#define ERR_SAFETY_TIMER_EXPIRED BIT(2) +#define ERR_CHARGING_DISABLED BIT(1) +#define ERR_JEITA_HARD_CONDITION BIT(0) #define QNOVO_TR_IADC_OFFSET_0 0xF1 #define QNOVO_TR_IADC_OFFSET_1 0xF2 @@ -1107,24 +1116,28 @@ static int qnovo_update_status(struct qnovo *chip) { u8 val = 0; int rc; - bool charging; + bool ok_to_qnovo; bool changed = false; rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1); if (rc < 0) { pr_err("Couldn't read error sts rc = %d\n", rc); - charging = false; + ok_to_qnovo = false; } else { - charging = !(val & QNOVO_ERROR_CHARGING_DISABLED); + /* + * For CV mode keep qnovo enabled, userspace is expected to + * disable it after few runs + */ + ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? 
true : false; } - if (chip->ok_to_qnovo ^ charging) { + if (chip->ok_to_qnovo ^ ok_to_qnovo) { - vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !charging, 0); - if (!charging) + vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0); + if (!ok_to_qnovo) vote(chip->disable_votable, USER_VOTER, true, 0); - chip->ok_to_qnovo = charging; + chip->ok_to_qnovo = ok_to_qnovo; changed = true; } @@ -1247,6 +1260,16 @@ static int qnovo_hw_init(struct qnovo *chip) chip->v_gain_mega = 1000000000 + (s64)(s8)vadc_gain * GAIN_LSB_FACTOR; chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000); + /* allow charger error conditions to disable qnovo, CV mode excluded */ + val = ERR_SWITCHER_DISABLED | ERR_JEITA_SOFT_CONDITION | ERR_BAT_OV | + ERR_BATTERY_MISSING | ERR_SAFETY_TIMER_EXPIRED | + ERR_CHARGING_DISABLED | ERR_JEITA_HARD_CONDITION; + rc = qnovo_write(chip, QNOVO_DISABLE_CHARGING, &val, 1); + if (rc < 0) { + pr_err("Couldn't write QNOVO_DISABLE_CHARGING rc = %d\n", rc); + return rc; + } + return 0; } diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index b4fb80a2d4f3..8855a1c74e0b 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -244,6 +244,8 @@ struct smb_dt_props { int boost_threshold_ua; int fv_uv; int wipower_max_uw; + int min_freq_khz; + int max_freq_khz; u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1]; s32 step_cc_delta[STEP_CHARGING_MAX_STEPS]; struct device_node *revid_dev_node; @@ -338,6 +340,18 @@ static int smb2_parse_dt(struct smb2 *chip) if (rc < 0) chip->dt.boost_threshold_ua = MICRO_P1A; + rc = of_property_read_u32(node, + "qcom,min-freq-khz", + &chip->dt.min_freq_khz); + if (rc < 0) + chip->dt.min_freq_khz = -EINVAL; + + rc = of_property_read_u32(node, + "qcom,max-freq-khz", + &chip->dt.max_freq_khz); + if (rc < 0) + chip->dt.max_freq_khz = -EINVAL; + rc = of_property_read_u32(node, "qcom,wipower-max-uw", &chip->dt.wipower_max_uw); if (rc < 0) @@ -414,6 +428,7 @@ static enum power_supply_property smb2_usb_props[] = { POWER_SUPPLY_PROP_BOOST_CURRENT, POWER_SUPPLY_PROP_PE_START, POWER_SUPPLY_PROP_CTM_CURRENT_MAX, + POWER_SUPPLY_PROP_HW_CURRENT_MAX, }; static int smb2_usb_get_prop(struct power_supply *psy, @@ -502,6 +517,9 @@ static int smb2_usb_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_CTM_CURRENT_MAX: val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER); break; + case POWER_SUPPLY_PROP_HW_CURRENT_MAX: + rc = smblib_get_charge_current(chg, &val->intval); + break; default: pr_err("get prop %d is not supported in usb\n", psp); rc = -EINVAL; @@ -522,6 +540,12 @@ static int smb2_usb_set_prop(struct power_supply *psy, struct smb_charger *chg = &chip->chg; int rc = 0; + mutex_lock(&chg->lock); + if (!chg->typec_present) { + rc = -EINVAL; + goto unlock; + } + switch (psp) { case POWER_SUPPLY_PROP_VOLTAGE_MIN: rc = smblib_set_prop_usb_voltage_min(chg, val); @@ -560,6 +584,8 @@ static int smb2_usb_set_prop(struct power_supply *psy, break; } +unlock: + mutex_unlock(&chg->lock); return rc; } @@ -610,12 +636,12 @@ static int smb2_init_usb_psy(struct smb2 *chip) static enum power_supply_property smb2_usb_main_props[] = { POWER_SUPPLY_PROP_VOLTAGE_MAX, - POWER_SUPPLY_PROP_ICL_REDUCTION, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_TYPE, POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED, POWER_SUPPLY_PROP_FCC_DELTA, + POWER_SUPPLY_PROP_CURRENT_MAX, /* * TODO move the TEMP and TEMP_MAX properties here, * and update the thermal 
balancer to look here @@ -634,9 +660,6 @@ static int smb2_usb_main_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_MAX: rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval); break; - case POWER_SUPPLY_PROP_ICL_REDUCTION: - val->intval = chg->icl_reduction_ua; - break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: rc = smblib_get_charge_param(chg, &chg->param.fcc, &val->intval); @@ -653,6 +676,9 @@ static int smb2_usb_main_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_FCC_DELTA: rc = smblib_get_prop_fcc_delta(chg, val); break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = get_effective_result(chg->usb_icl_votable); + break; default: pr_debug("get prop %d is not supported in usb-main\n", psp); rc = -EINVAL; @@ -677,12 +703,12 @@ static int smb2_usb_main_set_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_MAX: rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval); break; - case POWER_SUPPLY_PROP_ICL_REDUCTION: - rc = smblib_set_icl_reduction(chg, val->intval); - break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval); break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + rc = smblib_set_icl_current(chg, val->intval); + break; default: pr_err("set prop %d is not supported\n", psp); rc = -EINVAL; @@ -1332,10 +1358,12 @@ static int smb2_configure_typec(struct smb_charger *chg) return rc; } - /* disable try.SINK mode */ - rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT, 0); + /* disable try.SINK mode and legacy cable IRQs */ + rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT | + TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT | + TYPEC_LEGACY_CABLE_INT_EN_BIT, 0); if (rc < 0) { - dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc); + dev_err(chg->dev, "Couldn't set Type-C config rc=%d\n", rc); return rc; } @@ -1434,6 +1462,16 @@ static int smb2_init_hw(struct smb2 *chip) smblib_get_charge_param(chg, &chg->param.dc_icl, &chip->dt.dc_icl_ua); + if (chip->dt.min_freq_khz > 0) { + chg->param.freq_buck.min_u = chip->dt.min_freq_khz; + chg->param.freq_boost.min_u = chip->dt.min_freq_khz; + } + + if (chip->dt.max_freq_khz > 0) { + chg->param.freq_buck.max_u = chip->dt.max_freq_khz; + chg->param.freq_boost.max_u = chip->dt.max_freq_khz; + } + /* set a slower soft start setting for OTG */ rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG2_REG, ENG_SSUPPLY_IVREF_OTG_SS_MASK, OTG_SS_SLOW); @@ -1481,6 +1519,8 @@ static int smb2_init_hw(struct smb2 *chip) DEFAULT_VOTER, true, chip->dt.dc_icl_ua); vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0); + vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, + true, 0); vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER, chip->dt.hvdcp_disable, 0); vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, @@ -1572,6 +1612,16 @@ static int smb2_init_hw(struct smb2 *chip) return rc; } + /* disable h/w autonomous parallel charging control */ + rc = smblib_masked_write(chg, MISC_CFG_REG, + STAT_PARALLEL_1400MA_EN_CFG_BIT, 0); + if (rc < 0) { + dev_err(chg->dev, + "Couldn't disable h/w autonomous parallel control rc=%d\n", + rc); + return rc; + } + /* configure float charger options */ switch (chip->dt.float_option) { case 1: @@ -2008,6 +2058,16 @@ static int smb2_request_interrupts(struct smb2 *chip) return rc; } +static void smb2_disable_interrupts(struct smb_charger *chg) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(smb2_irqs); i++) { + if (smb2_irqs[i].irq > 
0) + disable_irq(smb2_irqs[i].irq); + } +} + #if defined(CONFIG_DEBUG_FS) static int force_batt_psy_update_write(void *data, u64 val) @@ -2219,7 +2279,7 @@ static int smb2_probe(struct platform_device *pdev) rc = smblib_get_prop_batt_health(chg, &val); if (rc < 0) { pr_err("Couldn't get batt health rc=%d\n", rc); - goto cleanup; + val.intval = POWER_SUPPLY_HEALTH_UNKNOWN; } batt_health = val.intval; @@ -2230,6 +2290,8 @@ static int smb2_probe(struct platform_device *pdev) } batt_charge_type = val.intval; + device_init_wakeup(chg->dev, true); + pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n", usb_present, chg->usb_psy_desc.type, batt_present, batt_health, batt_charge_type); @@ -2268,6 +2330,9 @@ static void smb2_shutdown(struct platform_device *pdev) struct smb2 *chip = platform_get_drvdata(pdev); struct smb_charger *chg = &chip->chg; + /* disable all interrupts */ + smb2_disable_interrupts(chg); + /* configure power role for UFP */ smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, TYPEC_POWER_ROLE_CMD_MASK, UFP_EN_CMD_BIT); diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index c74b310bd470..1e417e8aa22d 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -543,30 +543,6 @@ static void smblib_rerun_apsd(struct smb_charger *chg) smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc); } -static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg) -{ - const struct apsd_result *apsd_result; - - /* - * PD_INACTIVE_VOTER on hvdcp_disable_votable indicates whether - * apsd rerun was tried earlier - */ - if (get_client_vote(chg->hvdcp_disable_votable_indirect, - PD_INACTIVE_VOTER)) { - vote(chg->hvdcp_disable_votable_indirect, - PD_INACTIVE_VOTER, false, 0); - /* ensure hvdcp is enabled */ - if (!get_effective_result( - chg->hvdcp_disable_votable_indirect)) { - apsd_result = smblib_get_apsd_result(chg); - if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) { - smblib_rerun_apsd(chg); - } - } - } - return 0; -} - static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg) { const struct apsd_result *apsd_result = smblib_get_apsd_result(chg); @@ -654,10 +630,13 @@ static void smblib_uusb_removal(struct smb_charger *chg) { int rc; + cancel_delayed_work_sync(&chg->pl_enable_work); + vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0); + vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); + /* reset both usbin current and voltage votes */ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0); vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0); - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0); cancel_delayed_work_sync(&chg->hvdcp_detect_work); @@ -682,6 +661,7 @@ static void smblib_uusb_removal(struct smb_charger *chg) chg->voltage_max_uv = MICRO_5V; chg->usb_icl_delta_ua = 0; chg->pulse_cnt = 0; + chg->uusb_apsd_rerun_done = false; /* clear USB ICL vote for USB_PSY_VOTER */ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0); @@ -693,13 +673,6 @@ static void smblib_uusb_removal(struct smb_charger *chg) if (rc < 0) smblib_err(chg, "Couldn't un-vote DCP from USB ICL rc=%d\n", rc); - - /* clear USB ICL vote for PL_USBIN_USBIN_VOTER */ - rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0); - if (rc < 0) - smblib_err(chg, - "Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n", - rc); } void smblib_suspend_on_debug_battery(struct smb_charger *chg) @@ -756,6 +729,7 @@ int 
smblib_rerun_apsd_if_required(struct smb_charger *chg) rc); } + chg->uusb_apsd_rerun_done = true; smblib_rerun_apsd(chg); return 0; @@ -795,29 +769,12 @@ static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count) return 0; } -/********************* - * VOTABLE CALLBACKS * - *********************/ - -static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data, - int suspend, const char *client) -{ - struct smb_charger *chg = data; - - /* resume input if suspend is invalid */ - if (suspend < 0) - suspend = 0; - - return smblib_set_dc_suspend(chg, (bool)suspend); -} - #define USBIN_25MA 25000 #define USBIN_100MA 100000 #define USBIN_150MA 150000 #define USBIN_500MA 500000 #define USBIN_900MA 900000 - static int set_sdp_current(struct smb_charger *chg, int icl_ua) { int rc; @@ -856,20 +813,18 @@ static int set_sdp_current(struct smb_charger *chg, int icl_ua) return rc; } -static int smblib_usb_icl_vote_callback(struct votable *votable, void *data, - int icl_ua, const char *client) +int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) { - struct smb_charger *chg = data; int rc = 0; bool override; union power_supply_propval pval; /* suspend and return if 25mA or less is requested */ - if (client && (icl_ua < USBIN_25MA)) + if (icl_ua < USBIN_25MA) return smblib_set_usb_suspend(chg, true); disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq); - if (!client) + if (icl_ua == INT_MAX) goto override_suspend_config; rc = smblib_get_prop_typec_mode(chg, &pval); @@ -887,8 +842,7 @@ static int smblib_usb_icl_vote_callback(struct votable *votable, void *data, goto enable_icl_changed_interrupt; } } else { - rc = smblib_set_charge_param(chg, &chg->param.usb_icl, - icl_ua - chg->icl_reduction_ua); + rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua); if (rc < 0) { smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc); goto enable_icl_changed_interrupt; @@ -898,7 +852,7 @@ static int smblib_usb_icl_vote_callback(struct votable *votable, void *data, override_suspend_config: /* determine if override needs to be enforced */ override = true; - if (client == NULL) { + if (icl_ua == INT_MAX) { /* remove override if no voters - hw defaults is desired */ override = false; } else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) { @@ -906,7 +860,7 @@ override_suspend_config: /* For std cable with type = SDP never override */ override = false; else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP - && icl_ua - chg->icl_reduction_ua == 1500000) + && icl_ua == 1500000) /* * For std cable with type = CDP override only if * current is not 1500mA @@ -936,6 +890,22 @@ enable_icl_changed_interrupt: return rc; } +/********************* + * VOTABLE CALLBACKS * + *********************/ + +static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data, + int suspend, const char *client) +{ + struct smb_charger *chg = data; + + /* resume input if suspend is invalid */ + if (suspend < 0) + suspend = 0; + + return smblib_set_dc_suspend(chg, (bool)suspend); +} + static int smblib_dc_icl_vote_callback(struct votable *votable, void *data, int icl_ua, const char *client) { @@ -1029,6 +999,7 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable, struct smb_charger *chg = data; int rc; u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT; + u8 stat; /* vote to enable/disable HW autonomous INOV */ vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0); @@ -1050,6 +1021,16 @@ static int smblib_hvdcp_enable_vote_callback(struct votable 
*votable, return rc; } + rc = smblib_read(chg, APSD_STATUS_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc); + return rc; + } + + /* re-run APSD if HVDCP was detected */ + if (stat & QC_CHARGER_BIT) + smblib_rerun_apsd(chg); + return 0; } @@ -1143,6 +1124,22 @@ static int smblib_usb_irq_enable_vote_callback(struct votable *votable, return 0; } +static int smblib_typec_irq_disable_vote_callback(struct votable *votable, + void *data, int disable, const char *client) +{ + struct smb_charger *chg = data; + + if (!chg->irq_info[TYPE_C_CHANGE_IRQ].irq) + return 0; + + if (disable) + disable_irq_nosync(chg->irq_info[TYPE_C_CHANGE_IRQ].irq); + else + enable_irq(chg->irq_info[TYPE_C_CHANGE_IRQ].irq); + + return 0; +} + /******************* * VCONN REGULATOR * * *****************/ @@ -1151,7 +1148,7 @@ static int smblib_usb_irq_enable_vote_callback(struct votable *votable, static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev) { struct smb_charger *chg = rdev_get_drvdata(rdev); - u8 otg_stat, stat4; + u8 otg_stat, val; int rc = 0, i; if (!chg->external_vconn) { @@ -1182,17 +1179,12 @@ static int _smblib_vconn_regulator_enable(struct regulator_dev *rdev) * VCONN_EN_ORIENTATION is overloaded with overriding the CC pin used * for Vconn, and it should be set with reverse polarity of CC_OUT. */ - rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); - return rc; - } - smblib_dbg(chg, PR_OTG, "enabling VCONN\n"); - stat4 = stat4 & CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT; + val = chg->typec_status[3] & + CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT; rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT, - VCONN_EN_VALUE_BIT | stat4); + VCONN_EN_VALUE_BIT | val); if (rc < 0) { smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc); return rc; @@ -1540,6 +1532,21 @@ int smblib_get_prop_batt_status(struct smb_charger *chg, break; } + if (val->intval != POWER_SUPPLY_STATUS_CHARGING) + return 0; + + rc = smblib_read(chg, BATTERY_CHARGER_STATUS_7_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n", + rc); + return rc; + } + + stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT | + ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT; + if (!stat) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + return 0; } @@ -1793,6 +1800,10 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg, return -EINVAL; chg->system_temp_level = val->intval; + /* disable parallel charge in case of system temp level */ + vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER, + chg->system_temp_level ? 
true : false, 0); + if (chg->system_temp_level == chg->thermal_levels) return vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, true, 0); @@ -2027,7 +2038,7 @@ int smblib_get_prop_usb_online(struct smb_charger *chg, int rc = 0; u8 stat; - if (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0) { + if (get_client_vote_locked(chg->usb_icl_votable, USER_VOTER) == 0) { val->intval = false; return rc; } @@ -2136,23 +2147,13 @@ int smblib_get_prop_charger_temp_max(struct smb_charger *chg, int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg, union power_supply_propval *val) { - int rc = 0; - u8 stat; - - rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); - return rc; - } - smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", - stat); - - if (stat & CC_ATTACHED_BIT) - val->intval = (bool)(stat & CC_ORIENTATION_BIT) + 1; + if (chg->typec_status[3] & CC_ATTACHED_BIT) + val->intval = + (bool)(chg->typec_status[3] & CC_ORIENTATION_BIT) + 1; else val->intval = 0; - return rc; + return 0; } static const char * const smblib_typec_mode_name[] = { @@ -2170,17 +2171,7 @@ static const char * const smblib_typec_mode_name[] = { static int smblib_get_prop_ufp_mode(struct smb_charger *chg) { - int rc; - u8 stat; - - rc = smblib_read(chg, TYPE_C_STATUS_1_REG, &stat); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc); - return POWER_SUPPLY_TYPEC_NONE; - } - smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat); - - switch (stat) { + switch (chg->typec_status[0]) { case 0: return POWER_SUPPLY_TYPEC_NONE; case UFP_TYPEC_RDSTD_BIT: @@ -2198,17 +2189,7 @@ static int smblib_get_prop_ufp_mode(struct smb_charger *chg) static int smblib_get_prop_dfp_mode(struct smb_charger *chg) { - int rc; - u8 stat; - - rc = smblib_read(chg, TYPE_C_STATUS_2_REG, &stat); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc); - return POWER_SUPPLY_TYPEC_NONE; - } - smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_2 = 0x%02x\n", stat); - - switch (stat & DFP_TYPEC_MASK) { + switch (chg->typec_status[1] & DFP_TYPEC_MASK) { case DFP_RA_RA_BIT: return POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER; case DFP_RD_RD_BIT: @@ -2229,28 +2210,17 @@ static int smblib_get_prop_dfp_mode(struct smb_charger *chg) int smblib_get_prop_typec_mode(struct smb_charger *chg, union power_supply_propval *val) { - int rc; - u8 stat; - - rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); - val->intval = POWER_SUPPLY_TYPEC_NONE; - return rc; - } - smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat); - - if (!(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) { + if (!(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT)) { val->intval = POWER_SUPPLY_TYPEC_NONE; - return rc; + return 0; } - if (stat & UFP_DFP_MODE_STATUS_BIT) + if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) val->intval = smblib_get_prop_dfp_mode(chg); else val->intval = smblib_get_prop_ufp_mode(chg); - return rc; + return 0; } int smblib_get_prop_typec_power_role(struct smb_charger *chg, @@ -2342,16 +2312,7 @@ int smblib_get_prop_input_voltage_settled(struct smb_charger *chg, int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg, union power_supply_propval *val) { - int rc; - u8 ctrl; - - rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG rc=%d\n", - 
rc); - return rc; - } - val->intval = ctrl & EXIT_SNK_BASED_ON_CC_BIT; + val->intval = chg->pd_hard_reset; return 0; } @@ -2521,10 +2482,6 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg, return rc; } - if (chg->mode == PARALLEL_MASTER) - vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, - min_uv > MICRO_5V, 0); - chg->voltage_min_uv = min_uv; return rc; } @@ -2551,53 +2508,60 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, const union power_supply_propval *val) { int rc; - u8 stat = 0; - bool cc_debounced; - bool orientation; - bool pd_active = val->intval; + bool orientation, cc_debounced, sink_attached, hvdcp; + u8 stat; - if (!get_effective_result(chg->pd_allowed_votable)) { - smblib_err(chg, "PD is not allowed\n"); + if (!get_effective_result(chg->pd_allowed_votable)) return -EINVAL; - } - - vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0); - vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0); - vote(chg->usb_irq_enable_votable, PD_VOTER, pd_active, 0); - /* - * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line - * when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set - * or when VCONN_EN_VALUE_BIT is set. - */ - rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat); + rc = smblib_read(chg, APSD_STATUS_REG, &stat); if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); + smblib_err(chg, "Couldn't read APSD status rc=%d\n", rc); return rc; } - if (pd_active) { - orientation = stat & CC_ORIENTATION_BIT; + cc_debounced = (bool) + (chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT); + sink_attached = (bool) + (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT); + hvdcp = stat & QC_CHARGER_BIT; + + chg->pd_active = val->intval; + if (chg->pd_active) { + vote(chg->apsd_disable_votable, PD_VOTER, true, 0); + vote(chg->pd_allowed_votable, PD_VOTER, true, 0); + vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0); + + /* + * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 + * line when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) + * is set or when VCONN_EN_VALUE_BIT is set. + */ + orientation = chg->typec_status[3] & CC_ORIENTATION_BIT; rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, VCONN_EN_ORIENTATION_BIT, orientation ? 0 : VCONN_EN_ORIENTATION_BIT); - if (rc < 0) { + if (rc < 0) smblib_err(chg, "Couldn't enable vconn on CC line rc=%d\n", rc); - return rc; - } + + /* SW controlled CC_OUT */ + rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG, + TYPEC_SPARE_CFG_BIT, TYPEC_SPARE_CFG_BIT); + if (rc < 0) + smblib_err(chg, "Couldn't enable SW cc_out rc=%d\n", + rc); + /* * Enforce 500mA for PD until the real vote comes in later. 
* It is guaranteed that pd_active is set prior to * pd_current_max */ rc = vote(chg->usb_icl_votable, PD_VOTER, true, USBIN_500MA); - if (rc < 0) { + if (rc < 0) smblib_err(chg, "Couldn't vote for USB ICL rc=%d\n", - rc); - return rc; - } + rc); /* since PD was found the cable must be non-legacy */ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0); @@ -2605,53 +2569,40 @@ int smblib_set_prop_pd_active(struct smb_charger *chg, /* clear USB ICL vote for DCP_VOTER */ rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0); if (rc < 0) - smblib_err(chg, - "Couldn't un-vote DCP from USB ICL rc=%d\n", - rc); - - /* clear USB ICL vote for PL_USBIN_USBIN_VOTER */ - rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0); - if (rc < 0) - smblib_err(chg, - "Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n", - rc); + smblib_err(chg, "Couldn't un-vote DCP from USB ICL rc=%d\n", + rc); /* remove USB_PSY_VOTER */ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0); - if (rc < 0) { + if (rc < 0) smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc); - return rc; - } + } else { + vote(chg->apsd_disable_votable, PD_VOTER, false, 0); + vote(chg->pd_allowed_votable, PD_VOTER, true, 0); + vote(chg->usb_irq_enable_votable, PD_VOTER, true, 0); + vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, + false, 0); - /* pd active set, parallel charger can be enabled now */ - rc = vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, - false, 0); - if (rc < 0) { - smblib_err(chg, - "Couldn't unvote PL_DELAY_HVDCP_VOTER rc=%d\n", - rc); - return rc; - } - } + /* HW controlled CC_OUT */ + rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG, + TYPEC_SPARE_CFG_BIT, 0); + if (rc < 0) + smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", + rc); - /* CC pin selection s/w override in PD session; h/w otherwise. */ - rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG, - TYPEC_SPARE_CFG_BIT, - pd_active ? TYPEC_SPARE_CFG_BIT : 0); - if (rc < 0) { - smblib_err(chg, "Couldn't change cc_out ctrl to %s rc=%d\n", - pd_active ? "SW" : "HW", rc); - return rc; + /* + * This WA should only run for HVDCP. Non-legacy SDP/CDP could + * draw more, but this WA will remove Rd causing VBUS to drop, + * and data could be interrupted. Non-legacy DCP could also draw + * more, but it may impact compliance. + */ + if (!chg->typec_legacy_valid && cc_debounced && + !sink_attached && hvdcp) + schedule_work(&chg->legacy_detection_work); } - cc_debounced = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT); - if (!pd_active && cc_debounced) - try_rerun_apsd_for_hvdcp(chg); - - chg->pd_active = pd_active; smblib_update_usb_type(chg); power_supply_changed(chg->usb_psy); - return rc; } @@ -2754,88 +2705,70 @@ static struct reg_info cc2_detach_settings[] = { static int smblib_cc2_sink_removal_enter(struct smb_charger *chg) { - int rc = 0; - union power_supply_propval cc2_val = {0, }; + int rc, ccout, ufp_mode; + u8 stat; if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0) - return rc; + return 0; - if (chg->cc2_sink_detach_flag != CC2_SINK_NONE) - return rc; + if (chg->cc2_detach_wa_active) + return 0; - rc = smblib_get_prop_typec_cc_orientation(chg, &cc2_val); + rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat); if (rc < 0) { - smblib_err(chg, "Couldn't get cc orientation rc=%d\n", rc); + smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); return rc; } - if (cc2_val.intval == 1) - return rc; + ccout = (stat & CC_ATTACHED_BIT) ? 
+ (!!(stat & CC_ORIENTATION_BIT) + 1) : 0; + ufp_mode = (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) ? + !(stat & UFP_DFP_MODE_STATUS_BIT) : 0; - rc = smblib_get_prop_typec_mode(chg, &cc2_val); - if (rc < 0) { - smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc); - return rc; - } + if (ccout != 2) + return 0; - switch (cc2_val.intval) { - case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT: - smblib_reg_block_update(chg, cc2_detach_settings); - chg->cc2_sink_detach_flag = CC2_SINK_STD; - schedule_work(&chg->rdstd_cc2_detach_work); - break; - case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM: - case POWER_SUPPLY_TYPEC_SOURCE_HIGH: - chg->cc2_sink_detach_flag = CC2_SINK_MEDIUM_HIGH; - break; - default: - break; - } + if (!ufp_mode) + return 0; + chg->cc2_detach_wa_active = true; + /* The CC2 removal WA will cause a type-c-change IRQ storm */ + smblib_reg_block_update(chg, cc2_detach_settings); + schedule_work(&chg->rdstd_cc2_detach_work); return rc; } static int smblib_cc2_sink_removal_exit(struct smb_charger *chg) { - int rc = 0; - if ((chg->wa_flags & TYPEC_CC2_REMOVAL_WA_BIT) == 0) - return rc; - - if (chg->cc2_sink_detach_flag == CC2_SINK_STD) { - cancel_work_sync(&chg->rdstd_cc2_detach_work); - smblib_reg_block_restore(chg, cc2_detach_settings); - } + return 0; - chg->cc2_sink_detach_flag = CC2_SINK_NONE; + if (!chg->cc2_detach_wa_active) + return 0; - return rc; + chg->cc2_detach_wa_active = false; + cancel_work_sync(&chg->rdstd_cc2_detach_work); + smblib_reg_block_restore(chg, cc2_detach_settings); + return 0; } int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg, const union power_supply_propval *val) { - int rc; + int rc = 0; - rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, - EXIT_SNK_BASED_ON_CC_BIT, - (val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0); - if (rc < 0) { - smblib_err(chg, "Could not set EXIT_SNK_BASED_ON_CC rc=%d\n", - rc); + if (chg->pd_hard_reset == val->intval) return rc; - } - vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0); - - if (val->intval) - rc = smblib_cc2_sink_removal_enter(chg); - else - rc = smblib_cc2_sink_removal_exit(chg); + chg->pd_hard_reset = val->intval; + rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, + EXIT_SNK_BASED_ON_CC_BIT, + (chg->pd_hard_reset) ? 
EXIT_SNK_BASED_ON_CC_BIT : 0); + if (rc < 0) + smblib_err(chg, "Couldn't set EXIT_SNK_BASED_ON_CC rc=%d\n", + rc); - if (rc < 0) { - smblib_err(chg, "Could not detect cc2 removal rc=%d\n", rc); - return rc; - } + vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, + chg->pd_hard_reset, 0); return rc; } @@ -2880,15 +2813,21 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg, #define TYPEC_DEFAULT_CURRENT_MA 900000 #define TYPEC_MEDIUM_CURRENT_MA 1500000 #define TYPEC_HIGH_CURRENT_MA 3000000 -static int smblib_get_charge_current(struct smb_charger *chg, +int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua) { const struct apsd_result *apsd_result = smblib_update_usb_type(chg); union power_supply_propval val = {0, }; - int rc, typec_source_rd, current_ua; + int rc = 0, typec_source_rd, current_ua; bool non_compliant; u8 stat5; + if (chg->pd_active) { + *total_current_ua = + get_client_vote_locked(chg->usb_icl_votable, PD_VOTER); + return rc; + } + rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5); if (rc < 0) { smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc); @@ -2963,39 +2902,12 @@ static int smblib_get_charge_current(struct smb_charger *chg, return 0; } -int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua) -{ - int current_ua, rc; - - if (reduction_ua == 0) { - vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0); - } else { - /* - * No usb_icl voter means we are defaulting to hw chosen - * max limit. We need a vote from s/w to enforce the reduction. - */ - if (get_effective_result(chg->usb_icl_votable) == -EINVAL) { - rc = smblib_get_charge_current(chg, ¤t_ua); - if (rc < 0) { - pr_err("Failed to get ICL rc=%d\n", rc); - return rc; - } - vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, true, - current_ua); - } - } - - chg->icl_reduction_ua = reduction_ua; - - return rerun_election(chg->usb_icl_votable); -} - /************************ * PARALLEL PSY GETTERS * ************************/ int smblib_get_prop_slave_current_now(struct smb_charger *chg, - union power_supply_propval *pval) + union power_supply_propval *pval) { if (IS_ERR_OR_NULL(chg->iio.batt_i_chan)) chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i"); @@ -3058,7 +2970,7 @@ irqreturn_t smblib_handle_chg_state_change(int irq, void *data) rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat); if (rc < 0) { smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n", - rc); + rc); return IRQ_HANDLED; } @@ -3163,24 +3075,43 @@ irqreturn_t smblib_handle_usbin_uv(int irq, void *data) return IRQ_HANDLED; } -irqreturn_t smblib_handle_usb_plugin(int irq, void *data) +static void smblib_micro_usb_plugin(struct smb_charger *chg, bool vbus_rising) +{ + if (vbus_rising) { + /* use the typec flag even though its not typec */ + chg->typec_present = 1; + } else { + chg->typec_present = 0; + smblib_update_usb_type(chg); + extcon_set_cable_state_(chg->extcon, EXTCON_USB, false); + smblib_uusb_removal(chg); + } +} + +static void smblib_typec_usb_plugin(struct smb_charger *chg, bool vbus_rising) +{ + if (vbus_rising) + smblib_cc2_sink_removal_exit(chg); + else + smblib_cc2_sink_removal_enter(chg); +} + +#define PL_DELAY_MS 30000 +void smblib_usb_plugin_locked(struct smb_charger *chg) { - struct smb_irq_data *irq_data = data; - struct smb_charger *chg = irq_data->parent_data; int rc; u8 stat; bool vbus_rising; rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat); if (rc < 0) { - dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc); - return 
IRQ_HANDLED; + smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc); + return; } vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT); - smblib_set_opt_freq_buck(chg, - vbus_rising ? chg->chg_freq.freq_5V : - chg->chg_freq.freq_removal); + smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V : + chg->chg_freq.freq_removal); /* fetch the DPDM regulator */ if (!chg->dpdm_reg && of_get_property(chg->dev->of_node, @@ -3201,6 +3132,11 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data) smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n", rc); } + + /* Schedule work to enable parallel charger */ + vote(chg->awake_votable, PL_DELAY_VOTER, true, 0); + schedule_delayed_work(&chg->pl_enable_work, + msecs_to_jiffies(PL_DELAY_MS)); } else { if (chg->wa_flags & BOOST_BACK_WA) vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); @@ -3212,17 +3148,26 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data) smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n", rc); } - - if (chg->micro_usb_mode) { - smblib_update_usb_type(chg); - extcon_set_cable_state_(chg->extcon, EXTCON_USB, false); - smblib_uusb_removal(chg); - } } + if (chg->micro_usb_mode) + smblib_micro_usb_plugin(chg, vbus_rising); + else + smblib_typec_usb_plugin(chg, vbus_rising); + power_supply_changed(chg->usb_psy); - smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n", - irq_data->name, vbus_rising ? "attached" : "detached"); + smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n", + vbus_rising ? "attached" : "detached"); +} + +irqreturn_t smblib_handle_usb_plugin(int irq, void *data) +{ + struct smb_irq_data *irq_data = data; + struct smb_charger *chg = irq_data->parent_data; + + mutex_lock(&chg->lock); + smblib_usb_plugin_locked(chg); + mutex_unlock(&chg->lock); return IRQ_HANDLED; } @@ -3378,9 +3323,6 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg, } } - /* QC authentication done, parallel charger can be enabled now */ - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0); - smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n", apsd_result->name); } @@ -3394,9 +3336,6 @@ static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg, if (rising) { vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, false, 0); - if (get_effective_result(chg->pd_disallowed_votable_indirect)) - /* could be a legacy cable, try doing hvdcp */ - try_rerun_apsd_for_hvdcp(chg); /* enable HDC and ICL irq for QC2/3 charger */ if (qc_charger) @@ -3410,15 +3349,6 @@ static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg, /* enforce DCP ICL if specified */ vote(chg->usb_icl_votable, DCP_VOTER, chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua); - /* - * If adapter is not QC2.0/QC3.0 remove vote for parallel - * disable. - * Otherwise if adapter is QC2.0/QC3.0 wait for authentication - * to complete. 
- */ - if (!qc_charger) - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, - false, 0); } smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n", @@ -3440,6 +3370,10 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg, static void smblib_force_legacy_icl(struct smb_charger *chg, int pst) { + /* while PD is active it should have complete ICL control */ + if (chg->pd_active) + return; + switch (pst) { case POWER_SUPPLY_TYPE_USB: /* @@ -3479,7 +3413,7 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising) apsd_result = smblib_update_usb_type(chg); - if (!chg->pd_active) + if (!chg->typec_legacy_valid) smblib_force_legacy_icl(chg, apsd_result->pst); switch (apsd_result->bit) { @@ -3490,13 +3424,9 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising) true); case OCP_CHARGER_BIT: case FLOAT_CHARGER_BIT: - /* - * if not DCP then no hvdcp timeout happens. Enable - * pd/parallel here. - */ + /* if not DCP then no hvdcp timeout happens, Enable pd here. */ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, false, 0); - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0); break; case DCP_CHARGER_BIT: if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT) @@ -3525,6 +3455,17 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data) } smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat); + if (chg->micro_usb_mode && (stat & APSD_DTC_STATUS_DONE_BIT) + && !chg->uusb_apsd_rerun_done) { + /* + * Force re-run APSD to handle slow insertion related + * charger-mis-detection. + */ + chg->uusb_apsd_rerun_done = true; + smblib_rerun_apsd(chg); + return IRQ_HANDLED; + } + smblib_handle_apsd_done(chg, (bool)(stat & APSD_DTC_STATUS_DONE_BIT)); @@ -3558,77 +3499,6 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data) return IRQ_HANDLED; } -static void typec_source_removal(struct smb_charger *chg) -{ - int rc; - - /* reset legacy unknown vote */ - vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0); - - /* reset both usbin current and voltage votes */ - vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0); - vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0); - - cancel_delayed_work_sync(&chg->hvdcp_detect_work); - - if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) { - /* re-enable AUTH_IRQ_EN_CFG_BIT */ - rc = smblib_masked_write(chg, - USBIN_SOURCE_CHANGE_INTRPT_ENB_REG, - AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT); - if (rc < 0) - smblib_err(chg, - "Couldn't enable QC auth setting rc=%d\n", rc); - } - - /* reconfigure allowed voltage for HVDCP */ - rc = smblib_set_adapter_allowance(chg, - USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); - if (rc < 0) - smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n", - rc); - - chg->voltage_min_uv = MICRO_5V; - chg->voltage_max_uv = MICRO_5V; - - /* clear USB ICL vote for PD_VOTER */ - rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0); - if (rc < 0) - smblib_err(chg, "Couldn't un-vote PD from USB ICL rc=%d\n", rc); - - /* clear USB ICL vote for USB_PSY_VOTER */ - rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0); - if (rc < 0) - smblib_err(chg, - "Couldn't un-vote USB_PSY from USB ICL rc=%d\n", rc); - - /* clear USB ICL vote for DCP_VOTER */ - rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0); - if (rc < 0) - smblib_err(chg, - "Couldn't un-vote DCP from USB ICL rc=%d\n", rc); - - /* clear USB ICL vote for PL_USBIN_USBIN_VOTER */ - rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0); 
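
The smblib_set_icl_current() hunk earlier in this diff suspends USBIN when less than 25 mA is requested, routes a default (Rd-std) Type-C source through set_sdp_current() — whose body is not shown here — and programs the high-current ICL register otherwise. A simplified, standalone sketch of that decision flow follows; the "snap to a standard SDP level" step is only an assumption standing in for the elided set_sdp_current(), while the thresholds reuse the USBIN_* defines from the diff.

/*
 * Simplified, standalone model of the ICL request handling in
 * smblib_set_icl_current() above.  The suspend threshold and the
 * current levels come from the USBIN_* defines in the diff; snapping
 * to a standard SDP level is an assumption standing in for
 * set_sdp_current(), whose body is not shown in this hunk.
 */
#include <stdbool.h>
#include <stdio.h>

#define USBIN_25MA	25000
#define USBIN_100MA	100000
#define USBIN_150MA	150000
#define USBIN_500MA	500000
#define USBIN_900MA	900000

/* Pick the highest standard SDP level not exceeding the request. */
static int sdp_level_ua(int icl_ua)
{
	const int levels[] = { USBIN_900MA, USBIN_500MA,
			       USBIN_150MA, USBIN_100MA };
	int i;

	for (i = 0; i < 4; i++)
		if (icl_ua >= levels[i])
			return levels[i];
	return USBIN_100MA;
}

static void apply_icl(int icl_ua, bool typec_default_src)
{
	if (icl_ua < USBIN_25MA) {
		printf("%7d uA -> suspend USB input\n", icl_ua);
		return;
	}

	if (typec_default_src)
		printf("%7d uA -> SDP register level %d uA\n",
		       icl_ua, sdp_level_ua(icl_ua));
	else
		printf("%7d uA -> high-current ICL register\n", icl_ua);
}

int main(void)
{
	apply_icl(0, true);		/* below 25 mA: suspend input      */
	apply_icl(480000, true);	/* default source: snaps to 150 mA */
	apply_icl(3000000, false);	/* medium/high Rp: HC ICL register */
	return 0;
}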
- if (rc < 0) - smblib_err(chg, - "Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n", - rc); -} - -static void typec_source_insertion(struct smb_charger *chg) -{ - /* - * at any time we want LEGACY_UNKNOWN, PD, or USB_PSY to be voting for - * ICL, so vote LEGACY_UNKNOWN here if none of the above three have - * casted their votes - */ - if (!is_client_vote_enabled(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER) - && !is_client_vote_enabled(chg->usb_icl_votable, PD_VOTER) - && !is_client_vote_enabled(chg->usb_icl_votable, USB_PSY_VOTER)) - vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000); -} - static void typec_sink_insertion(struct smb_charger *chg) { /* when a sink is inserted we should not wait on hvdcp timeout to @@ -3649,27 +3519,50 @@ static void smblib_handle_typec_removal(struct smb_charger *chg) { int rc; + chg->cc2_detach_wa_active = false; + + /* reset APSD voters */ + vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0); + vote(chg->apsd_disable_votable, PD_VOTER, false, 0); + + cancel_delayed_work_sync(&chg->pl_enable_work); + cancel_delayed_work_sync(&chg->hvdcp_detect_work); + + /* reset input current limit voters */ + vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000); + vote(chg->usb_icl_votable, PD_VOTER, false, 0); + vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0); + vote(chg->usb_icl_votable, DCP_VOTER, false, 0); + vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0); + + /* reset hvdcp voters */ + vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0); + vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, true, 0); + + /* reset power delivery voters */ + vote(chg->pd_allowed_votable, PD_VOTER, false, 0); vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0); vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0); - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0); + + /* reset usb irq voters */ vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0); vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0); - /* reset votes from vbus_cc_short */ - vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, - true, 0); - vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER, - true, 0); - /* - * cable could be removed during hard reset, remove its vote to - * disable apsd - */ - vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0); + /* reset parallel voters */ + vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0); + vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0); + vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0); + vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); chg->vconn_attempts = 0; chg->otg_attempts = 0; chg->pulse_cnt = 0; chg->usb_icl_delta_ua = 0; + chg->voltage_min_uv = MICRO_5V; + chg->voltage_max_uv = MICRO_5V; + chg->pd_active = 0; + chg->pd_hard_reset = 0; + chg->typec_legacy_valid = false; /* enable APSD CC trigger for next insertion */ rc = smblib_masked_write(chg, TYPE_C_CFG_REG, @@ -3677,15 +3570,48 @@ static void smblib_handle_typec_removal(struct smb_charger *chg) if (rc < 0) smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc); - smblib_update_usb_type(chg); - typec_source_removal(chg); + if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) { + /* re-enable AUTH_IRQ_EN_CFG_BIT */ + rc = smblib_masked_write(chg, + USBIN_SOURCE_CHANGE_INTRPT_ENB_REG, + AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT); + if (rc < 0) + smblib_err(chg, + "Couldn't enable QC auth setting rc=%d\n", rc); + } + + 
/* reconfigure allowed voltage for HVDCP */ + rc = smblib_set_adapter_allowance(chg, + USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V); + if (rc < 0) + smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n", + rc); + + /* enable DRP */ + rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, + TYPEC_POWER_ROLE_CMD_MASK, 0); + if (rc < 0) + smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc); + + /* HW controlled CC_OUT */ + rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG, + TYPEC_SPARE_CFG_BIT, 0); + if (rc < 0) + smblib_err(chg, "Couldn't enable HW cc_out rc=%d\n", rc); + + /* restore crude sensor */ + rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5); + if (rc < 0) + smblib_err(chg, "Couldn't restore crude sensor rc=%d\n", rc); + typec_sink_removal(chg); + smblib_update_usb_type(chg); } static void smblib_handle_typec_insertion(struct smb_charger *chg, - bool sink_attached, bool legacy_cable) + bool sink_attached) { - int rp, rc; + int rc; vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0); @@ -3695,61 +3621,36 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg, smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n", rc); - if (sink_attached) { - typec_source_removal(chg); + if (sink_attached) typec_sink_insertion(chg); - } else { - typec_source_insertion(chg); + else typec_sink_removal(chg); - } - - rp = smblib_get_prop_ufp_mode(chg); - if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH - || rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) { - smblib_dbg(chg, PR_MISC, "VBUS & CC could be shorted; keeping HVDCP disabled\n"); - /* HVDCP is not going to be enabled; enable parallel */ - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0); - vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, - true, 0); - } else { - vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, - false, 0); - } } static void smblib_handle_typec_debounce_done(struct smb_charger *chg, - bool rising, bool sink_attached, bool legacy_cable) + bool rising, bool sink_attached) { int rc; union power_supply_propval pval = {0, }; - if (rising) - smblib_handle_typec_insertion(chg, sink_attached, legacy_cable); - else - smblib_handle_typec_removal(chg); + if (rising) { + if (!chg->typec_present) { + chg->typec_present = true; + smblib_dbg(chg, PR_MISC, "TypeC insertion\n"); + smblib_handle_typec_insertion(chg, sink_attached); + } + } else { + if (chg->typec_present) { + chg->typec_present = false; + smblib_dbg(chg, PR_MISC, "TypeC removal\n"); + smblib_handle_typec_removal(chg); + } + } rc = smblib_get_prop_typec_mode(chg, &pval); if (rc < 0) smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc); - /* - * HW BUG - after cable is removed, medium or high rd reading - * falls to std. Use it for signal of typec cc detachment in - * software WA. - */ - if (chg->cc2_sink_detach_flag == CC2_SINK_MEDIUM_HIGH - && pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) { - - chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE; - - rc = smblib_masked_write(chg, - TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, - EXIT_SNK_BASED_ON_CC_BIT, 0); - if (rc < 0) - smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", - rc); - } - smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n", rising ? 
"rising" : "falling", smblib_typec_mode_name[pval.intval]); @@ -3775,50 +3676,54 @@ irqreturn_t smblib_handle_usb_typec_change_for_uusb(struct smb_charger *chg) return IRQ_HANDLED; } -irqreturn_t smblib_handle_usb_typec_change(int irq, void *data) +static void smblib_usb_typec_change(struct smb_charger *chg) { - struct smb_irq_data *irq_data = data; - struct smb_charger *chg = irq_data->parent_data; int rc; - u8 stat4, stat5; - bool debounce_done, sink_attached, legacy_cable; - - if (chg->micro_usb_mode) - return smblib_handle_usb_typec_change_for_uusb(chg); - - /* WA - not when PD hard_reset WIP on cc2 in sink mode */ - if (chg->cc2_sink_detach_flag == CC2_SINK_STD) - return IRQ_HANDLED; - - rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4); - if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); - return IRQ_HANDLED; - } + bool debounce_done, sink_attached; - rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5); + rc = smblib_multibyte_read(chg, TYPE_C_STATUS_1_REG, + chg->typec_status, 5); if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc); - return IRQ_HANDLED; + smblib_err(chg, "Couldn't cache USB Type-C status rc=%d\n", rc); + return; } - debounce_done = (bool)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT); - sink_attached = (bool)(stat4 & UFP_DFP_MODE_STATUS_BIT); - legacy_cable = (bool)(stat5 & TYPEC_LEGACY_CABLE_STATUS_BIT); + debounce_done = + (bool)(chg->typec_status[3] & TYPEC_DEBOUNCE_DONE_STATUS_BIT); + sink_attached = + (bool)(chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT); - smblib_handle_typec_debounce_done(chg, - debounce_done, sink_attached, legacy_cable); + smblib_handle_typec_debounce_done(chg, debounce_done, sink_attached); - if (stat4 & TYPEC_VBUS_ERROR_STATUS_BIT) - smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n", - irq_data->name); + if (chg->typec_status[3] & TYPEC_VBUS_ERROR_STATUS_BIT) + smblib_dbg(chg, PR_INTERRUPT, "IRQ: vbus-error\n"); - if (stat4 & TYPEC_VCONN_OVERCURR_STATUS_BIT) + if (chg->typec_status[3] & TYPEC_VCONN_OVERCURR_STATUS_BIT) schedule_work(&chg->vconn_oc_work); power_supply_changed(chg->usb_psy); - smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat4); - smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_5 = 0x%02x\n", stat5); +} + +irqreturn_t smblib_handle_usb_typec_change(int irq, void *data) +{ + struct smb_irq_data *irq_data = data; + struct smb_charger *chg = irq_data->parent_data; + + if (chg->micro_usb_mode) { + smblib_handle_usb_typec_change_for_uusb(chg); + return IRQ_HANDLED; + } + + if (chg->cc2_detach_wa_active || chg->typec_en_dis_active) { + smblib_dbg(chg, PR_INTERRUPT, "Ignoring since %s active\n", + chg->cc2_detach_wa_active ? 
+ "cc2_detach_wa" : "typec_en_dis"); + return IRQ_HANDLED; + } + + mutex_lock(&chg->lock); + smblib_usb_typec_change(chg); + mutex_unlock(&chg->lock); return IRQ_HANDLED; } @@ -3846,7 +3751,7 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) { struct smb_irq_data *irq_data = data; struct smb_charger *chg = irq_data->parent_data; - int rc; + int rc, usb_icl; u8 stat; if (!(chg->wa_flags & BOOST_BACK_WA)) @@ -3858,8 +3763,9 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) return IRQ_HANDLED; } - if ((stat & USE_USBIN_BIT) && - get_effective_result(chg->usb_icl_votable) < USBIN_25MA) + /* skip suspending input if its already suspended by some other voter */ + usb_icl = get_effective_result(chg->usb_icl_votable); + if ((stat & USE_USBIN_BIT) && usb_icl >= 0 && usb_icl < USBIN_25MA) return IRQ_HANDLED; if (stat & USE_DCIN_BIT) @@ -3897,12 +3803,7 @@ static void smblib_hvdcp_detect_work(struct work_struct *work) vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, false, 0); - if (get_effective_result(chg->pd_disallowed_votable_indirect)) - /* pd is still disabled, try hvdcp */ - try_rerun_apsd_for_hvdcp(chg); - else - /* notify pd now that pd is allowed */ - power_supply_changed(chg->usb_psy); + power_supply_changed(chg->usb_psy); } static void bms_update_work(struct work_struct *work) @@ -3943,11 +3844,13 @@ static void clear_hdc_work(struct work_struct *work) static void rdstd_cc2_detach_work(struct work_struct *work) { int rc; - u8 stat; - struct smb_irq_data irq_data = {NULL, "cc2-removal-workaround"}; + u8 stat4, stat5; struct smb_charger *chg = container_of(work, struct smb_charger, rdstd_cc2_detach_work); + if (!chg->cc2_detach_wa_active) + return; + /* * WA steps - * 1. Enable both UFP and DFP, wait for 10ms. @@ -3955,7 +3858,7 @@ static void rdstd_cc2_detach_work(struct work_struct *work) * 3. Removal detected if both TYPEC_DEBOUNCE_DONE_STATUS * and TIMER_STAGE bits are gone, otherwise repeat all by * work rescheduling. - * Note, work will be cancelled when pd_hard_reset is 0. + * Note, work will be cancelled when USB_PLUGIN rises. 
*/ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, @@ -3978,30 +3881,35 @@ static void rdstd_cc2_detach_work(struct work_struct *work) usleep_range(30000, 31000); - rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat); + rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat4); if (rc < 0) { - smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", - rc); + smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc); return; } - if (stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT) - goto rerun; - rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat); + rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5); if (rc < 0) { smblib_err(chg, "Couldn't read TYPE_C_STATUS_5_REG rc=%d\n", rc); return; } - if (stat & TIMER_STAGE_2_BIT) + + if ((stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT) + || (stat5 & TIMER_STAGE_2_BIT)) { + smblib_dbg(chg, PR_MISC, "rerunning DD=%d TS2BIT=%d\n", + (int)(stat4 & TYPEC_DEBOUNCE_DONE_STATUS_BIT), + (int)(stat5 & TIMER_STAGE_2_BIT)); goto rerun; + } - /* Bingo, cc2 removal detected */ + smblib_dbg(chg, PR_MISC, "Bingo CC2 Removal detected\n"); + chg->cc2_detach_wa_active = false; + rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, + EXIT_SNK_BASED_ON_CC_BIT, 0); smblib_reg_block_restore(chg, cc2_detach_settings); - chg->cc2_sink_detach_flag = CC2_SINK_WA_DONE; - irq_data.parent_data = chg; - smblib_handle_usb_typec_change(0, &irq_data); - + mutex_lock(&chg->lock); + smblib_usb_typec_change(chg); + mutex_unlock(&chg->lock); return; rerun: @@ -4214,6 +4122,66 @@ static void smblib_icl_change_work(struct work_struct *work) smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua); } +static void smblib_pl_enable_work(struct work_struct *work) +{ + struct smb_charger *chg = container_of(work, struct smb_charger, + pl_enable_work.work); + + smblib_dbg(chg, PR_PARALLEL, "timer expired, enabling parallel\n"); + vote(chg->pl_disable_votable, PL_DELAY_VOTER, false, 0); + vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); +} + +static void smblib_legacy_detection_work(struct work_struct *work) +{ + struct smb_charger *chg = container_of(work, struct smb_charger, + legacy_detection_work); + int rc; + u8 stat; + bool legacy, rp_high; + + mutex_lock(&chg->lock); + chg->typec_en_dis_active = 1; + smblib_dbg(chg, PR_MISC, "running legacy unknown workaround\n"); + rc = smblib_masked_write(chg, + TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, + TYPEC_DISABLE_CMD_BIT, + TYPEC_DISABLE_CMD_BIT); + if (rc < 0) + smblib_err(chg, "Couldn't disable type-c rc=%d\n", rc); + + /* wait for the adapter to turn off VBUS */ + msleep(500); + + rc = smblib_masked_write(chg, + TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, + TYPEC_DISABLE_CMD_BIT, 0); + if (rc < 0) + smblib_err(chg, "Couldn't enable type-c rc=%d\n", rc); + + /* wait for type-c detection to complete */ + msleep(100); + + rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat); + if (rc < 0) { + smblib_err(chg, "Couldn't read typec stat5 rc = %d\n", rc); + goto unlock; + } + + chg->typec_legacy_valid = true; + vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0); + legacy = stat & TYPEC_LEGACY_CABLE_STATUS_BIT; + rp_high = smblib_get_prop_ufp_mode(chg) == + POWER_SUPPLY_TYPEC_SOURCE_HIGH; + if (!legacy || !rp_high) + vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER, + false, 0); + +unlock: + chg->typec_en_dis_active = 0; + mutex_unlock(&chg->lock); +} + static int smblib_create_votables(struct smb_charger *chg) { int rc = 0; @@ -4230,13 +4198,19 @@ static int smblib_create_votables(struct smb_charger *chg) return rc; } + 
chg->usb_icl_votable = find_votable("USB_ICL"); + if (!chg->usb_icl_votable) { + rc = -EPROBE_DEFER; + return rc; + } + chg->pl_disable_votable = find_votable("PL_DISABLE"); if (!chg->pl_disable_votable) { rc = -EPROBE_DEFER; return rc; } vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0); - vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0); + vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0); chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY, smblib_dc_suspend_vote_callback, @@ -4246,14 +4220,6 @@ static int smblib_create_votables(struct smb_charger *chg) return rc; } - chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN, - smblib_usb_icl_vote_callback, - chg); - if (IS_ERR(chg->usb_icl_votable)) { - rc = PTR_ERR(chg->usb_icl_votable); - return rc; - } - chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN, smblib_dc_icl_vote_callback, chg); @@ -4348,6 +4314,15 @@ static int smblib_create_votables(struct smb_charger *chg) return rc; } + chg->typec_irq_disable_votable = create_votable("TYPEC_IRQ_DISABLE", + VOTE_SET_ANY, + smblib_typec_irq_disable_vote_callback, + chg); + if (IS_ERR(chg->typec_irq_disable_votable)) { + rc = PTR_ERR(chg->typec_irq_disable_votable); + return rc; + } + return rc; } @@ -4373,6 +4348,8 @@ static void smblib_destroy_votables(struct smb_charger *chg) destroy_votable(chg->apsd_disable_votable); if (chg->hvdcp_hw_inov_dis_votable) destroy_votable(chg->hvdcp_hw_inov_dis_votable); + if (chg->typec_irq_disable_votable) + destroy_votable(chg->typec_irq_disable_votable); } static void smblib_iio_deinit(struct smb_charger *chg) @@ -4393,6 +4370,7 @@ int smblib_init(struct smb_charger *chg) { int rc = 0; + mutex_init(&chg->lock); mutex_init(&chg->write_lock); mutex_init(&chg->otg_oc_lock); INIT_WORK(&chg->bms_update_work, bms_update_work); @@ -4404,6 +4382,8 @@ int smblib_init(struct smb_charger *chg) INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work); INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work); INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work); + INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work); + INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work); chg->fake_capacity = -EINVAL; switch (chg->mode) { diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h index 048e7c2b4091..b0d84f014b0d 100644 --- a/drivers/power/supply/qcom/smb-lib.h +++ b/drivers/power/supply/qcom/smb-lib.h @@ -56,11 +56,12 @@ enum print_reason { #define MICRO_USB_VOTER "MICRO_USB_VOTER" #define DEBUG_BOARD_VOTER "DEBUG_BOARD_VOTER" #define PD_SUSPEND_SUPPORTED_VOTER "PD_SUSPEND_SUPPORTED_VOTER" -#define PL_DELAY_HVDCP_VOTER "PL_DELAY_HVDCP_VOTER" +#define PL_DELAY_VOTER "PL_DELAY_VOTER" #define CTM_VOTER "CTM_VOTER" #define SW_QC3_VOTER "SW_QC3_VOTER" #define AICL_RERUN_VOTER "AICL_RERUN_VOTER" #define LEGACY_UNKNOWN_VOTER "LEGACY_UNKNOWN_VOTER" +#define CC2_WA_VOTER "CC2_WA_VOTER" #define VCONN_MAX_ATTEMPTS 3 #define OTG_MAX_ATTEMPTS 3 @@ -71,13 +72,6 @@ enum smb_mode { NUM_MODES, }; -enum cc2_sink_type { - CC2_SINK_NONE = 0, - CC2_SINK_STD, - CC2_SINK_MEDIUM_HIGH, - CC2_SINK_WA_DONE, -}; - enum { QC_CHARGER_DETECTION_WA_BIT = BIT(0), BOOST_BACK_WA = BIT(1), @@ -236,6 +230,7 @@ struct smb_charger { int smb_version; /* locks */ + struct mutex lock; struct mutex write_lock; struct mutex ps_change_lock; struct mutex otg_oc_lock; @@ -276,6 +271,7 @@ struct smb_charger { struct votable *apsd_disable_votable; struct votable *hvdcp_hw_inov_dis_votable; 
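
The smblib_create_votables() hunk above stops creating the USB_ICL votable locally, looks it up with find_votable(), and defers probe with -EPROBE_DEFER when its owner has not registered it yet; clients such as LEGACY_UNKNOWN_VOTER and PD_VOTER then express their input-current requests through vote(), and the election decides the effective limit. The sketch below is a rough mental model of that min-wins election only — a standalone toy, not the pmic-voter API, with all names chosen for illustration.

/*
 * Toy model of a "min wins" votable like USB_ICL.  Purely
 * illustrative; the driver above uses the pmic-voter API
 * (create_votable()/vote()/get_effective_result()) instead.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_vote {
	const char *client;
	bool enabled;
	int value_ua;
};

/* Effective result: smallest enabled vote, INT_MAX if nobody votes. */
static int toy_effective_min(const struct toy_vote *v, int n)
{
	int i, eff = INT_MAX;

	for (i = 0; i < n; i++)
		if (v[i].enabled && v[i].value_ua < eff)
			eff = v[i].value_ua;
	return eff;
}

int main(void)
{
	struct toy_vote icl[] = {
		{ "LEGACY_UNKNOWN_VOTER", true, 100000 },
		{ "PD_VOTER",             true, 500000 },
	};

	/* With the legacy 100 mA safety vote still in place. */
	printf("effective ICL = %d uA\n", toy_effective_min(icl, 2));

	/* PD found: the diff then removes the LEGACY_UNKNOWN vote. */
	icl[0].enabled = false;
	printf("effective ICL = %d uA\n", toy_effective_min(icl, 2));
	return 0;
}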
struct votable *usb_irq_enable_votable; + struct votable *typec_irq_disable_votable; /* work */ struct work_struct bms_update_work; @@ -288,6 +284,8 @@ struct smb_charger { struct work_struct vconn_oc_work; struct delayed_work otg_ss_done_work; struct delayed_work icl_change_work; + struct delayed_work pl_enable_work; + struct work_struct legacy_detection_work; /* cached status */ int voltage_min_uv; @@ -311,18 +309,22 @@ struct smb_charger { int vconn_attempts; int default_icl_ua; int otg_cl_ua; + bool uusb_apsd_rerun_done; + bool pd_hard_reset; + bool typec_present; + u8 typec_status[5]; + bool typec_legacy_valid; /* workaround flag */ u32 wa_flags; - enum cc2_sink_type cc2_sink_detach_flag; + bool cc2_detach_wa_active; + bool typec_en_dis_active; int boost_current_ua; int temp_speed_reading_count; /* extcon for VBUS / ID notification to USB for uUSB */ struct extcon_dev *extcon; - int icl_reduction_ua; - /* qnovo */ int qnovo_fcc_ua; int qnovo_fv_uv; @@ -488,9 +490,10 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg); int smblib_get_prop_fcc_delta(struct smb_charger *chg, union power_supply_propval *val); int smblib_icl_override(struct smb_charger *chg, bool override); -int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua); int smblib_dp_dm(struct smb_charger *chg, int val); int smblib_rerun_aicl(struct smb_charger *chg); +int smblib_set_icl_current(struct smb_charger *chg, int icl_ua); +int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua); int smblib_init(struct smb_charger *chg); int smblib_deinit(struct smb_charger *chg); diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h index b79060094cf6..3f260a407721 100644 --- a/drivers/power/supply/qcom/smb-reg.h +++ b/drivers/power/supply/qcom/smb-reg.h @@ -919,6 +919,7 @@ enum { #define MISC_CFG_REG (MISC_BASE + 0x52) #define GSM_PA_ON_ADJ_SEL_BIT BIT(0) +#define STAT_PARALLEL_1400MA_EN_CFG_BIT BIT(3) #define TCC_DEBOUNCE_20MS_BIT BIT(5) #define SNARL_BARK_BITE_WD_CFG_REG (MISC_BASE + 0x53) @@ -1024,4 +1025,14 @@ enum { /* CHGR FREQ Peripheral registers */ #define FREQ_CLK_DIV_REG (CHGR_FREQ_BASE + 0x50) +/* SMB1355 specific registers */ +#define SMB1355_TEMP_COMP_STATUS_REG (MISC_BASE + 0x07) +#define SKIN_TEMP_RST_HOT_BIT BIT(6) +#define SKIN_TEMP_UB_HOT_BIT BIT(5) +#define SKIN_TEMP_LB_HOT_BIT BIT(4) +#define DIE_TEMP_TSD_HOT_BIT BIT(3) +#define DIE_TEMP_RST_HOT_BIT BIT(2) +#define DIE_TEMP_UB_HOT_BIT BIT(1) +#define DIE_TEMP_LB_HOT_BIT BIT(0) + #endif /* __SMB2_CHARGER_REG_H */ diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c index 8467d167512f..7014fd706d9e 100644 --- a/drivers/power/supply/qcom/smb1351-charger.c +++ b/drivers/power/supply/qcom/smb1351-charger.c @@ -1655,7 +1655,7 @@ static int smb1351_parallel_get_property(struct power_supply *psy, switch (prop) { case POWER_SUPPLY_PROP_CHARGING_ENABLED: - val->intval = !chip->usb_suspended_status; + val->intval = !chip->parallel_charger_suspended; break; case POWER_SUPPLY_PROP_CURRENT_MAX: if (!chip->parallel_charger_suspended) diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c index 4916c87aced8..4e710cae6b78 100644 --- a/drivers/power/supply/qcom/smb138x-charger.c +++ b/drivers/power/supply/qcom/smb138x-charger.c @@ -104,6 +104,8 @@ struct smb138x { struct smb_dt_props dt; struct power_supply *parallel_psy; u32 wa_flags; + struct pmic_revid_data *pmic_rev_id; + char *name; }; static int 
__debug_mask; @@ -167,6 +169,14 @@ static int smb138x_parse_dt(struct smb138x *chip) if (rc < 0) chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID; + /* check that smb1355 is configured to run in mid-mid mode */ + if (chip->pmic_rev_id->pmic_subtype == SMB1355_SUBTYPE + && chip->dt.pl_mode != POWER_SUPPLY_PL_USBMID_USBMID) { + pr_err("Smb1355 can only run in MID-MID mode, saw = %d mode\n", + chip->dt.pl_mode); + return -EINVAL; + } + chip->dt.suspend_input = of_property_read_bool(node, "qcom,suspend-input"); @@ -479,6 +489,30 @@ static int smb138x_init_batt_psy(struct smb138x *chip) * PARALLEL PSY REGISTRATION * *****************************/ +static int smb1355_get_prop_connector_health(struct smb138x *chip) +{ + struct smb_charger *chg = &chip->chg; + u8 temp; + int rc; + + rc = smblib_read(chg, SMB1355_TEMP_COMP_STATUS_REG, &temp); + if (rc < 0) { + pr_err("Couldn't read comp stat reg rc = %d\n", rc); + return POWER_SUPPLY_HEALTH_UNKNOWN; + } + + if (temp & SKIN_TEMP_RST_HOT_BIT) + return POWER_SUPPLY_HEALTH_OVERHEAT; + + if (temp & SKIN_TEMP_UB_HOT_BIT) + return POWER_SUPPLY_HEALTH_HOT; + + if (temp & SKIN_TEMP_LB_HOT_BIT) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_COOL; +} + static int smb138x_get_prop_connector_health(struct smb138x *chip) { struct smb_charger *chg = &chip->chg; @@ -536,16 +570,32 @@ static enum power_supply_property smb138x_parallel_props[] = { POWER_SUPPLY_PROP_PIN_ENABLED, POWER_SUPPLY_PROP_INPUT_SUSPEND, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED, - POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, - POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_PARALLEL_MODE, + POWER_SUPPLY_PROP_CONNECTOR_HEALTH, + POWER_SUPPLY_PROP_SET_SHIP_MODE, POWER_SUPPLY_PROP_CHARGER_TEMP, POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_MAX, +}; + +static enum power_supply_property smb1355_parallel_props[] = { + POWER_SUPPLY_PROP_CHARGE_TYPE, + POWER_SUPPLY_PROP_CHARGING_ENABLED, + POWER_SUPPLY_PROP_PIN_ENABLED, + POWER_SUPPLY_PROP_INPUT_SUSPEND, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_PARALLEL_MODE, POWER_SUPPLY_PROP_CONNECTOR_HEALTH, POWER_SUPPLY_PROP_SET_SHIP_MODE, + POWER_SUPPLY_PROP_CHARGER_TEMP, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, }; static int smb138x_parallel_get_prop(struct power_supply *psy, @@ -577,18 +627,12 @@ static int smb138x_parallel_get_prop(struct power_supply *psy, rc = smblib_get_usb_suspend(chg, &val->intval); break; case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED: - if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) + if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) + || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) rc = smblib_get_prop_input_current_limited(chg, val); else val->intval = 0; break; - case POWER_SUPPLY_PROP_CURRENT_MAX: - if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) - rc = smblib_get_charge_param(chg, &chg->param.usb_icl, - &val->intval); - else - val->intval = 0; - break; case POWER_SUPPLY_PROP_VOLTAGE_MAX: rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval); break; @@ -596,28 +640,46 @@ static int smb138x_parallel_get_prop(struct power_supply *psy, rc = smblib_get_charge_param(chg, &chg->param.fcc, &val->intval); break; - case POWER_SUPPLY_PROP_CURRENT_NOW: - rc = smblib_get_prop_slave_current_now(chg, val); - break; - case 
POWER_SUPPLY_PROP_CHARGER_TEMP: - rc = smb138x_get_prop_charger_temp(chip, val); - break; - case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: - rc = smblib_get_prop_charger_temp_max(chg, val); - break; case POWER_SUPPLY_PROP_MODEL_NAME: - val->strval = "smb138x"; + val->strval = chip->name; break; case POWER_SUPPLY_PROP_PARALLEL_MODE: val->intval = chip->dt.pl_mode; break; case POWER_SUPPLY_PROP_CONNECTOR_HEALTH: - val->intval = smb138x_get_prop_connector_health(chip); + if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE) + val->intval = smb138x_get_prop_connector_health(chip); + else + val->intval = smb1355_get_prop_connector_health(chip); break; case POWER_SUPPLY_PROP_SET_SHIP_MODE: /* Not in ship mode as long as device is active */ val->intval = 0; break; + case POWER_SUPPLY_PROP_CHARGER_TEMP: + if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE) + rc = smb138x_get_prop_charger_temp(chip, val); + else + rc = smblib_get_prop_charger_temp(chg, val); + break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + rc = smblib_get_prop_charger_temp_max(chg, val); + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + if (chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE) + rc = smblib_get_prop_slave_current_now(chg, val); + else + rc = -ENODATA; + break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + if ((chip->pmic_rev_id->pmic_subtype != SMB1355_SUBTYPE) + && ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) + || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))) + rc = smblib_get_charge_param(chg, &chg->param.usb_icl, + &val->intval); + else + rc = -ENODATA; + break; default: pr_err("parallel power supply get prop %d not supported\n", prop); @@ -669,7 +731,8 @@ static int smb138x_parallel_set_prop(struct power_supply *psy, rc = smb138x_set_parallel_suspend(chip, (bool)val->intval); break; case POWER_SUPPLY_PROP_CURRENT_MAX: - if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) + if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) + || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) rc = smblib_set_charge_param(chg, &chg->param.usb_icl, val->intval); break; @@ -700,7 +763,7 @@ static int smb138x_parallel_prop_is_writeable(struct power_supply *psy, return 0; } -static const struct power_supply_desc parallel_psy_desc = { +static struct power_supply_desc parallel_psy_desc = { .name = "parallel", .type = POWER_SUPPLY_TYPE_PARALLEL, .properties = smb138x_parallel_props, @@ -728,6 +791,28 @@ static int smb138x_init_parallel_psy(struct smb138x *chip) return 0; } +static int smb1355_init_parallel_psy(struct smb138x *chip) +{ + struct power_supply_config parallel_cfg = {}; + struct smb_charger *chg = &chip->chg; + + parallel_cfg.drv_data = chip; + parallel_cfg.of_node = chg->dev->of_node; + + /* change to smb1355's property list */ + parallel_psy_desc.properties = smb1355_parallel_props; + parallel_psy_desc.num_properties = ARRAY_SIZE(smb1355_parallel_props); + chip->parallel_psy = devm_power_supply_register(chg->dev, + ¶llel_psy_desc, + ¶llel_cfg); + if (IS_ERR(chip->parallel_psy)) { + pr_err("Couldn't register parallel power supply\n"); + return PTR_ERR(chip->parallel_psy); + } + + return 0; +} + /****************************** * VBUS REGULATOR REGISTRATION * ******************************/ @@ -1047,7 +1132,6 @@ static int smb138x_init_hw(struct smb138x *chip) static int smb138x_setup_wa_flags(struct smb138x *chip) { - struct pmic_revid_data *pmic_rev_id; struct device_node *revid_dev_node; revid_dev_node = of_parse_phandle(chip->chg.dev->of_node, @@ -1057,8 +1141,8 @@ static int 
smb138x_setup_wa_flags(struct smb138x *chip) return -EINVAL; } - pmic_rev_id = get_revid_data(revid_dev_node); - if (IS_ERR_OR_NULL(pmic_rev_id)) { + chip->pmic_rev_id = get_revid_data(revid_dev_node); + if (IS_ERR_OR_NULL(chip->pmic_rev_id)) { /* * the revid peripheral must be registered, any failure * here only indicates that the rev-id module has not @@ -1067,14 +1151,14 @@ static int smb138x_setup_wa_flags(struct smb138x *chip) return -EPROBE_DEFER; } - switch (pmic_rev_id->pmic_subtype) { + switch (chip->pmic_rev_id->pmic_subtype) { case SMB1381_SUBTYPE: - if (pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */ + if (chip->pmic_rev_id->rev4 < 2) /* SMB1381 rev 1.0 */ chip->wa_flags |= OOB_COMP_WA_BIT; break; default: pr_err("PMIC subtype %d not supported\n", - pmic_rev_id->pmic_subtype); + chip->pmic_rev_id->pmic_subtype); return -EINVAL; } @@ -1372,6 +1456,7 @@ static int smb138x_master_probe(struct smb138x *chip) chg->param = v1_params; + chip->name = "smb1381"; rc = smblib_init(chg); if (rc < 0) { pr_err("Couldn't initialize smblib rc=%d\n", rc); @@ -1432,7 +1517,7 @@ static int smb138x_master_probe(struct smb138x *chip) return rc; } -static int smb138x_slave_probe(struct smb138x *chip) +static int smb1355_slave_probe(struct smb138x *chip) { struct smb_charger *chg = &chip->chg; int rc = 0; @@ -1445,6 +1530,55 @@ static int smb138x_slave_probe(struct smb138x *chip) goto cleanup; } + rc = smb138x_parse_dt(chip); + if (rc < 0) { + pr_err("Couldn't parse device tree rc=%d\n", rc); + goto cleanup; + } + + rc = smb138x_init_slave_hw(chip); + if (rc < 0) { + pr_err("Couldn't initialize hardware rc=%d\n", rc); + goto cleanup; + } + + rc = smb1355_init_parallel_psy(chip); + if (rc < 0) { + pr_err("Couldn't initialize parallel psy rc=%d\n", rc); + goto cleanup; + } + + rc = smb138x_determine_initial_slave_status(chip); + if (rc < 0) { + pr_err("Couldn't determine initial status rc=%d\n", rc); + goto cleanup; + } + + rc = smb138x_request_interrupts(chip); + if (rc < 0) { + pr_err("Couldn't request interrupts rc=%d\n", rc); + goto cleanup; + } + + return 0; + +cleanup: + smblib_deinit(chg); + return rc; +} + +static int smb1381_slave_probe(struct smb138x *chip) +{ + struct smb_charger *chg = &chip->chg; + int rc = 0; + + chg->param = v1_params; + + rc = smblib_init(chg); + if (rc < 0) { + pr_err("Couldn't initialize smblib rc=%d\n", rc); + goto cleanup; + } chg->iio.temp_max_chan = iio_channel_get(chg->dev, "charger_temp_max"); if (IS_ERR(chg->iio.temp_max_chan)) { rc = PTR_ERR(chg->iio.temp_max_chan); @@ -1484,7 +1618,8 @@ static int smb138x_slave_probe(struct smb138x *chip) goto cleanup; } - if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) { + if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN) + || (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) { rc = smb138x_init_vbus_regulator(chip); if (rc < 0) { pr_err("Couldn't initialize vbus regulator rc=%d\n", @@ -1511,25 +1646,71 @@ static int smb138x_slave_probe(struct smb138x *chip) goto cleanup; } - return rc; + return 0; cleanup: smblib_deinit(chg); - if (chip->parallel_psy) - power_supply_unregister(chip->parallel_psy); - if (chg->vbus_vreg && chg->vbus_vreg->rdev) - regulator_unregister(chg->vbus_vreg->rdev); return rc; } +static int slave_probe(struct smb138x *chip) +{ + struct device_node *revid_dev_node; + int rc = 0; + + revid_dev_node = of_parse_phandle(chip->chg.dev->of_node, + "qcom,pmic-revid", 0); + if (!revid_dev_node) { + pr_err("Missing qcom,pmic-revid property\n"); + return -EINVAL; + } + + chip->pmic_rev_id = 
get_revid_data(revid_dev_node); + if (IS_ERR_OR_NULL(chip->pmic_rev_id)) { + /* + * the revid peripheral must be registered, any failure + * here only indicates that the rev-id module has not + * probed yet. + */ + return -EPROBE_DEFER; + } + + switch (chip->pmic_rev_id->pmic_subtype) { + case SMB1355_SUBTYPE: + chip->name = "smb1355"; + rc = smb1355_slave_probe(chip); + break; + case SMB1381_SUBTYPE: + chip->name = "smb1381"; + rc = smb1381_slave_probe(chip); + break; + default: + pr_err("Unsupported pmic subtype = 0x%02x\n", + chip->pmic_rev_id->pmic_subtype); + rc = -EINVAL; + } + + if (rc < 0) { + if (rc != -EPROBE_DEFER) + pr_err("Couldn't probe SMB138X rc=%d\n", rc); + return rc; + } + + return 0; +} + static const struct of_device_id match_table[] = { { - .compatible = "qcom,smb138x-charger", - .data = (void *) PARALLEL_MASTER + .compatible = "qcom,smb138x-charger", + .data = (void *) PARALLEL_MASTER, + }, + { + .compatible = "qcom,smb138x-parallel-slave", + .data = (void *) PARALLEL_SLAVE, }, { - .compatible = "qcom,smb138x-parallel-slave", - .data = (void *) PARALLEL_SLAVE + .compatible = "qcom,smb1355-parallel-slave", + .data = (void *) PARALLEL_SLAVE, }, { }, }; @@ -1576,7 +1757,7 @@ static int smb138x_probe(struct platform_device *pdev) rc = smb138x_master_probe(chip); break; case PARALLEL_SLAVE: - rc = smb138x_slave_probe(chip); + rc = slave_probe(chip); break; default: pr_err("Couldn't find a matching mode %d\n", chip->chg.mode); @@ -1590,7 +1771,8 @@ static int smb138x_probe(struct platform_device *pdev) goto cleanup; } - pr_info("SMB138X probed successfully mode=%d\n", chip->chg.mode); + pr_info("%s probed successfully mode=%d pl_mode = %d\n", + chip->name, chip->chg.mode, chip->dt.pl_mode); return rc; cleanup: diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c index 333ad7d5b45b..da72b0b59c3a 100644 --- a/drivers/pps/clients/pps-gpio.c +++ b/drivers/pps/clients/pps-gpio.c @@ -57,7 +57,7 @@ static irqreturn_t pps_gpio_irq_handler(int irq, void *data) int rising_edge; /* Get the time stamp first */ - pps_get_ts(&ts); + get_monotonic_boottime(&ts.ts_real); info = data; diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c index dbe2a08f1776..858ddcc228df 100644 --- a/drivers/regulator/qpnp-labibb-regulator.c +++ b/drivers/regulator/qpnp-labibb-regulator.c @@ -559,6 +559,7 @@ struct lab_regulator { int step_size; int slew_rate; int soft_start; + int sc_wait_time_ms; int vreg_enabled; }; @@ -608,6 +609,8 @@ struct qpnp_labibb { bool skip_2nd_swire_cmd; bool pfm_enable; bool notify_lab_vreg_ok_sts; + bool detect_lab_sc; + bool sc_detected; u32 swire_2nd_cmd_delay; u32 swire_ibb_ps_enable_delay; }; @@ -2138,8 +2141,10 @@ static void qpnp_lab_vreg_notifier_work(struct work_struct *work) u8 val; struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb, lab_vreg_ok_work); + if (labibb->lab_vreg.sc_wait_time_ms != -EINVAL) + retries = labibb->lab_vreg.sc_wait_time_ms / 5; - while (retries--) { + while (retries) { rc = qpnp_labibb_read(labibb, labibb->lab_base + REG_LAB_STATUS1, &val, 1); if (rc < 0) { @@ -2155,10 +2160,30 @@ static void qpnp_lab_vreg_notifier_work(struct work_struct *work) } usleep_range(dly, dly + 100); + retries--; } - if (!retries) - pr_err("LAB_VREG_OK not set, failed to notify\n"); + if (!retries) { + if (labibb->detect_lab_sc) { + pr_crit("short circuit detected on LAB rail.. 
disabling the LAB/IBB/OLEDB modules\n"); + /* Disable LAB module */ + val = 0; + rc = qpnp_labibb_write(labibb, labibb->lab_base + + REG_LAB_MODULE_RDY, &val, 1); + if (rc < 0) { + pr_err("write register %x failed rc = %d\n", + REG_LAB_MODULE_RDY, rc); + return; + } + raw_notifier_call_chain(&labibb_notifier, + LAB_VREG_NOT_OK, NULL); + labibb->sc_detected = true; + labibb->lab_vreg.vreg_enabled = 0; + labibb->ibb_vreg.vreg_enabled = 0; + } else { + pr_err("LAB_VREG_OK not set, failed to notify\n"); + } + } } static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb) @@ -2323,6 +2348,11 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev) struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); + if (labibb->sc_detected) { + pr_info("Short circuit detected: disabled LAB/IBB rails\n"); + return 0; + } + if (labibb->skip_2nd_swire_cmd) { rc = qpnp_ibb_ps_config(labibb, false); if (rc < 0) { @@ -2363,7 +2393,7 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev) labibb->lab_vreg.vreg_enabled = 1; } - if (labibb->notify_lab_vreg_ok_sts) + if (labibb->notify_lab_vreg_ok_sts || labibb->detect_lab_sc) schedule_work(&labibb->lab_vreg_ok_work); return 0; @@ -2621,6 +2651,12 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb, labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node, "qcom,notify-lab-vreg-ok-sts"); + labibb->lab_vreg.sc_wait_time_ms = -EINVAL; + if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE && + labibb->detect_lab_sc) + of_property_read_u32(of_node, "qcom,qpnp-lab-sc-wait-time-ms", + &labibb->lab_vreg.sc_wait_time_ms); + rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start", &(labibb->lab_vreg.soft_start)); if (!rc) { @@ -3255,6 +3291,11 @@ static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev) u8 val; struct qpnp_labibb *labibb = rdev_get_drvdata(rdev); + if (labibb->sc_detected) { + pr_info("Short circuit detected: disabled LAB/IBB rails\n"); + return 0; + } + if (!labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) { if (!labibb->standalone) @@ -3731,6 +3772,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev) if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) { labibb->mode = QPNP_LABIBB_AMOLED_MODE; + /* Enable polling for LAB short circuit detection for PM660A */ + labibb->detect_lab_sc = true; } else { rc = of_property_read_string(labibb->dev->of_node, "qcom,qpnp-labibb-mode", &mode_name); diff --git a/drivers/regulator/qpnp-oledb-regulator.c b/drivers/regulator/qpnp-oledb-regulator.c index c012f373e80e..bee9a3d82eeb 100644 --- a/drivers/regulator/qpnp-oledb-regulator.c +++ b/drivers/regulator/qpnp-oledb-regulator.c @@ -27,6 +27,7 @@ #include <linux/regulator/of_regulator.h> #include <linux/regulator/qpnp-labibb-regulator.h> #include <linux/qpnp/qpnp-pbs.h> +#include <linux/qpnp/qpnp-revid.h> #define QPNP_OLEDB_REGULATOR_DRIVER_NAME "qcom,qpnp-oledb-regulator" #define OLEDB_VOUT_STEP_MV 100 @@ -162,6 +163,7 @@ struct qpnp_oledb { struct notifier_block oledb_nb; struct mutex bus_lock; struct device_node *pbs_dev_node; + struct pmic_revid_data *pmic_rev_id; u32 base; u8 mod_enable; @@ -181,6 +183,8 @@ struct qpnp_oledb { bool dynamic_ext_pinctl_config; bool pbs_control; bool force_pd_control; + bool handle_lab_sc_notification; + bool lab_sc_detected; }; static const u16 oledb_warmup_dly_ns[] = {6700, 13300, 26700, 53400}; @@ -275,6 +279,11 @@ static int qpnp_oledb_regulator_enable(struct regulator_dev *rdev) struct qpnp_oledb *oledb = rdev_get_drvdata(rdev); + 
if (oledb->lab_sc_detected == true) { + pr_info("Short circuit detected: Disabled OLEDB rail\n"); + return 0; + } + if (oledb->ext_pin_control) { rc = qpnp_oledb_read(oledb, oledb->base + OLEDB_EXT_PIN_CTL, &val, 1); @@ -368,12 +377,19 @@ static int qpnp_oledb_regulator_disable(struct regulator_dev *rdev) } if (val & OLEDB_FORCE_PD_CTL_SPARE_BIT) { - rc = qpnp_pbs_trigger_event(oledb->pbs_dev_node, - trigger_bitmap); + rc = qpnp_oledb_sec_masked_write(oledb, oledb->base + + OLEDB_SPARE_CTL, + OLEDB_FORCE_PD_CTL_SPARE_BIT, 0); if (rc < 0) { - pr_err("Failed to trigger the PBS sequence\n"); + pr_err("Failed to write SPARE_CTL rc=%d\n", rc); return rc; } + + rc = qpnp_pbs_trigger_event(oledb->pbs_dev_node, + trigger_bitmap); + if (rc < 0) + pr_err("Failed to trigger the PBS sequence\n"); + pr_debug("PBS event triggered\n"); } else { pr_debug("OLEDB_SPARE_CTL register bit not set\n"); @@ -1085,8 +1101,22 @@ static int qpnp_oledb_parse_fast_precharge(struct qpnp_oledb *oledb) static int qpnp_oledb_parse_dt(struct qpnp_oledb *oledb) { int rc = 0; + struct device_node *revid_dev_node; struct device_node *of_node = oledb->dev->of_node; + revid_dev_node = of_parse_phandle(oledb->dev->of_node, + "qcom,pmic-revid", 0); + if (!revid_dev_node) { + pr_err("Missing qcom,pmic-revid property - driver failed\n"); + return -EINVAL; + } + + oledb->pmic_rev_id = get_revid_data(revid_dev_node); + if (IS_ERR(oledb->pmic_rev_id)) { + pr_debug("Unable to get revid data\n"); + return -EPROBE_DEFER; + } + oledb->swire_control = of_property_read_bool(of_node, "qcom,swire-control"); @@ -1100,8 +1130,14 @@ static int qpnp_oledb_parse_dt(struct qpnp_oledb *oledb) oledb->pbs_control = of_property_read_bool(of_node, "qcom,pbs-control"); - oledb->force_pd_control = - of_property_read_bool(of_node, "qcom,force-pd-control"); + /* Use the force_pd_control only for PM660A versions <= v2.0 */ + if (oledb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE && + oledb->pmic_rev_id->rev4 <= PM660L_V2P0_REV4) { + if (!(oledb->pmic_rev_id->rev4 == PM660L_V2P0_REV4 && + oledb->pmic_rev_id->rev2 > PM660L_V2P0_REV2)) { + oledb->force_pd_control = true; + } + } if (oledb->force_pd_control) { oledb->pbs_dev_node = of_parse_phandle(of_node, @@ -1199,13 +1235,6 @@ static int qpnp_oledb_force_pulldown_config(struct qpnp_oledb *oledb) int rc = 0; u8 val; - rc = qpnp_oledb_sec_masked_write(oledb, oledb->base + - OLEDB_SPARE_CTL, OLEDB_FORCE_PD_CTL_SPARE_BIT, 0); - if (rc < 0) { - pr_err("Failed to write SPARE_CTL rc=%d\n", rc); - return rc; - } - val = 1; rc = qpnp_oledb_write(oledb, oledb->base + OLEDB_PD_CTL, &val, 1); @@ -1227,14 +1256,31 @@ static int qpnp_labibb_notifier_cb(struct notifier_block *nb, unsigned long action, void *data) { int rc = 0; + u8 val; struct qpnp_oledb *oledb = container_of(nb, struct qpnp_oledb, oledb_nb); + if (action == LAB_VREG_NOT_OK) { + /* short circuit detected. 
Disable OLEDB module */ + val = 0; + rc = qpnp_oledb_write(oledb, oledb->base + OLEDB_MODULE_RDY, + &val, 1); + if (rc < 0) { + pr_err("Failed to write MODULE_RDY rc=%d\n", rc); + return NOTIFY_STOP; + } + oledb->lab_sc_detected = true; + oledb->mod_enable = false; + pr_crit("LAB SC detected, disabling OLEDB forever!\n"); + } + if (action == LAB_VREG_OK) { /* Disable SWIRE pull down control and enable via spmi mode */ rc = qpnp_oledb_force_pulldown_config(oledb); - if (rc < 0) + if (rc < 0) { + pr_err("Failed to config force pull down\n"); return NOTIFY_STOP; + } } return NOTIFY_OK; @@ -1281,7 +1327,11 @@ static int qpnp_oledb_regulator_probe(struct platform_device *pdev) return rc; } - if (oledb->force_pd_control) { + /* Enable LAB short circuit notification support */ + if (oledb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) + oledb->handle_lab_sc_notification = true; + + if (oledb->force_pd_control || oledb->handle_lab_sc_notification) { oledb->oledb_nb.notifier_call = qpnp_labibb_notifier_cb; rc = qpnp_labibb_notifier_register(&oledb->oledb_nb); if (rc < 0) { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index d5c00951cf93..5d81bcc1dc75 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2806,10 +2806,10 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max && sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && - logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) { - q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); - rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); - } else + sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) + rw_max = q->limits.io_opt = + sdkp->opt_xfer_blocks * sdp->sector_size; + else rw_max = BLK_DEF_MAX_SECTORS; /* Combine with controller limits */ diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 765a6f1ac1b7..654630bb7d0e 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -151,11 +151,6 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo return blocks << (ilog2(sdev->sector_size) - 9); } -static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) -{ - return blocks * sdev->sector_size; -} - /* * A DIF-capable target device can be formatted with different * protection schemes. Currently 0 through 3 are defined: diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 3e858015813f..c5393d517432 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -4289,15 +4289,25 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) * mode hence full reinit is required to move link to HS speeds. */ if (ret || hba->full_init_linereset) { + int err; + hba->full_init_linereset = false; ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER); dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d", __func__, ret); /* - * If link recovery fails then return error so that caller - * don't retry the hibern8 enter again. + * If link recovery fails then return error code (-ENOLINK) + * returned ufshcd_link_recovery(). + * If link recovery succeeds then return -EAGAIN to attempt + * hibern8 enter retry again. 
*/ - ret = ufshcd_link_recovery(hba); + err = ufshcd_link_recovery(hba); + if (err) { + dev_err(hba->dev, "%s: link recovery failed", __func__); + ret = err; + } else { + ret = -EAGAIN; + } } else { dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__, ktime_to_us(ktime_get())); @@ -4314,8 +4324,8 @@ int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) ret = __ufshcd_uic_hibern8_enter(hba); if (!ret) goto out; - /* Unable to recover the link, so no point proceeding */ - if (ret == -ENOLINK) + else if (ret != -EAGAIN) + /* Unable to recover the link, so no point proceeding */ BUG(); } out: @@ -9228,10 +9238,11 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up) /* scale up to G3 now */ new_pwr_info.gear_tx = UFS_HS_G3; new_pwr_info.gear_rx = UFS_HS_G3; - ret = ufshcd_change_power_mode(hba, &new_pwr_info); - if (ret) - goto out; + /* now, fall through to set the HS-G3 */ } + ret = ufshcd_change_power_mode(hba, &new_pwr_info); + if (ret) + goto out; } else { memcpy(&new_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr)); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 75bebf66376d..34b0adb108eb 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -441,6 +441,23 @@ config QCOM_MEMORY_DUMP_V2 of deadlocks or cpu hangs these dump regions are captured to give a snapshot of the system at the time of the crash. +config QCOM_MINIDUMP + bool "QCOM Minidump Support" + depends on MSM_SMEM && QCOM_DLOAD_MODE + help + This enables minidump feature. It allows various clients to + register to dump their state at system bad state (panic/WDT,etc.,). + This uses SMEM to store all registered client information. + This will dump all registered entries, only when DLOAD mode is enabled. + +config MINIDUMP_MAX_ENTRIES + int "Minidump Maximum num of entries" + default 200 + depends on QCOM_MINIDUMP + help + This defines maximum number of entries to be allocated for application + subsytem in Minidump SMEM table. + config ICNSS tristate "Platform driver for Q6 integrated connectivity" ---help--- diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index fa350d122384..87698b75d3b8 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -66,6 +66,7 @@ obj-$(CONFIG_QCOM_SCM_XPU) += scm-xpu.o obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o obj-$(CONFIG_QCOM_MEMORY_DUMP) += memory_dump.o obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o +obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o obj-$(CONFIG_QCOM_DCC) += dcc.o obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c index ecf89b2b3b37..f001e820b797 100644 --- a/drivers/soc/qcom/common_log.c +++ b/drivers/soc/qcom/common_log.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -18,6 +18,8 @@ #include <linux/slab.h> #include <linux/kmemleak.h> #include <soc/qcom/memory_dump.h> +#include <soc/qcom/minidump.h> +#include <asm/sections.h> #define MISC_DUMP_DATA_LEN 4096 #define PMIC_DUMP_DATA_LEN (64 * 1024) @@ -38,6 +40,8 @@ void register_misc_dump(void) misc_buf = kzalloc(MISC_DUMP_DATA_LEN, GFP_KERNEL); if (!misc_buf) goto err0; + + strlcpy(misc_data->name, "KMISC", sizeof(misc_data->name)); misc_data->addr = virt_to_phys(misc_buf); misc_data->len = MISC_DUMP_DATA_LEN; dump_entry.id = MSM_DUMP_DATA_MISC; @@ -70,6 +74,7 @@ static void register_pmic_dump(void) if (!dump_addr) goto err0; + strlcpy(dump_data->name, "KPMIC", sizeof(dump_data->name)); dump_data->addr = virt_to_phys(dump_addr); dump_data->len = PMIC_DUMP_DATA_LEN; dump_entry.id = MSM_DUMP_DATA_PMIC; @@ -104,6 +109,8 @@ static void register_vsense_dump(void) if (!dump_addr) goto err0; + strlcpy(dump_data->name, "KVSENSE", + sizeof(dump_data->name)); dump_data->addr = virt_to_phys(dump_addr); dump_data->len = VSENSE_DUMP_DATA_LEN; dump_entry.id = MSM_DUMP_DATA_VSENSE; @@ -136,6 +143,7 @@ void register_rpm_dump(void) if (!dump_addr) goto err0; + strlcpy(dump_data->name, "KRPM", sizeof(dump_data->name)); dump_data->addr = virt_to_phys(dump_addr); dump_data->len = RPM_DUMP_DATA_LEN; dump_entry.id = MSM_DUMP_DATA_RPM; @@ -217,8 +225,39 @@ static void __init common_log_register_log_buf(void) } } +static void __init register_kernel_sections(void) +{ + struct md_region ksec_entry; + char *data_name = "KDATABSS"; + const size_t static_size = __per_cpu_end - __per_cpu_start; + void __percpu *base = (void __percpu *)__per_cpu_start; + unsigned int cpu; + + strlcpy(ksec_entry.name, data_name, sizeof(ksec_entry.name)); + ksec_entry.virt_addr = (uintptr_t)_sdata; + ksec_entry.phys_addr = virt_to_phys(_sdata); + ksec_entry.size = roundup((__bss_stop - _sdata), 4); + if (msm_minidump_add_region(&ksec_entry)) + pr_err("Failed to add data section in Minidump\n"); + + /* Add percpu static sections */ + for_each_possible_cpu(cpu) { + void *start = per_cpu_ptr(base, cpu); + + memset(&ksec_entry, 0, sizeof(ksec_entry)); + scnprintf(ksec_entry.name, sizeof(ksec_entry.name), + "KSPERCPU%d", cpu); + ksec_entry.virt_addr = (uintptr_t)start; + ksec_entry.phys_addr = per_cpu_ptr_to_phys(start); + ksec_entry.size = static_size; + if (msm_minidump_add_region(&ksec_entry)) + pr_err("Failed to add percpu sections in Minidump\n"); + } +} + static int __init msm_common_log_init(void) { + register_kernel_sections(); common_log_register_log_buf(); register_misc_dump(); register_pmic_dump(); diff --git a/drivers/soc/qcom/cpuss_dump.c b/drivers/soc/qcom/cpuss_dump.c index 93c876a7ad73..2e08b78dee94 100644 --- a/drivers/soc/qcom/cpuss_dump.c +++ b/drivers/soc/qcom/cpuss_dump.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -75,6 +75,8 @@ static int cpuss_dump_probe(struct platform_device *pdev) dump_data->addr = dump_addr; dump_data->len = size; + scnprintf(dump_data->name, sizeof(dump_data->name), + "KCPUSS%X", id); dump_entry.id = id; dump_entry.addr = virt_to_phys(dump_data); ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry); diff --git a/drivers/soc/qcom/dcc.c b/drivers/soc/qcom/dcc.c index aced50bf7fda..e5f3f119065b 100644 --- a/drivers/soc/qcom/dcc.c +++ b/drivers/soc/qcom/dcc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1173,6 +1173,8 @@ static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata) /* Allocate memory for dcc reg dump */ drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL); if (drvdata->reg_buf) { + strlcpy(drvdata->reg_data.name, "KDCC_REG", + sizeof(drvdata->reg_data.name)); drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf); drvdata->reg_data.len = drvdata->reg_size; reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG; @@ -1190,6 +1192,8 @@ static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata) /* Allocate memory for dcc sram dump */ drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL); if (drvdata->sram_buf) { + strlcpy(drvdata->sram_data.name, "KDCC_SRAM", + sizeof(drvdata->sram_data.name)); drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf); drvdata->sram_data.len = drvdata->ram_size; sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM; diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index cc809cbdd839..72f5829d1eb6 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -1845,7 +1845,7 @@ static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock) /** * ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if - * it is not found. + * it is not found and get reference of context. * @xprt_ctx: Transport to search for a matching channel. * @name: Name of the desired channel. * @@ -1901,6 +1901,7 @@ check_ctx: spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags); kfree(ctx); + rwref_get(&entry->ch_state_lhb2); rwref_write_put(&xprt_ctx->xprt_state_lhb0); return entry; } @@ -1935,6 +1936,7 @@ check_ctx: "%s: local:GLINK_CHANNEL_CLOSED\n", __func__); } + rwref_get(&ctx->ch_state_lhb2); spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags); rwref_write_put(&xprt_ctx->xprt_state_lhb0); mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4); @@ -2579,6 +2581,7 @@ void *glink_open(const struct glink_open_config *cfg) GLINK_INFO_CH_XPRT(ctx, transport_ptr, "%s: Channel not ready to be re-opened. State: %u\n", __func__, ctx->local_open_state); + rwref_put(&ctx->ch_state_lhb2); return ERR_PTR(-EBUSY); } @@ -2627,11 +2630,13 @@ void *glink_open(const struct glink_open_config *cfg) ctx->local_open_state = GLINK_CHANNEL_CLOSED; GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n", __func__, ret); + rwref_put(&ctx->ch_state_lhb2); return ERR_PTR(ret); } GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. 
ctx %p\n", __func__, ctx); + rwref_put(&ctx->ch_state_lhb2); return ctx; } EXPORT_SYMBOL(glink_open); @@ -4804,6 +4809,7 @@ static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr, GLINK_ERR_CH(ctx, "%s: Duplicate remote open for rcid %u, name '%s'\n", __func__, rcid, name); + rwref_put(&ctx->ch_state_lhb2); glink_core_migration_edge_unlock(if_ptr->glink_core_priv); return; } @@ -4826,6 +4832,7 @@ static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr, if (do_migrate) ch_migrate(NULL, ctx); + rwref_put(&ctx->ch_state_lhb2); glink_core_migration_edge_unlock(if_ptr->glink_core_priv); } diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c index 2dc4208cbc51..85d51807077c 100644 --- a/drivers/soc/qcom/glink_smem_native_xprt.c +++ b/drivers/soc/qcom/glink_smem_native_xprt.c @@ -798,6 +798,12 @@ static bool get_rx_fifo(struct edge_info *einfo) einfo->remote_proc_id, SMEM_ITEM_CACHED_FLAG); if (!einfo->rx_fifo) + einfo->rx_fifo = smem_get_entry( + SMEM_GLINK_NATIVE_XPRT_FIFO_1, + &einfo->rx_fifo_size, + einfo->remote_proc_id, + 0); + if (!einfo->rx_fifo) return false; } diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 0b35caa86d51..ab46eb70651c 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -2110,7 +2110,6 @@ static int icnss_driver_event_register_driver(void *data) power_off: icnss_hw_power_off(penv); - penv->ops = NULL; out: return ret; } @@ -2646,7 +2645,7 @@ int icnss_register_driver(struct icnss_driver_ops *ops) } ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER, - ICNSS_EVENT_SYNC, ops); + 0, ops); if (ret == -EINTR) ret = 0; diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c index af141c808c81..092b1c1af44b 100644 --- a/drivers/soc/qcom/memory_dump_v2.c +++ b/drivers/soc/qcom/memory_dump_v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -20,6 +20,7 @@ #include <linux/kmemleak.h> #include <soc/qcom/memory_dump.h> #include <soc/qcom/scm.h> +#include <soc/qcom/minidump.h> #define MSM_DUMP_TABLE_VERSION MSM_DUMP_MAKE_VERSION(2, 0) @@ -87,6 +88,29 @@ static struct msm_dump_table *msm_dump_get_table(enum msm_dump_table_ids id) return table; } +int msm_dump_data_add_minidump(struct msm_dump_entry *entry) +{ + struct msm_dump_data *data; + struct md_region md_entry; + + data = (struct msm_dump_data *)(phys_to_virt(entry->addr)); + if (!strcmp(data->name, "")) { + pr_info("Entry name is NULL, Use ID %d for minidump\n", + entry->id); + snprintf(md_entry.name, sizeof(md_entry.name), "KMDT0x%X", + entry->id); + } else { + strlcpy(md_entry.name, data->name, sizeof(md_entry.name)); + } + + md_entry.phys_addr = data->addr; + md_entry.virt_addr = (uintptr_t)phys_to_virt(data->addr); + md_entry.size = data->len; + md_entry.id = entry->id; + + return msm_minidump_add_region(&md_entry); +} + int msm_dump_data_register(enum msm_dump_table_ids id, struct msm_dump_entry *entry) { @@ -107,6 +131,10 @@ int msm_dump_data_register(enum msm_dump_table_ids id, table->num_entries++; dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table)); + + if (msm_dump_data_add_minidump(entry)) + pr_info("Failed to add entry in Minidump table\n"); + return 0; } EXPORT_SYMBOL(msm_dump_data_register); diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c index 78f6a2aa8f66..a92e5c416678 100644 --- a/drivers/soc/qcom/msm_glink_pkt.c +++ b/drivers/soc/qcom/msm_glink_pkt.c @@ -625,14 +625,17 @@ ssize_t glink_pkt_read(struct file *file, return -ENETRESET; } + mutex_lock(&devp->ch_lock); if (!glink_rx_intent_exists(devp->handle, count)) { ret = glink_queue_rx_intent(devp->handle, devp, count); if (ret) { GLINK_PKT_ERR("%s: failed to queue_rx_intent ret[%d]\n", __func__, ret); + mutex_unlock(&devp->ch_lock); return ret; } } + mutex_unlock(&devp->ch_lock); GLINK_PKT_INFO("Begin %s on glink_pkt_dev id:%d buffer_size %zu\n", __func__, devp->i, count); diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c new file mode 100644 index 000000000000..1cb36bf98555 --- /dev/null +++ b/drivers/soc/qcom/msm_minidump.c @@ -0,0 +1,371 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#define pr_fmt(fmt) "Minidump: " fmt + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/elf.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <soc/qcom/smem.h> +#include <soc/qcom/scm.h> +#include <soc/qcom/minidump.h> + + +#define MAX_NUM_ENTRIES (CONFIG_MINIDUMP_MAX_ENTRIES + 1) +#define SMEM_ENTRY_SIZE 32 +#define MAX_MEM_LENGTH (SMEM_ENTRY_SIZE * MAX_NUM_ENTRIES) +#define MAX_STRTBL_SIZE (MAX_NUM_ENTRIES * MAX_NAME_LENGTH) +#define SMEM_MINIDUMP_TABLE_ID 602 + +/* Bootloader Minidump table */ +struct md_smem_table { + u32 version; + u32 smem_length; + u64 next_avail_offset; + char reserved[MAX_NAME_LENGTH]; + u64 *region_start; +}; + +/* Bootloader Minidump region */ +struct md_smem_region { + char name[MAX_NAME_LENGTH]; + u64 address; + u64 size; +}; + +/* md_table: APPS minidump table + * @num_regions: Number of entries registered + * @region_base_offset: APPS region start offset smem table + * @md_smem_table: Pointer smem table + * @region: Pointer to APPS region in smem table + * @entry: All registered client entries. + */ + +struct md_table { + u32 num_regions; + u32 region_base_offset; + struct md_smem_table *md_smem_table; + struct md_smem_region *region; + struct md_region entry[MAX_NUM_ENTRIES]; +}; + +/* Protect elfheader and smem table from deferred calls contention */ +static DEFINE_SPINLOCK(mdt_lock); +static bool minidump_enabled; +static struct md_table minidump_table; +static unsigned int pendings; +static unsigned int region_idx = 1; /* First entry is ELF header*/ + +/* ELF Header */ +static struct elfhdr *md_ehdr; +/* ELF Program header */ +static struct elf_phdr *phdr; +/* ELF Section header */ +static struct elf_shdr *shdr; +/* Section offset in elf image */ +static u64 elf_offset; +/* String table index, first byte must be '\0' */ +static unsigned int stringtable_idx = 1; + +static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr) +{ + return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff); +} + +static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx) +{ + return &elf_sheader(hdr)[idx]; +} + +static inline char *elf_str_table(struct elfhdr *hdr) +{ + if (hdr->e_shstrndx == SHN_UNDEF) + return NULL; + return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset; +} + +static inline char *elf_lookup_string(struct elfhdr *hdr, int offset) +{ + char *strtab = elf_str_table(hdr); + + if ((strtab == NULL) | (stringtable_idx < offset)) + return NULL; + return strtab + offset; +} + +static inline unsigned int set_section_name(const char *name) +{ + char *strtab = elf_str_table(md_ehdr); + int ret = 0; + + if ((strtab == NULL) | (name == NULL)) + return 0; + + ret = stringtable_idx; + stringtable_idx += strlcpy((strtab + stringtable_idx), + name, MAX_NAME_LENGTH); + stringtable_idx += 1; + return ret; +} + +/* return 1 if name already exists */ +static inline bool md_check_name(const char *name) +{ + struct md_region *mde = minidump_table.entry; + int i, regno = minidump_table.num_regions; + + for (i = 0; i < regno; i++, mde++) + if (!strcmp(mde->name, name)) + return true; + return false; +} + +/* Update Mini dump table in SMEM */ +static int md_update_smem_table(const struct md_region *entry) +{ + struct md_smem_region *mdr; + + if (!minidump_enabled) { + pr_info("Table in smem is not setup\n"); + return -ENODEV; + } + + mdr = &minidump_table.region[region_idx++]; + + strlcpy(mdr->name, entry->name, sizeof(mdr->name)); + 
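For orientation only, a minimal sketch (not part of this patch) of how a client driver could register its own dump buffer with the minidump support introduced in this file, using the struct md_region fields and the msm_minidump_add_region() interface that appears further below; the "KEXAMPLE" region name, the 4 KB buffer, the id value and the exact include list are hypothetical placeholders, not taken from the patch.

	/* Illustrative client-side registration sketch -- assumptions noted above */
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/io.h>		/* virt_to_phys() */
	#include <soc/qcom/minidump.h>

	static int example_register_minidump_region(void)
	{
		struct md_region entry = {0};
		void *buf;

		/* size handed to msm_minidump_add_region() must be 4-byte aligned */
		buf = kzalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		strlcpy(entry.name, "KEXAMPLE", sizeof(entry.name));
		entry.virt_addr = (uintptr_t)buf;
		entry.phys_addr = virt_to_phys(buf);
		entry.size = 4096;
		entry.id = 0;	/* client-chosen identifier */

		/* returns 0 on success, -ENOMEM once MAX_NUM_ENTRIES regions exist */
		return msm_minidump_add_region(&entry);
	}
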
mdr->address = entry->phys_addr; + mdr->size = entry->size; + + /* Update elf header */ + shdr->sh_type = SHT_PROGBITS; + shdr->sh_name = set_section_name(mdr->name); + shdr->sh_addr = (elf_addr_t)entry->virt_addr; + shdr->sh_size = mdr->size; + shdr->sh_flags = SHF_WRITE; + shdr->sh_offset = elf_offset; + shdr->sh_entsize = 0; + + phdr->p_type = PT_LOAD; + phdr->p_offset = elf_offset; + phdr->p_vaddr = entry->virt_addr; + phdr->p_paddr = entry->phys_addr; + phdr->p_filesz = phdr->p_memsz = mdr->size; + phdr->p_flags = PF_R | PF_W; + + md_ehdr->e_shnum += 1; + md_ehdr->e_phnum += 1; + elf_offset += shdr->sh_size; + shdr++; + phdr++; + + return 0; +} + +bool msm_minidump_enabled(void) +{ + bool ret; + + spin_lock(&mdt_lock); + ret = minidump_enabled; + spin_unlock(&mdt_lock); + return ret; +} +EXPORT_SYMBOL(msm_minidump_enabled); + +int msm_minidump_add_region(const struct md_region *entry) +{ + u32 entries; + struct md_region *mdr; + int ret = 0; + + if (!entry) + return -EINVAL; + + if (((strlen(entry->name) > MAX_NAME_LENGTH) || + md_check_name(entry->name)) && !entry->virt_addr) { + pr_info("Invalid entry details\n"); + return -EINVAL; + } + + if (!IS_ALIGNED(entry->size, 4)) { + pr_info("size should be 4 byte aligned\n"); + return -EINVAL; + } + + spin_lock(&mdt_lock); + entries = minidump_table.num_regions; + if (entries >= MAX_NUM_ENTRIES) { + pr_info("Maximum entries reached.\n"); + spin_unlock(&mdt_lock); + return -ENOMEM; + } + + mdr = &minidump_table.entry[entries]; + strlcpy(mdr->name, entry->name, sizeof(mdr->name)); + mdr->virt_addr = entry->virt_addr; + mdr->phys_addr = entry->phys_addr; + mdr->size = entry->size; + mdr->id = entry->id; + + minidump_table.num_regions = entries + 1; + + if (minidump_enabled) + ret = md_update_smem_table(entry); + else + pendings++; + + spin_unlock(&mdt_lock); + + pr_debug("Minidump: added %s to %s list\n", + mdr->name, minidump_enabled ? 
"":"pending"); + return ret; +} +EXPORT_SYMBOL(msm_minidump_add_region); + +static int msm_minidump_add_header(void) +{ + struct md_smem_region *mdreg = &minidump_table.region[0]; + char *banner; + unsigned int strtbl_off, elfh_size, phdr_off; + + elfh_size = sizeof(*md_ehdr) + MAX_STRTBL_SIZE + MAX_MEM_LENGTH + + ((sizeof(*shdr) + sizeof(*phdr)) * (MAX_NUM_ENTRIES + 1)); + + md_ehdr = kzalloc(elfh_size, GFP_KERNEL); + if (!md_ehdr) + return -ENOMEM; + + strlcpy(mdreg->name, "KELF_HEADER", sizeof(mdreg->name)); + mdreg->address = virt_to_phys(md_ehdr); + mdreg->size = elfh_size; + + /* Section headers*/ + shdr = (struct elf_shdr *)(md_ehdr + 1); + phdr = (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES); + phdr_off = sizeof(*md_ehdr) + (sizeof(*shdr) * MAX_NUM_ENTRIES); + + memcpy(md_ehdr->e_ident, ELFMAG, SELFMAG); + md_ehdr->e_ident[EI_CLASS] = ELF_CLASS; + md_ehdr->e_ident[EI_DATA] = ELF_DATA; + md_ehdr->e_ident[EI_VERSION] = EV_CURRENT; + md_ehdr->e_ident[EI_OSABI] = ELF_OSABI; + md_ehdr->e_type = ET_CORE; + md_ehdr->e_machine = ELF_ARCH; + md_ehdr->e_version = EV_CURRENT; + md_ehdr->e_ehsize = sizeof(*md_ehdr); + md_ehdr->e_phoff = phdr_off; + md_ehdr->e_phentsize = sizeof(*phdr); + md_ehdr->e_phnum = 1; + md_ehdr->e_shoff = sizeof(*md_ehdr); + md_ehdr->e_shentsize = sizeof(*shdr); + md_ehdr->e_shnum = 3; /* NULL, STR TABLE, Linux banner */ + md_ehdr->e_shstrndx = 1; + + elf_offset = elfh_size; + strtbl_off = sizeof(*md_ehdr) + + ((sizeof(*phdr) + sizeof(*shdr)) * MAX_NUM_ENTRIES); + /* First section header should be NULL + * 2nd entry for string table + */ + shdr++; + shdr->sh_type = SHT_STRTAB; + shdr->sh_offset = (elf_addr_t)strtbl_off; + shdr->sh_size = MAX_STRTBL_SIZE; + shdr->sh_entsize = 0; + shdr->sh_flags = 0; + shdr->sh_name = set_section_name("STR_TBL"); + shdr++; + + /* 3rd entry for linux banner */ + banner = (char *)md_ehdr + strtbl_off + MAX_STRTBL_SIZE; + strlcpy(banner, linux_banner, MAX_MEM_LENGTH); + + shdr->sh_type = SHT_PROGBITS; + shdr->sh_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE); + shdr->sh_size = strlen(linux_banner) + 1; + shdr->sh_addr = (elf_addr_t)linux_banner; + shdr->sh_entsize = 0; + shdr->sh_flags = SHF_WRITE; + shdr->sh_name = set_section_name("linux_banner"); + shdr++; + + phdr->p_type = PT_LOAD; + phdr->p_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE); + phdr->p_vaddr = (elf_addr_t)linux_banner; + phdr->p_paddr = virt_to_phys(linux_banner); + phdr->p_filesz = phdr->p_memsz = strlen(linux_banner) + 1; + phdr->p_flags = PF_R | PF_W; + + md_ehdr->e_phnum += 1; + phdr++; + + return 0; +} + +static int __init msm_minidump_init(void) +{ + unsigned int i, size; + struct md_region *mdr; + struct md_smem_table *smem_table; + + /* Get Minidump table */ + smem_table = smem_get_entry(SMEM_MINIDUMP_TABLE_ID, &size, 0, + SMEM_ANY_HOST_FLAG); + if (IS_ERR_OR_NULL(smem_table)) { + pr_info("SMEM is not initialized.\n"); + return -ENODEV; + } + + if ((smem_table->next_avail_offset + MAX_MEM_LENGTH) > + smem_table->smem_length) { + pr_info("SMEM memory not available.\n"); + return -ENOMEM; + } + + /* Get next_avail_offset and update it to reserve memory */ + minidump_table.region_base_offset = smem_table->next_avail_offset; + minidump_table.region = (struct md_smem_region *)((uintptr_t)smem_table + + minidump_table.region_base_offset); + + smem_table->next_avail_offset = + minidump_table.region_base_offset + MAX_MEM_LENGTH; + minidump_table.md_smem_table = smem_table; + + msm_minidump_add_header(); + + /* Add pending entries to smem table */ + 
spin_lock(&mdt_lock); + minidump_enabled = true; + + for (i = 0; i < pendings; i++) { + mdr = &minidump_table.entry[i]; + if (md_update_smem_table(mdr)) { + pr_info("Unable to add entry %s to smem table\n", + mdr->name); + spin_unlock(&mdt_lock); + return -ENODEV; + } + } + + pendings = 0; + spin_unlock(&mdt_lock); + + pr_info("Enabled, region base:%d, region 0x%pK\n", + minidump_table.region_base_offset, minidump_table.region); + + return 0; +} +subsys_initcall(msm_minidump_init) diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c index 3a6d84140bc9..8a2be787b70e 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c +++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c @@ -114,7 +114,7 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, { int rc = 0, retries = 0; void *pkt_data = NULL; - struct apr_tx_buf *tx_buf; + struct apr_tx_buf *tx_buf = NULL; struct apr_pkt_priv *pkt_priv_ptr = pkt_priv; if (!apr_ch->handle || !pkt_priv) diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c index a59b436234c7..b46cd2067441 100644 --- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c +++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c @@ -510,7 +510,7 @@ int audio_notifier_deregister(char *client_name) int ret = 0; int ret2; struct list_head *ptr, *next; - struct client_data *client_data; + struct client_data *client_data = NULL; if (client_name == NULL) { pr_err("%s: client_name is NULL\n", __func__); diff --git a/drivers/soc/qcom/qpnp-haptic.c b/drivers/soc/qcom/qpnp-haptic.c index c86eebcd390f..70cf11359e97 100644 --- a/drivers/soc/qcom/qpnp-haptic.c +++ b/drivers/soc/qcom/qpnp-haptic.c @@ -138,7 +138,7 @@ #define QPNP_HAP_WAV_SAMP_MAX 0x7E #define QPNP_HAP_BRAKE_PAT_LEN 4 #define QPNP_HAP_PLAY_EN 0x80 -#define QPNP_HAP_EN 0x80 +#define QPNP_HAP_EN_BIT BIT(7) #define QPNP_HAP_BRAKE_MASK BIT(0) #define QPNP_HAP_AUTO_RES_MASK BIT(7) #define AUTO_RES_ENABLE BIT(7) @@ -305,7 +305,6 @@ struct qpnp_pwm_info { * @ wave_samp - array of wave samples * @ shadow_wave_samp - shadow array of wave samples * @ brake_pat - pattern for active breaking - * @ reg_en_ctl - enable control register * @ reg_play - play register * @ lra_res_cal_period - period for resonance calibration * @ sc_duration - counter to determine the duration of short circuit condition @@ -358,6 +357,7 @@ struct qpnp_hap { u32 sc_irq; u32 status_flags; u16 base; + u16 last_rate_cfg; u16 drive_period_code_max_limit; u16 drive_period_code_min_limit; u16 lra_res_cal_period; @@ -368,7 +368,6 @@ struct qpnp_hap { u8 wave_samp[QPNP_HAP_WAV_SAMP_LEN]; u8 shadow_wave_samp[QPNP_HAP_WAV_SAMP_LEN]; u8 brake_pat[QPNP_HAP_BRAKE_PAT_LEN]; - u8 reg_en_ctl; u8 reg_play; u8 sc_duration; u8 ext_pwm_dtest_line; @@ -391,6 +390,18 @@ struct qpnp_hap { static struct qpnp_hap *ghap; /* helper to read a pmic register */ +static int qpnp_hap_read_mult_reg(struct qpnp_hap *hap, u16 addr, u8 *val, + int len) +{ + int rc; + + rc = regmap_bulk_read(hap->regmap, addr, val, len); + if (rc < 0) + pr_err("Error reading address: %X - ret %X\n", addr, rc); + + return rc; +} + static int qpnp_hap_read_reg(struct qpnp_hap *hap, u16 addr, u8 *val) { int rc; @@ -399,11 +410,28 @@ static int qpnp_hap_read_reg(struct qpnp_hap *hap, u16 addr, u8 *val) rc = regmap_read(hap->regmap, addr, &tmp); if (rc < 0) pr_err("Error reading address: %X - ret %X\n", addr, rc); - *val = (u8)tmp; + else + *val = (u8)tmp; + return rc; } /* helper to write a pmic register */ +static int qpnp_hap_write_mult_reg(struct 
qpnp_hap *hap, u16 addr, u8 *val, + int len) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&hap->bus_lock, flags); + rc = regmap_bulk_write(hap->regmap, addr, val, len); + if (rc < 0) + pr_err("Error writing address: %X - ret %X\n", addr, rc); + + spin_unlock_irqrestore(&hap->bus_lock, flags); + return rc; +} + static int qpnp_hap_write_reg(struct qpnp_hap *hap, u16 addr, u8 val) { unsigned long flags; @@ -480,15 +508,12 @@ static void qpnp_handle_sc_irq(struct work_struct *work) } } -static int qpnp_hap_mod_enable(struct qpnp_hap *hap, int on) +static int qpnp_hap_mod_enable(struct qpnp_hap *hap, bool on) { u8 val; int rc, i; - val = hap->reg_en_ctl; - if (on) { - val |= QPNP_HAP_EN; - } else { + if (!on) { for (i = 0; i < QPNP_HAP_MAX_RETRIES; i++) { /* wait for 4 cycles of play rate */ unsigned long sleep_time = @@ -511,16 +536,13 @@ static int qpnp_hap_mod_enable(struct qpnp_hap *hap, int on) if (i >= QPNP_HAP_MAX_RETRIES) pr_debug("Haptics Busy. Force disable\n"); - - val &= ~QPNP_HAP_EN; } + val = on ? QPNP_HAP_EN_BIT : 0; rc = qpnp_hap_write_reg(hap, QPNP_HAP_EN_CTL_REG(hap->base), val); if (rc < 0) return rc; - hap->reg_en_ctl = val; - return 0; } @@ -1517,20 +1539,23 @@ static int qpnp_hap_auto_res_enable(struct qpnp_hap *hap, int enable) static void update_lra_frequency(struct qpnp_hap *hap) { - u8 lra_auto_res_lo = 0, lra_auto_res_hi = 0, val; + u8 lra_auto_res[2], val; u32 play_rate_code; + u16 rate_cfg; int rc; - qpnp_hap_read_reg(hap, QPNP_HAP_LRA_AUTO_RES_LO(hap->base), - &lra_auto_res_lo); - qpnp_hap_read_reg(hap, QPNP_HAP_LRA_AUTO_RES_HI(hap->base), - &lra_auto_res_hi); + rc = qpnp_hap_read_mult_reg(hap, QPNP_HAP_LRA_AUTO_RES_LO(hap->base), + lra_auto_res, 2); + if (rc < 0) { + pr_err("Error in reading LRA_AUTO_RES_LO/HI, rc=%d\n", rc); + return; + } play_rate_code = - (lra_auto_res_hi & 0xF0) << 4 | (lra_auto_res_lo & 0xFF); + (lra_auto_res[1] & 0xF0) << 4 | (lra_auto_res[0] & 0xFF); pr_debug("lra_auto_res_lo = 0x%x lra_auto_res_hi = 0x%x play_rate_code = 0x%x\n", - lra_auto_res_lo, lra_auto_res_hi, play_rate_code); + lra_auto_res[0], lra_auto_res[1], play_rate_code); rc = qpnp_hap_read_reg(hap, QPNP_HAP_STATUS(hap->base), &val); if (rc < 0) @@ -1559,12 +1584,21 @@ static void update_lra_frequency(struct qpnp_hap *hap) return; } - qpnp_hap_write_reg(hap, QPNP_HAP_RATE_CFG1_REG(hap->base), - lra_auto_res_lo); + lra_auto_res[1] >>= 4; + rate_cfg = lra_auto_res[1] << 8 | lra_auto_res[0]; + if (hap->last_rate_cfg == rate_cfg) { + pr_debug("Same rate_cfg, skip updating\n"); + return; + } - lra_auto_res_hi = lra_auto_res_hi >> 4; - qpnp_hap_write_reg(hap, QPNP_HAP_RATE_CFG2_REG(hap->base), - lra_auto_res_hi); + rc = qpnp_hap_write_mult_reg(hap, QPNP_HAP_RATE_CFG1_REG(hap->base), + lra_auto_res, 2); + if (rc < 0) { + pr_err("Error in writing to RATE_CFG1/2, rc=%d\n", rc); + } else { + pr_debug("Update RATE_CFG with [0x%x]\n", rate_cfg); + hap->last_rate_cfg = rate_cfg; + } } static enum hrtimer_restart detect_auto_res_error(struct hrtimer *timer) @@ -1983,6 +2017,8 @@ static int qpnp_hap_config(struct qpnp_hap *hap) if (rc) return rc; + hap->last_rate_cfg = hap->init_drive_period_code; + if (hap->act_type == QPNP_HAP_LRA && hap->perform_lra_auto_resonance_search) calculate_lra_code(hap); @@ -2019,12 +2055,6 @@ static int qpnp_hap_config(struct qpnp_hap *hap) return rc; } - /* Cache enable control register */ - rc = qpnp_hap_read_reg(hap, QPNP_HAP_EN_CTL_REG(hap->base), &val); - if (rc < 0) - return rc; - hap->reg_en_ctl = val; - /* Cache play register */ rc = 
qpnp_hap_read_reg(hap, QPNP_HAP_PLAY_REG(hap->base), &val); if (rc < 0) diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c index f2784dedbc7a..e792c953f31f 100644 --- a/drivers/soc/qcom/rpm-smd.c +++ b/drivers/soc/qcom/rpm-smd.c @@ -108,9 +108,7 @@ static struct glink_apps_rpm_data *glink_data; #define RPM_DATA_LEN_SIZE 16 #define RPM_HDR_SIZE ((rpm_msg_fmt_ver == RPM_MSG_V0_FMT) ?\ sizeof(struct rpm_v0_hdr) : sizeof(struct rpm_v1_hdr)) -#define GET_FIELD(offset, size) (((1U << (offset + size)) - 1) - \ - ((1U << offset) - 1)) -#define CLEAR_FIELD(offset, size) (~GET_FIELD(offset, size)) +#define CLEAR_FIELD(offset, size) (~GENMASK(offset + size - 1, offset)) static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier); static bool standalone; @@ -223,7 +221,7 @@ static uint32_t msm_rpm_get_next_msg_id(void); static inline uint32_t get_offset_value(uint32_t val, uint32_t offset, uint32_t size) { - return (((val) & GET_FIELD(offset, size)) + return (((val) & GENMASK(offset + size - 1, offset)) >> offset); } diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c index 0625f75de373..97cd11201262 100644 --- a/drivers/soc/qcom/service-locator.c +++ b/drivers/soc/qcom/service-locator.c @@ -24,7 +24,6 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/workqueue.h> -#include <linux/debugfs.h> #include <soc/qcom/msm_qmi_interface.h> #include <soc/qcom/service-locator.h> @@ -440,140 +439,3 @@ int find_subsys(const char *pd_path, char *subsys) return 0; } EXPORT_SYMBOL(find_subsys); - -static struct pd_qmi_client_data test_data; - -static int servloc_test_pdr_cb(struct notifier_block *this, - unsigned long opcode, void *ptr) -{ - int i, rc = 0; - char subsys[QMI_SERVREG_LOC_NAME_LENGTH_V01]; - struct pd_qmi_client_data *return_data; - - return_data = (struct pd_qmi_client_data *)ptr; - - if (opcode) { - pr_err("%s: Failed to get process domain!, opcode = %lu\n", - __func__, opcode); - return -EIO; - } - - pr_err("Service Name: %s\tTotal Domains: %d\n", - return_data->service_name, return_data->total_domains); - - for (i = 0; i < return_data->total_domains; i++) { - pr_err("Instance ID: %d\t ", - return_data->domain_list[i].instance_id); - pr_err("Domain Name: %s\n", - return_data->domain_list[i].name); - rc = find_subsys(return_data->domain_list[i].name, - subsys); - if (rc < 0) - pr_err("No valid subsys found for %s!\n", - return_data->domain_list[i].name); - else - pr_err("Subsys: %s\n", subsys); - } - return 0; -} - -static struct notifier_block pdr_service_nb = { - .notifier_call = servloc_test_pdr_cb, -}; - -static ssize_t servloc_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - int rc = 0; - char *node_name = filp->private_data; - - if (!strcmp(node_name, "test_servloc_get")) - rc = get_service_location(test_data.client_name, - test_data.service_name, &pdr_service_nb); - - return rc; -} - -static ssize_t servloc_write(struct file *fp, const char __user *buf, - size_t count, loff_t *unused) -{ - char *node_name = fp->private_data; - - if (!buf) - return -EIO; - if (!strcmp(node_name, "service_name")) { - snprintf(test_data.service_name, sizeof(test_data.service_name), - "%.*s", (int) min((size_t)count - 1, - (sizeof(test_data.service_name) - 1)), buf); - } else { - snprintf(test_data.client_name, sizeof(test_data.client_name), - "%.*s", (int) min((size_t)count - 1, - (sizeof(test_data.client_name) - 1)), buf); - } - return count; -} - -static const struct file_operations servloc_fops = { - .open = simple_open, - 
.read = servloc_read, - .write = servloc_write, -}; - -static struct dentry *servloc_base_dir; -static struct dentry *test_servloc_file; - -static int __init servloc_debugfs_init(void) -{ - servloc_base_dir = debugfs_create_dir("test_servloc", NULL); - return !servloc_base_dir ? -ENOMEM : 0; -} - -static void servloc_debugfs_exit(void) -{ - debugfs_remove_recursive(servloc_base_dir); -} - -static int servloc_debugfs_add(void) -{ - int rc; - - if (!servloc_base_dir) - return -ENOMEM; - - test_servloc_file = debugfs_create_file("client_name", - S_IRUGO | S_IWUSR, servloc_base_dir, - "client_name", &servloc_fops); - rc = !test_servloc_file ? -ENOMEM : 0; - - if (rc == 0) { - test_servloc_file = debugfs_create_file("service_name", - S_IRUGO | S_IWUSR, servloc_base_dir, - "service_name", &servloc_fops); - rc = !test_servloc_file ? -ENOMEM : 0; - } - - if (rc == 0) { - test_servloc_file = debugfs_create_file("test_servloc_get", - S_IRUGO | S_IWUSR, servloc_base_dir, - "test_servloc_get", &servloc_fops); - rc = !test_servloc_file ? -ENOMEM : 0; - } - return rc; -} - -static int __init service_locator_init(void) -{ - pr_debug("service_locator_status = %d\n", locator_status); - if (servloc_debugfs_init()) - pr_err("Could not create test_servloc base directory!"); - if (servloc_debugfs_add()) - pr_err("Could not create test_servloc node entries!"); - return 0; -} - -static void __exit service_locator_exit(void) -{ - servloc_debugfs_exit(); -} -module_init(service_locator_init); -module_exit(service_locator_exit); diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c index fa916ac5ade4..2b6e05a1720f 100644 --- a/drivers/soc/qcom/service-notifier.c +++ b/drivers/soc/qcom/service-notifier.c @@ -21,7 +21,6 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/err.h> -#include <linux/debugfs.h> #include <linux/uaccess.h> #include <soc/qcom/subsystem_restart.h> @@ -188,7 +187,7 @@ static void root_service_clnt_notify(struct qmi_handle *handle, switch (event) { case QMI_RECV_MSG: - schedule_work(&data->svc_rcv_msg); + queue_work(data->svc_event_wq, &data->svc_rcv_msg); break; default: break; @@ -752,179 +751,3 @@ int service_notif_unregister_notifier(void *service_notif_handle, &service_notif->service_notif_rcvr_list, nb); } EXPORT_SYMBOL(service_notif_unregister_notifier); - -struct service_notifier_test_data { - char service_path[MAX_STRING_LEN]; - int instance_id; - struct notifier_block nb; - void *service_notif_handle; -}; - -static struct service_notifier_test_data test_data; - -static void print_service_provider_state(int notification, char *type) -{ - if (notification == SERVREG_NOTIF_SERVICE_STATE_DOWN_V01) - pr_info("%s: Service %s down!\n", type, test_data.service_path); - else if (notification == SERVREG_NOTIF_SERVICE_STATE_UP_V01) - pr_info("%s: Service %s up!\n", type, test_data.service_path); - else if (notification == SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01) - pr_info("%s: Service %s state uninit!\n", type, - test_data.service_path); - else - pr_info("%s: Service %s state Unknown 0x%x!\n", type, - test_data.service_path, notification); -} - -static int nb_callback(struct notifier_block *nb, - unsigned long notification, - void *data) -{ - print_service_provider_state((int)notification, "Notification:"); - return 0; -} - -static ssize_t show_service_path(struct seq_file *f, void *unused) -{ - if (test_data.service_notif_handle) - seq_printf(f, "Service Path: %s\n", test_data.service_path); - else - seq_puts(f, "No existing notifier\n"); - return 0; 
-} - - -static ssize_t set_service_notifier_register(struct file *fp, - const char __user *buf, - size_t count, loff_t *ppos) -{ - int curr_state = INT_MAX, rc; - - if (!buf) - return -EIO; - if (test_data.service_notif_handle) { - service_notif_unregister_notifier( - test_data.service_notif_handle, - &test_data.nb); - test_data.service_notif_handle = NULL; - pr_info("Unregistering existing notifier for %s\n", - test_data.service_path); - } - rc = simple_write_to_buffer(test_data.service_path, MAX_STRING_LEN, - ppos, buf, count - 1); - if (rc != count - 1) { - pr_err("Unable to read data into kernel buffer\n"); - goto err; - } - test_data.nb.notifier_call = nb_callback; - test_data.service_notif_handle = service_notif_register_notifier( - test_data.service_path, - test_data.instance_id, &test_data.nb, - &curr_state); - if (!IS_ERR(test_data.service_notif_handle)) { - pr_info("Notifier Registered for service %s\n", - test_data.service_path); - print_service_provider_state(curr_state, "Initial State"); - return count; - } -err: - test_data.service_notif_handle = NULL; - pr_err("Unable to register notifier for %s\n", test_data.service_path); - return -EIO; -} - -static int open_service_notifier_register(struct inode *inode, struct file *f) -{ - return single_open(f, (void *) show_service_path, - inode->i_private); -} - -static const struct file_operations service_notifier_register_fops = { - .open = open_service_notifier_register, - .read = seq_read, - .write = set_service_notifier_register, - .llseek = seq_lseek, - .release = seq_release, -}; - -static ssize_t show_service_notifier_id(struct seq_file *f, void *unused) -{ - seq_printf(f, "Service instance ID: %d\n", test_data.instance_id); - return 0; -} - -static ssize_t set_service_notifier_id(struct file *fp, - const char __user *buf, - size_t count, loff_t *unused) -{ - int val, rc; - char kbuf[MAX_STRING_LEN]; - - if (count > MAX_STRING_LEN) { - rc = -EIO; - goto err; - } - rc = copy_from_user(kbuf, buf, count); - if (rc != 0) { - rc = -EFAULT; - goto err; - } - - kbuf[count - 1] = '\0'; - rc = kstrtoint(kbuf, 0, &val); - if (rc < 0) - goto err; - - test_data.instance_id = val; - return count; -err: - pr_err("Invalid input parameters: rc = %d\n", rc); - return rc; -} - -static int open_service_notifier_id(struct inode *inode, struct file *f) -{ - return single_open(f, (void *) show_service_notifier_id, - inode->i_private); -} - -static const struct file_operations service_notifier_id_fops = { - .open = open_service_notifier_id, - .read = seq_read, - .write = set_service_notifier_id, - .llseek = seq_lseek, - .release = seq_release, -}; - -static struct dentry *service_notifier_dir; -static struct dentry *service_path_file; -static struct dentry *service_id_file; - -static int __init service_notifier_init(void) -{ - service_notifier_dir = debugfs_create_dir("service_notifier", NULL); - if (service_notifier_dir) { - service_path_file = debugfs_create_file("service_path", - S_IRUGO | S_IWUSR, service_notifier_dir, NULL, - &service_notifier_register_fops); - if (!service_path_file) - goto err; - service_id_file = debugfs_create_file("service_id", - S_IRUGO | S_IWUSR, service_notifier_dir, NULL, - &service_notifier_id_fops); - if (!service_id_file) - goto err; - } - return 0; -err: - debugfs_remove_recursive(service_notifier_dir); - return 0; -} - -static void __exit service_notifier_exit(void) -{ - debugfs_remove_recursive(service_notifier_dir); - test_data.nb.notifier_call = nb_callback; -} -module_init(service_notifier_init); 
-module_exit(service_notifier_exit); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index dd3e545eb7da..c1d8748a5d08 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -46,6 +46,7 @@ #define SMEM_IMAGE_VERSION_OEM_OFFSET 95 #define SMEM_IMAGE_VERSION_PARTITION_APPS 10 +static DECLARE_RWSEM(current_image_rwsem); enum { HW_PLATFORM_UNKNOWN = 0, HW_PLATFORM_SURF = 1, @@ -530,7 +531,7 @@ static struct msm_soc_info cpu_of_id[] = { /* Cobalt IDs */ [292] = {MSM_CPU_8998, "MSM8998"}, - [319] = {MSM_CPU_8998, "APQ8998"}, + [319] = {MSM_CPU_8998, "APQ8098"}, /* Hamster ID */ [306] = {MSM_CPU_HAMSTER, "MSMHAMSTER"}, @@ -899,7 +900,9 @@ msm_get_image_version(struct device *dev, pr_err("Failed to get image version base address"); return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "Unknown"); } + down_read(&current_image_rwsem); string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE; + up_read(&current_image_rwsem); return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s\n", string_address); } @@ -912,14 +915,19 @@ msm_set_image_version(struct device *dev, { char *store_address; - if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) + down_read(&current_image_rwsem); + if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) { + up_read(&current_image_rwsem); return count; + } store_address = socinfo_get_image_version_base_address(); if (IS_ERR_OR_NULL(store_address)) { pr_err("Failed to get image version base address"); + up_read(&current_image_rwsem); return count; } store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE; + up_read(&current_image_rwsem); snprintf(store_address, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s", buf); return count; } @@ -937,7 +945,9 @@ msm_get_image_variant(struct device *dev, return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "Unknown"); } + down_read(&current_image_rwsem); string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE; + up_read(&current_image_rwsem); string_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET; return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s\n", string_address); @@ -951,14 +961,19 @@ msm_set_image_variant(struct device *dev, { char *store_address; - if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) + down_read(&current_image_rwsem); + if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) { + up_read(&current_image_rwsem); return count; + } store_address = socinfo_get_image_version_base_address(); if (IS_ERR_OR_NULL(store_address)) { pr_err("Failed to get image version base address"); + up_read(&current_image_rwsem); return count; } store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE; + up_read(&current_image_rwsem); store_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET; snprintf(store_address, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s", buf); return count; @@ -976,7 +991,9 @@ msm_get_image_crm_version(struct device *dev, pr_err("Failed to get image version base address"); return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "Unknown"); } + down_read(&current_image_rwsem); string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE; + up_read(&current_image_rwsem); string_address += SMEM_IMAGE_VERSION_OEM_OFFSET; return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.33s\n", string_address); @@ -990,14 +1007,19 @@ msm_set_image_crm_version(struct device *dev, { char *store_address; - if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) + down_read(&current_image_rwsem); + if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS) { + up_read(&current_image_rwsem); return count; + } store_address = 
socinfo_get_image_version_base_address(); if (IS_ERR_OR_NULL(store_address)) { pr_err("Failed to get image version base address"); + up_read(&current_image_rwsem); return count; } store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE; + up_read(&current_image_rwsem); store_address += SMEM_IMAGE_VERSION_OEM_OFFSET; snprintf(store_address, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.33s", buf); return count; @@ -1008,8 +1030,14 @@ msm_get_image_number(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", + int ret; + + down_read(&current_image_rwsem); + ret = snprintf(buf, PAGE_SIZE, "%d\n", current_image); + up_read(&current_image_rwsem); + return ret; + } static ssize_t @@ -1021,10 +1049,12 @@ msm_select_image(struct device *dev, struct device_attribute *attr, ret = kstrtoint(buf, 10, &digit); if (ret) return ret; + down_write(&current_image_rwsem); if (0 <= digit && digit < SMEM_IMAGE_VERSION_BLOCKS_COUNT) current_image = digit; else current_image = 0; + up_write(&current_image_rwsem); return count; } @@ -1235,9 +1265,9 @@ static void * __init setup_dummy_socinfo(void) dummy_socinfo.id = 327; strlcpy(dummy_socinfo.build_id, "sda630 - ", sizeof(dummy_socinfo.build_id)); - } else if (early_machine_is_apq8998()) { + } else if (early_machine_is_apq8098()) { dummy_socinfo.id = 319; - strlcpy(dummy_socinfo.build_id, "apq8998 - ", + strlcpy(dummy_socinfo.build_id, "apq8098 - ", sizeof(dummy_socinfo.build_id)); } diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c index 0c44d76bc7c7..f7b9c3f85a30 100644 --- a/drivers/soc/qcom/spcom.c +++ b/drivers/soc/qcom/spcom.c @@ -898,12 +898,12 @@ static int spcom_rx(struct spcom_channel *ch, goto exit_err; } +copy_buf: if (!ch->glink_rx_buf) { pr_err("invalid glink_rx_buf.\n"); goto exit_err; } -copy_buf: /* Copy from glink buffer to spcom buffer */ size = min_t(int, ch->actual_rx_size, size); memcpy(buf, ch->glink_rx_buf, size); diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c index c35ec26fefa2..d3d0b8594c9f 100644 --- a/drivers/soc/qcom/subsystem_restart.c +++ b/drivers/soc/qcom/subsystem_restart.c @@ -28,7 +28,6 @@ #include <linux/spinlock.h> #include <linux/device.h> #include <linux/idr.h> -#include <linux/debugfs.h> #include <linux/interrupt.h> #include <linux/of_gpio.h> #include <linux/cdev.h> @@ -149,7 +148,6 @@ struct restart_log { * @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.)
* @restart_order: order of other devices this devices restarts with * @crash_count: number of times the device has crashed - * @dentry: debugfs directory for this device * @do_ramdump_on_put: ramdump on subsystem_put() if true * @err_ready: completion variable to record error ready from subsystem * @crashed: indicates if subsystem has crashed @@ -171,9 +169,6 @@ struct subsys_device { int restart_level; int crash_count; struct subsys_soc_restart_order *restart_order; -#ifdef CONFIG_DEBUG_FS - struct dentry *dentry; -#endif bool do_ramdump_on_put; struct cdev char_dev; dev_t dev_no; @@ -352,10 +347,11 @@ static struct device_attribute subsys_attrs[] = { __ATTR_NULL, }; -static struct bus_type subsys_bus_type = { +struct bus_type subsys_bus_type = { .name = "msm_subsys", .dev_attrs = subsys_attrs, }; +EXPORT_SYMBOL(subsys_bus_type); static DEFINE_IDA(subsys_ida); @@ -1169,87 +1165,6 @@ void notify_proxy_unvote(struct device *device) notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL); } -#ifdef CONFIG_DEBUG_FS -static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf, - size_t cnt, loff_t *ppos) -{ - int r; - char buf[40]; - struct subsys_device *subsys = filp->private_data; - - r = snprintf(buf, sizeof(buf), "%d\n", subsys->count); - return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -} - -static ssize_t subsys_debugfs_write(struct file *filp, - const char __user *ubuf, size_t cnt, loff_t *ppos) -{ - struct subsys_device *subsys = filp->private_data; - char buf[10]; - char *cmp; - - cnt = min(cnt, sizeof(buf) - 1); - if (copy_from_user(&buf, ubuf, cnt)) - return -EFAULT; - buf[cnt] = '\0'; - cmp = strstrip(buf); - - if (!strcmp(cmp, "restart")) { - if (subsystem_restart_dev(subsys)) - return -EIO; - } else if (!strcmp(cmp, "get")) { - if (subsystem_get(subsys->desc->name)) - return -EIO; - } else if (!strcmp(cmp, "put")) { - subsystem_put(subsys); - } else { - return -EINVAL; - } - - return cnt; -} - -static const struct file_operations subsys_debugfs_fops = { - .open = simple_open, - .read = subsys_debugfs_read, - .write = subsys_debugfs_write, -}; - -static struct dentry *subsys_base_dir; - -static int __init subsys_debugfs_init(void) -{ - subsys_base_dir = debugfs_create_dir("msm_subsys", NULL); - return !subsys_base_dir ? -ENOMEM : 0; -} - -static void subsys_debugfs_exit(void) -{ - debugfs_remove_recursive(subsys_base_dir); -} - -static int subsys_debugfs_add(struct subsys_device *subsys) -{ - if (!subsys_base_dir) - return -ENOMEM; - - subsys->dentry = debugfs_create_file(subsys->desc->name, - S_IRUGO | S_IWUSR, subsys_base_dir, - subsys, &subsys_debugfs_fops); - return !subsys->dentry ? 
-ENOMEM : 0; -} - -static void subsys_debugfs_remove(struct subsys_device *subsys) -{ - debugfs_remove(subsys->dentry); -} -#else -static int __init subsys_debugfs_init(void) { return 0; }; -static void subsys_debugfs_exit(void) { } -static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; } -static void subsys_debugfs_remove(struct subsys_device *subsys) { } -#endif - static int subsys_device_open(struct inode *inode, struct file *file) { struct subsys_device *device, *subsys_dev = 0; @@ -1686,17 +1601,8 @@ struct subsys_device *subsys_register(struct subsys_desc *desc) mutex_init(&subsys->track.lock); - ret = subsys_debugfs_add(subsys); - if (ret) { - ida_simple_remove(&subsys_ida, subsys->id); - wakeup_source_trash(&subsys->ssr_wlock); - kfree(subsys); - return ERR_PTR(ret); - } - ret = device_register(&subsys->dev); if (ret) { - subsys_debugfs_remove(subsys); put_device(&subsys->dev); return ERR_PTR(ret); } @@ -1758,7 +1664,6 @@ err_setup_irqs: if (ofnode) subsys_remove_restart_order(ofnode); err_register: - subsys_debugfs_remove(subsys); device_unregister(&subsys->dev); return ERR_PTR(ret); } @@ -1787,7 +1692,6 @@ void subsys_unregister(struct subsys_device *subsys) WARN_ON(subsys->count); device_unregister(&subsys->dev); mutex_unlock(&subsys->track.lock); - subsys_debugfs_remove(subsys); subsys_char_device_remove(subsys); sysmon_notifier_unregister(subsys->desc); if (subsys->desc->edge) @@ -1827,9 +1731,6 @@ static int __init subsys_restart_init(void) ret = bus_register(&subsys_bus_type); if (ret) goto err_bus; - ret = subsys_debugfs_init(); - if (ret) - goto err_debugfs; char_class = class_create(THIS_MODULE, "subsys"); if (IS_ERR(char_class)) { @@ -1848,8 +1749,6 @@ static int __init subsys_restart_init(void) err_soc: class_destroy(char_class); err_class: - subsys_debugfs_exit(); -err_debugfs: bus_unregister(&subsys_bus_type); err_bus: destroy_workqueue(ssr_wq); diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c index 470ecfdd9f5e..745a069df88a 100644 --- a/drivers/soc/qcom/watchdog_v2.c +++ b/drivers/soc/qcom/watchdog_v2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,6 +29,7 @@ #include <linux/wait.h> #include <soc/qcom/scm.h> #include <soc/qcom/memory_dump.h> +#include <soc/qcom/minidump.h> #include <soc/qcom/watchdog.h> #define MODULE_NAME "msm_watchdog" @@ -521,6 +522,8 @@ void register_scan_dump(struct msm_watchdog_data *wdog_dd) dump_data->addr = virt_to_phys(dump_addr); dump_data->len = wdog_dd->scandump_size; + strlcpy(dump_data->name, "KSCANDUMP", sizeof(dump_data->name)); + dump_entry.id = MSM_DUMP_DATA_SCANDUMP; dump_entry.addr = virt_to_phys(dump_data); ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry); @@ -605,6 +608,9 @@ static void configure_bark_dump(struct msm_watchdog_data *wdog_dd) cpu_data[cpu].addr = virt_to_phys(cpu_buf + cpu * MAX_CPU_CTX_SIZE); cpu_data[cpu].len = MAX_CPU_CTX_SIZE; + snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name), + "KCPU_CTX%d", cpu); + dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu; dump_entry.addr = virt_to_phys(&cpu_data[cpu]); ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, @@ -820,6 +826,7 @@ static int msm_watchdog_probe(struct platform_device *pdev) { int ret; struct msm_watchdog_data *wdog_dd; + struct md_region md_entry; if (!pdev->dev.of_node || !enable) return -ENODEV; @@ -841,6 +848,15 @@ static int msm_watchdog_probe(struct platform_device *pdev) goto err; } init_watchdog_data(wdog_dd); + + /* Add wdog info to minidump table */ + strlcpy(md_entry.name, "KWDOGDATA", sizeof(md_entry.name)); + md_entry.virt_addr = (uintptr_t)wdog_dd; + md_entry.phys_addr = virt_to_phys(wdog_dd); + md_entry.size = sizeof(*wdog_dd); + if (msm_minidump_add_region(&md_entry)) + pr_info("Failed to add RTB in Minidump\n"); + return 0; err: kzfree(wdog_dd); diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c index 6f75eee8f921..8efe18dcc98f 100644 --- a/drivers/spi/spi_qsd.c +++ b/drivers/spi/spi_qsd.c @@ -45,6 +45,8 @@ #include <linux/msm-bus-board.h> #include "spi_qsd.h" +#define SPI_MAX_BYTES_PER_WORD (4) + static int msm_spi_pm_resume_runtime(struct device *device); static int msm_spi_pm_suspend_runtime(struct device *device); static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd); @@ -438,10 +440,12 @@ static void msm_spi_read_word_from_fifo(struct msm_spi *dd) u32 data_in; int i; int shift; + int read_bytes = (dd->pack_words ? 
+ SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word); data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO); if (dd->read_buf) { - for (i = 0; (i < dd->bytes_per_word) && + for (i = 0; (i < read_bytes) && dd->rx_bytes_remaining; i++) { /* The data format depends on bytes_per_word: 4 bytes: 0x12345678 @@ -454,8 +458,8 @@ static void msm_spi_read_word_from_fifo(struct msm_spi *dd) dd->rx_bytes_remaining--; } } else { - if (dd->rx_bytes_remaining >= dd->bytes_per_word) - dd->rx_bytes_remaining -= dd->bytes_per_word; + if (dd->rx_bytes_remaining >= read_bytes) + dd->rx_bytes_remaining -= read_bytes; else dd->rx_bytes_remaining = 0; } @@ -552,7 +556,7 @@ msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n) if (n != (*config & SPI_CFG_N)) *config = (*config & ~SPI_CFG_N) | n; - if (dd->mode == SPI_BAM_MODE) { + if (dd->tx_mode == SPI_BAM_MODE) { if (dd->read_buf == NULL) *config |= SPI_NO_INPUT; if (dd->write_buf == NULL) @@ -617,25 +621,34 @@ static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw) static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words) { /* - * n_words cannot exceed fifo_size, and only one READ COUNT - * interrupt is generated per transaction, so for transactions - * larger than fifo size READ COUNT must be disabled. - * For those transactions we usually move to Data Mover mode. + * For FIFO mode: + * - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0 + * - Set the READ/WRITE_COUNT registers to 0 (infinite mode) + * or num bytes (finite mode) if less than fifo worth of data. + * For Block mode: + * - Set the MX_OUTPUT/MX_INPUT_COUNT registers to num xfer bytes. + * - Set the READ/WRITE_COUNT registers to 0. */ - if (dd->mode == SPI_FIFO_MODE) { - if (n_words <= dd->input_fifo_size) { - writel_relaxed(n_words, - dd->base + SPI_MX_READ_COUNT); - msm_spi_set_write_count(dd, n_words); - } else { - writel_relaxed(0, dd->base + SPI_MX_READ_COUNT); - msm_spi_set_write_count(dd, 0); - } - if (dd->qup_ver == SPI_QUP_VERSION_BFAM) { - /* must be zero for FIFO */ - writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT); + if (dd->tx_mode != SPI_BAM_MODE) { + if (dd->tx_mode == SPI_FIFO_MODE) { + if (n_words <= dd->input_fifo_size) + msm_spi_set_write_count(dd, n_words); + else + msm_spi_set_write_count(dd, 0); writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT); - } + } else + writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT); + + if (dd->rx_mode == SPI_FIFO_MODE) { + if (n_words <= dd->input_fifo_size) + writel_relaxed(n_words, + dd->base + SPI_MX_READ_COUNT); + else + writel_relaxed(0, + dd->base + SPI_MX_READ_COUNT); + writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT); + } else + writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT); } else { /* must be zero for BAM and DMOV */ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT); @@ -882,7 +895,7 @@ xfr_err: static int msm_spi_bam_next_transfer(struct msm_spi *dd) { - if (dd->mode != SPI_BAM_MODE) + if (dd->tx_mode != SPI_BAM_MODE) return 0; if (dd->tx_bytes_remaining > 0) { @@ -901,7 +914,7 @@ msm_spi_bam_next_transfer(struct msm_spi *dd) static int msm_spi_dma_send_next(struct msm_spi *dd) { int ret = 0; - if (dd->mode == SPI_BAM_MODE) + if (dd->tx_mode == SPI_BAM_MODE) ret = msm_spi_bam_next_transfer(dd); return ret; } @@ -932,32 +945,38 @@ static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id) } op = readl_relaxed(dd->base + SPI_OPERATIONAL); + writel_relaxed(op, dd->base + SPI_OPERATIONAL); + /* + * Ensure service flag was cleared before further + * processing of interrupt. 
+ */ + mb(); if (op & SPI_OP_INPUT_SERVICE_FLAG) { - writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG, - dd->base + SPI_OPERATIONAL); - /* - * Ensure service flag was cleared before further - * processing of interrupt. - */ - mb(); ret |= msm_spi_input_irq(irq, dev_id); } if (op & SPI_OP_OUTPUT_SERVICE_FLAG) { - writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG, - dd->base + SPI_OPERATIONAL); - /* - * Ensure service flag was cleared before further - * processing of interrupt. - */ - mb(); ret |= msm_spi_output_irq(irq, dev_id); } - if (dd->done) { + if (dd->tx_mode != SPI_BAM_MODE) { + if (!dd->rx_done) { + if (dd->rx_bytes_remaining == 0) + dd->rx_done = true; + } + if (!dd->tx_done) { + if (!dd->tx_bytes_remaining && + (op & SPI_OP_IP_FIFO_NOT_EMPTY)) { + dd->tx_done = true; + } + } + } + if (dd->tx_done && dd->rx_done) { + msm_spi_set_state(dd, SPI_OP_STATE_RESET); + dd->tx_done = false; + dd->rx_done = false; complete(&dd->rx_transfer_complete); complete(&dd->tx_transfer_complete); - dd->done = 0; } return ret; } @@ -968,17 +987,23 @@ static irqreturn_t msm_spi_input_irq(int irq, void *dev_id) dd->stat_rx++; - if (dd->mode == SPI_MODE_NONE) + if (dd->rx_mode == SPI_MODE_NONE) return IRQ_HANDLED; - if (dd->mode == SPI_FIFO_MODE) { + if (dd->rx_mode == SPI_FIFO_MODE) { while ((readl_relaxed(dd->base + SPI_OPERATIONAL) & SPI_OP_IP_FIFO_NOT_EMPTY) && (dd->rx_bytes_remaining > 0)) { msm_spi_read_word_from_fifo(dd); } - if (dd->rx_bytes_remaining == 0) - msm_spi_complete(dd); + } else if (dd->rx_mode == SPI_BLOCK_MODE) { + int count = 0; + + while (dd->rx_bytes_remaining && + (count < dd->input_block_size)) { + msm_spi_read_word_from_fifo(dd); + count += SPI_MAX_BYTES_PER_WORD; + } } return IRQ_HANDLED; @@ -989,18 +1014,20 @@ static void msm_spi_write_word_to_fifo(struct msm_spi *dd) u32 word; u8 byte; int i; + int write_bytes = + (dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word); word = 0; if (dd->write_buf) { - for (i = 0; (i < dd->bytes_per_word) && + for (i = 0; (i < write_bytes) && dd->tx_bytes_remaining; i++) { dd->tx_bytes_remaining--; byte = *dd->write_buf++; word |= (byte << (BITS_PER_BYTE * i)); } } else - if (dd->tx_bytes_remaining > dd->bytes_per_word) - dd->tx_bytes_remaining -= dd->bytes_per_word; + if (dd->tx_bytes_remaining > write_bytes) + dd->tx_bytes_remaining -= write_bytes; else dd->tx_bytes_remaining = 0; dd->write_xfr_cnt++; @@ -1012,11 +1039,22 @@ static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd) { int count = 0; - while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) && - !(readl_relaxed(dd->base + SPI_OPERATIONAL) & - SPI_OP_OUTPUT_FIFO_FULL)) { - msm_spi_write_word_to_fifo(dd); - count++; + if (dd->tx_mode == SPI_FIFO_MODE) { + while ((dd->tx_bytes_remaining > 0) && + (count < dd->input_fifo_size) && + !(readl_relaxed(dd->base + SPI_OPERATIONAL) + & SPI_OP_OUTPUT_FIFO_FULL)) { + msm_spi_write_word_to_fifo(dd); + count++; + } + } + + if (dd->tx_mode == SPI_BLOCK_MODE) { + while (dd->tx_bytes_remaining && + (count < dd->output_block_size)) { + msm_spi_write_word_to_fifo(dd); + count += SPI_MAX_BYTES_PER_WORD; + } } } @@ -1026,11 +1064,11 @@ static irqreturn_t msm_spi_output_irq(int irq, void *dev_id) dd->stat_tx++; - if (dd->mode == SPI_MODE_NONE) + if (dd->tx_mode == SPI_MODE_NONE) return IRQ_HANDLED; /* Output FIFO is empty. Transmit any outstanding write data. 
*/ - if (dd->mode == SPI_FIFO_MODE) + if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE)) msm_spi_write_rmn_to_fifo(dd); return IRQ_HANDLED; @@ -1106,7 +1144,7 @@ error: static int msm_spi_dma_map_buffers(struct msm_spi *dd) { int ret = 0; - if (dd->mode == SPI_BAM_MODE) + if (dd->tx_mode == SPI_BAM_MODE) ret = msm_spi_bam_map_buffers(dd); return ret; } @@ -1135,7 +1173,7 @@ static void msm_spi_bam_unmap_buffers(struct msm_spi *dd) static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd) { - if (dd->mode == SPI_BAM_MODE) + if (dd->tx_mode == SPI_BAM_MODE) msm_spi_bam_unmap_buffers(dd); } @@ -1197,9 +1235,11 @@ static void msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count) { if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) { - dd->mode = SPI_BAM_MODE; + dd->tx_mode = SPI_BAM_MODE; + dd->rx_mode = SPI_BAM_MODE; } else { - dd->mode = SPI_FIFO_MODE; + dd->rx_mode = SPI_FIFO_MODE; + dd->tx_mode = SPI_FIFO_MODE; dd->read_len = dd->cur_transfer->len; dd->write_len = dd->cur_transfer->len; } @@ -1215,14 +1255,23 @@ static void msm_spi_set_qup_io_modes(struct msm_spi *dd) spi_iom = readl_relaxed(dd->base + SPI_IO_MODES); /* Set input and output transfer mode: FIFO, DMOV, or BAM */ spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE); - spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT)); - spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT)); - /* Turn on packing for data mover */ - if (dd->mode == SPI_BAM_MODE) + spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT)); + spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT)); + + /* Always enable packing for the BAM mode and for non BAM mode only + * if bpw is % 8 and transfer length is % 4 Bytes. + */ + if (dd->tx_mode == SPI_BAM_MODE || + ((dd->cur_msg_len % SPI_MAX_BYTES_PER_WORD == 0) && + (dd->cur_transfer->bits_per_word) && + (dd->cur_transfer->bits_per_word <= 32) && + (dd->cur_transfer->bits_per_word % 8 == 0))) { spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN; - else { + dd->pack_words = true; + } else { spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN); spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN; + dd->pack_words = false; } /*if (dd->mode == SPI_BAM_MODE) { @@ -1280,7 +1329,7 @@ static void msm_spi_set_qup_op_mask(struct msm_spi *dd) { /* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status * change in BAM mode */ - u32 mask = (dd->mode == SPI_BAM_MODE) ? + u32 mask = (dd->tx_mode == SPI_BAM_MODE) ? QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG : 0; writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK); @@ -1321,6 +1370,8 @@ static int msm_spi_process_transfer(struct msm_spi *dd) dd->rx_bytes_remaining = dd->cur_msg_len; dd->read_buf = dd->cur_transfer->rx_buf; dd->write_buf = dd->cur_transfer->tx_buf; + dd->tx_done = false; + dd->rx_done = false; init_completion(&dd->tx_transfer_complete); init_completion(&dd->rx_transfer_complete); if (dd->cur_transfer->bits_per_word) @@ -1351,10 +1402,12 @@ static int msm_spi_process_transfer(struct msm_spi *dd) msm_spi_set_transfer_mode(dd, bpw, read_count); msm_spi_set_mx_counts(dd, read_count); - if (dd->mode == SPI_BAM_MODE) { + if (dd->tx_mode == SPI_BAM_MODE) { ret = msm_spi_dma_map_buffers(dd); if (ret < 0) { pr_err("Mapping DMA buffers\n"); + dd->tx_mode = SPI_MODE_NONE; + dd->rx_mode = SPI_MODE_NONE; return ret; } } @@ -1368,11 +1421,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd) the first. 
Restricting this to one write avoids contention issues and race conditions between this thread and the int handler */ - if (dd->mode == SPI_FIFO_MODE) { + if (dd->tx_mode != SPI_BAM_MODE) { if (msm_spi_prepare_for_write(dd)) goto transfer_end; msm_spi_start_write(dd, read_count); - } else if (dd->mode == SPI_BAM_MODE) { + } else { if ((msm_spi_bam_begin_transfer(dd)) < 0) { dev_err(dd->dev, "%s: BAM transfer setup failed\n", __func__); @@ -1388,11 +1441,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd) * might fire before the first word is written resulting in a * possible race condition. */ - if (dd->mode != SPI_BAM_MODE) + if (dd->tx_mode != SPI_BAM_MODE) if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) { dev_warn(dd->dev, "%s: Failed to set QUP to run-state. Mode:%d", - __func__, dd->mode); + __func__, dd->tx_mode); goto transfer_end; } @@ -1422,10 +1475,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd) msm_spi_udelay(dd->xfrs_delay_usec); transfer_end: - if ((dd->mode == SPI_BAM_MODE) && status) + if ((dd->tx_mode == SPI_BAM_MODE) && status) msm_spi_bam_flush(dd); msm_spi_dma_unmap_buffers(dd); - dd->mode = SPI_MODE_NONE; + dd->tx_mode = SPI_MODE_NONE; + dd->rx_mode = SPI_MODE_NONE; msm_spi_set_state(dd, SPI_OP_STATE_RESET); if (!dd->cur_transfer->cs_change) @@ -2349,7 +2403,8 @@ static int init_resources(struct platform_device *pdev) pclk_enabled = 0; dd->transfer_pending = 0; - dd->mode = SPI_MODE_NONE; + dd->tx_mode = SPI_MODE_NONE; + dd->rx_mode = SPI_MODE_NONE; rc = msm_spi_request_irq(dd, pdev, master); if (rc) diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h index 7a5cfadaa5a0..47d69965f18a 100644 --- a/drivers/spi/spi_qsd.h +++ b/drivers/spi/spi_qsd.h @@ -113,6 +113,8 @@ #define INPUT_MODE_SHIFT QSD_REG(10) QUP_REG(12) /* SPI_OPERATIONAL fields */ +#define SPI_OP_IN_BLK_RD_REQ_FLAG 0x00002000 +#define SPI_OP_OUT_BLK_WR_REQ_FLAG 0x00001000 #define SPI_OP_MAX_INPUT_DONE_FLAG 0x00000800 #define SPI_OP_MAX_OUTPUT_DONE_FLAG 0x00000400 #define SPI_OP_INPUT_SERVICE_FLAG 0x00000200 @@ -321,7 +323,8 @@ struct msm_spi { bool transfer_pending; wait_queue_head_t continue_suspend; /* DMA data */ - enum msm_spi_mode mode; + enum msm_spi_mode tx_mode; + enum msm_spi_mode rx_mode; bool use_dma; int tx_dma_chan; int tx_dma_crci; @@ -353,7 +356,8 @@ struct msm_spi { #endif struct msm_spi_platform_data *pdata; /* Platform data */ /* When set indicates multiple transfers in a single message */ - bool done; + bool rx_done; + bool tx_done; u32 cur_msg_len; /* Used in FIFO mode to keep track of the transfer being processed */ struct spi_transfer *cur_tx_transfer; @@ -371,6 +375,7 @@ struct msm_spi { struct pinctrl_state *pins_active; struct pinctrl_state *pins_sleep; bool is_init_complete; + bool pack_words; }; /* Forward declaration */ @@ -524,7 +529,8 @@ static inline void msm_spi_set_write_count(struct msm_spi *dd, int val) static inline void msm_spi_complete(struct msm_spi *dd) { - dd->done = 1; + dd->tx_done = true; + dd->rx_done = true; } static inline void msm_spi_enable_error_flags(struct msm_spi *dd) diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index a5bfeab596ac..d1802bcba0fb 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -166,6 +166,7 @@ struct spmi_pmic_arb { u16 max_apid; u16 max_periph; u32 *mapping_table; + int reserved_chan; DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS); struct irq_domain *domain; struct spmi_controller *spmic; @@ -861,6 +862,10 @@ static u16 
pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid) * ppid_to_apid is an in-memory invert of that table. */ for (apid = pa->last_apid; apid < pa->max_periph; apid++) { + /* Do not keep the reserved channel in the mapping table */ + if (pa->reserved_chan >= 0 && apid == pa->reserved_chan) + continue; + regval = readl_relaxed(pa->cnfg + SPMI_OWNERSHIP_TABLE_REG(apid)); pa->apid_data[apid].irq_owner @@ -920,6 +925,10 @@ static int pmic_arb_read_apid_map_v5(struct spmi_pmic_arb *pa) * receive interrupts from the PPID. */ for (apid = 0; apid < pa->max_periph; apid++) { + /* Do not keep the reserved channel in the mapping table */ + if (pa->reserved_chan >= 0 && apid == pa->reserved_chan) + continue; + offset = pa->ver_ops->channel_map_offset(apid); if (offset >= pa->core_size) break; @@ -1340,6 +1349,10 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) pa->ee = ee; + pa->reserved_chan = -EINVAL; + of_property_read_u32(pdev->dev.of_node, "qcom,reserved-chan", + &pa->reserved_chan); + pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1, sizeof(*pa->mapping_table), GFP_KERNEL); if (!pa->mapping_table) { diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index faa81c28a0d3..58bf3d2f52bd 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -114,6 +114,7 @@ struct ion_client { */ struct ion_handle { struct kref ref; + unsigned int user_ref_count; struct ion_client *client; struct ion_buffer *buffer; struct rb_node node; @@ -433,6 +434,50 @@ int ion_handle_put(struct ion_handle *handle) return ret; } +/* Must hold the client lock */ +static void user_ion_handle_get(struct ion_handle *handle) +{ + if (handle->user_ref_count++ == 0) + kref_get(&handle->ref); +} + +/* Must hold the client lock */ +static struct ion_handle *user_ion_handle_get_check_overflow( + struct ion_handle *handle) +{ + if (handle->user_ref_count + 1 == 0) + return ERR_PTR(-EOVERFLOW); + user_ion_handle_get(handle); + return handle; +} + +/* passes a kref to the user ref count. + * We know we're holding a kref to the object before and + * after this call, so no need to reverify handle. 
+ */ +static struct ion_handle *pass_to_user(struct ion_handle *handle) +{ + struct ion_client *client = handle->client; + struct ion_handle *ret; + + mutex_lock(&client->lock); + ret = user_ion_handle_get_check_overflow(handle); + ion_handle_put_nolock(handle); + mutex_unlock(&client->lock); + return ret; +} + +/* Must hold the client lock */ +static int user_ion_handle_put_nolock(struct ion_handle *handle) +{ + int ret = 0; + + if (--handle->user_ref_count == 0) + ret = ion_handle_put_nolock(handle); + + return ret; +} + static struct ion_handle *ion_handle_lookup(struct ion_client *client, struct ion_buffer *buffer) { @@ -644,6 +689,25 @@ static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle ion_handle_put_nolock(handle); } +static void user_ion_free_nolock(struct ion_client *client, + struct ion_handle *handle) +{ + bool valid_handle; + + WARN_ON(client != handle->client); + + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to free.\n", __func__); + return; + } + if (!handle->user_ref_count > 0) { + WARN(1, "%s: User does not have access!\n", __func__); + return; + } + user_ion_handle_put_nolock(handle); +} + void ion_free(struct ion_client *client, struct ion_handle *handle) { BUG_ON(client != handle->client); @@ -1518,7 +1582,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) data.allocation.flags, true); if (IS_ERR(handle)) return PTR_ERR(handle); - + pass_to_user(handle); data.allocation.handle = handle->id; cleanup_handle = handle; @@ -1534,7 +1598,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) mutex_unlock(&client->lock); return PTR_ERR(handle); } - ion_free_nolock(client, handle); + user_ion_free_nolock(client, handle); ion_handle_put_nolock(handle); mutex_unlock(&client->lock); break; @@ -1558,10 +1622,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct ion_handle *handle; handle = ion_import_dma_buf(client, data.fd.fd); - if (IS_ERR(handle)) + if (IS_ERR(handle)) { ret = PTR_ERR(handle); - else - data.handle.handle = handle->id; + } else { + handle = pass_to_user(handle); + if (IS_ERR(handle)) + ret = PTR_ERR(handle); + else + data.handle.handle = handle->id; + } break; } case ION_IOC_SYNC: @@ -1593,8 +1662,10 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (dir & _IOC_READ) { if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { if (cleanup_handle) { - ion_free(client, cleanup_handle); - ion_handle_put(cleanup_handle); + mutex_lock(&client->lock); + user_ion_free_nolock(client, cleanup_handle); + ion_handle_put_nolock(cleanup_handle); + mutex_unlock(&client->lock); } return -EFAULT; } diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index b5905fc81147..dfb6d2f77af1 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -238,7 +238,7 @@ void ion_cma_heap_destroy(struct ion_heap *heap) static void ion_secure_cma_free(struct ion_buffer *buffer) { int ret = 0; - u32 source_vm; + int source_vm; int dest_vmid; int dest_perms; struct ion_cma_buffer_info *info = buffer->priv_virt; diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c index ec09f9f12b47..803811597f37 100644 --- a/drivers/staging/android/ion/ion_system_secure_heap.c +++ 
b/drivers/staging/android/ion/ion_system_secure_heap.c @@ -1,6 +1,6 @@ /* * - * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -78,7 +78,7 @@ int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid) int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid) { - u32 source_vmid = VMID_HLOS; + int source_vmid = VMID_HLOS; u32 dest_perms = PERM_READ | PERM_WRITE; struct scatterlist *sg; int ret, i; diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index bec687853c5d..205af6627b80 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -217,6 +217,22 @@ static int test_task_flag(struct task_struct *p, int flag) return 0; } +static int test_task_state(struct task_struct *p, int state) +{ + struct task_struct *t; + + for_each_thread(p, t) { + task_lock(t); + if (t->state & state) { + task_unlock(t); + return 1; + } + task_unlock(t); + } + + return 0; +} + static DEFINE_MUTEX(scan_mutex); int can_use_cma_pages(gfp_t gfp_mask) @@ -404,7 +420,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) int other_free; int other_file; - if (mutex_lock_interruptible(&scan_mutex) < 0) + if (!mutex_trylock(&scan_mutex)) return 0; other_free = global_page_state(NR_FREE_PAGES); @@ -462,8 +478,6 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) if (time_before_eq(jiffies, lowmem_deathpending_timeout)) { if (test_task_flag(tsk, TIF_MEMDIE)) { rcu_read_unlock(); - /* give the system time to free up the memory */ - msleep_interruptible(20); mutex_unlock(&scan_mutex); return 0; } @@ -497,6 +511,17 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) } if (selected) { long cache_size, cache_limit, free; + + if (test_task_flag(selected, TIF_MEMDIE) && + (test_task_state(selected, TASK_UNINTERRUPTIBLE))) { + lowmem_print(2, "'%s' (%d) is already killed\n", + selected->comm, + selected->pid); + rcu_read_unlock(); + mutex_unlock(&scan_mutex); + return 0; + } + task_lock(selected); send_sig(SIGKILL, selected, 0); /* @@ -565,7 +590,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) static struct shrinker lowmem_shrinker = { .scan_objects = lowmem_scan, .count_objects = lowmem_count, - .seeks = DEFAULT_SEEKS * 16 + .seeks = DEFAULT_SEEKS * 16, + .flags = SHRINKER_LMK }; static int __init lowmem_init(void) diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 9b8d17ce3a5e..5238d67490ce 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -29,6 +29,7 @@ #include "sync.h" #define CREATE_TRACE_POINTS +#define SYNC_DUMP_TIME_LIMIT 7000 #include "trace/sync.h" static const struct fence_ops android_fence_ops; @@ -392,7 +393,9 @@ int sync_fence_wait(struct sync_fence *fence, long timeout) if (timeout) { pr_info("fence timeout on [%pK] after %dms\n", fence, jiffies_to_msecs(timeout)); - sync_dump(); + if (jiffies_to_msecs(timeout) >= + SYNC_DUMP_TIME_LIMIT) + sync_dump(); } return -ETIME; } diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index c31aaf7a9880..e5d3a0bdf32a 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ 
-160,7 +160,7 @@ static int usb_string_copy(const char *s, char **s_copy) if (!str) return -ENOMEM; } - strncpy(str, s, MAX_USB_STRING_WITH_NULL_LEN); + strlcpy(str, s, MAX_USB_STRING_WITH_NULL_LEN); if (str[ret - 1] == '\n') str[ret - 1] = '\0'; *s_copy = str; diff --git a/drivers/usb/gadget/function/f_ccid.h b/drivers/usb/gadget/function/f_ccid.h index 42a7ebbbccfc..935308cff0bc 100644 --- a/drivers/usb/gadget/function/f_ccid.h +++ b/drivers/usb/gadget/function/f_ccid.h @@ -55,29 +55,29 @@ #define CCID_READ_DTR _IOR('C', 3, int) struct usb_ccid_notification { - unsigned char buf[4]; + __u8 buf[4]; } __packed; struct ccid_bulk_in_header { - unsigned char bMessageType; - unsigned long wLength; - unsigned char bSlot; - unsigned char bSeq; - unsigned char bStatus; - unsigned char bError; - unsigned char bSpecific; - unsigned char abData[ABDATA_SIZE]; - unsigned char bSizeToSend; + __u8 bMessageType; + __u32 wLength; + __u8 bSlot; + __u8 bSeq; + __u8 bStatus; + __u8 bError; + __u8 bSpecific; + __u8 abData[ABDATA_SIZE]; + __u8 bSizeToSend; } __packed; struct ccid_bulk_out_header { - unsigned char bMessageType; - unsigned long wLength; - unsigned char bSlot; - unsigned char bSeq; - unsigned char bSpecific_0; - unsigned char bSpecific_1; - unsigned char bSpecific_2; - unsigned char APDU[ABDATA_SIZE]; + __u8 bMessageType; + __u32 wLength; + __u8 bSlot; + __u8 bSeq; + __u8 bSpecific_0; + __u8 bSpecific_1; + __u8 bSpecific_2; + __u8 APDU[ABDATA_SIZE]; } __packed; #endif diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index a1c00525a598..7c35241a487a 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -71,6 +71,7 @@ __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); /* The function structure ***************************************************/ struct ffs_ep; +static bool first_read_done; struct ffs_function { struct usb_configuration *conf; @@ -756,6 +757,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) { struct ffs_epfile *epfile = file->private_data; struct ffs_ep *ep; + struct ffs_data *ffs = epfile->ffs; char *data = NULL; ssize_t ret, data_len = -EINVAL; int halt; @@ -764,6 +766,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE"); smp_mb__before_atomic(); +retry: if (atomic_read(&epfile->error)) return -ENODEV; @@ -968,10 +971,36 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * disabled (disconnect) or changed * (composition switch) ? 
*/ - if (epfile->ep == ep) + if (epfile->ep == ep) { ret = ep->status; - else + if (ret >= 0) + first_read_done = true; + } else { ret = -ENODEV; + } + + /* do wait again if func eps are not enabled */ + if (io_data->read && !first_read_done + && ret < 0) { + unsigned short count = ffs->eps_count; + + pr_debug("%s: waiting for the online state\n", + __func__); + ret = 0; + kfree(data); + data = NULL; + data_len = -EINVAL; + spin_unlock_irq(&epfile->ffs->eps_lock); + mutex_unlock(&epfile->mutex); + epfile = ffs->epfiles; + do { + atomic_set(&epfile->error, 0); + ++epfile; + } while (--count); + epfile = file->private_data; + goto retry; + } + spin_unlock_irq(&epfile->ffs->eps_lock); if (io_data->read && ret > 0) { @@ -1038,6 +1067,7 @@ ffs_epfile_open(struct inode *inode, struct file *file) smp_mb__before_atomic(); atomic_set(&epfile->error, 0); + first_read_done = false; ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); @@ -1159,7 +1189,7 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) *to = p->data; } - ffs_log("enter"); + ffs_log("exit"); return res; } @@ -3354,6 +3384,8 @@ static int ffs_func_set_alt(struct usb_function *f, if (ffs->func) { ffs_func_eps_disable(ffs->func); ffs->func = NULL; + /* matching put to allow LPM on disconnect */ + usb_gadget_autopm_put_async(ffs->gadget); } if (ffs->state == FFS_DEACTIVATED) { @@ -3387,14 +3419,9 @@ static int ffs_func_set_alt(struct usb_function *f, static void ffs_func_disable(struct usb_function *f) { - struct ffs_function *func = ffs_func_from_usb(f); - struct ffs_data *ffs = func->ffs; - ffs_log("enter"); ffs_func_set_alt(f, 0, (unsigned)-1); - /* matching put to allow LPM on disconnect */ - usb_gadget_autopm_put_async(ffs->gadget); ffs_log("exit"); } diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index be532503954f..7216fdd4245d 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -1550,13 +1550,6 @@ static void gsi_ctrl_notify_resp_complete(struct usb_ep *ep, event->bNotificationType, req->status); /* FALLTHROUGH */ case 0: - /* - * handle multiple pending resp available - * notifications by queuing same until we're done, - * rest of the notification require queuing new - * request. 
- */ - gsi_ctrl_send_notification(gsi); break; } } @@ -1651,6 +1644,14 @@ static void gsi_ctrl_reset_cmd_complete(struct usb_ep *ep, gsi_ctrl_send_cpkt_tomodem(gsi, req->buf, 0); } +static void gsi_ctrl_send_response_complete(struct usb_ep *ep, + struct usb_request *req) +{ + struct f_gsi *gsi = req->context; + + gsi_ctrl_send_notification(gsi); +} + static int gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { @@ -1737,6 +1738,8 @@ gsi_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) memcpy(req->buf, cpkt->buf, value); gsi_ctrl_pkt_free(cpkt); + req->complete = gsi_ctrl_send_response_complete; + req->context = gsi; log_event_dbg("copied encap_resp %d bytes", value); break; diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index 1fd5a95b6e99..59d6ac67d072 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2284,16 +2284,15 @@ reset: fsg->bulk_out_enabled = 0; } + /* allow usb LPM after eps are disabled */ + usb_gadget_autopm_put_async(common->gadget); common->fsg = NULL; wake_up(&common->fsg_wait); } common->running = 0; - if (!new_fsg || rc) { - /* allow usb LPM after eps are disabled */ - usb_gadget_autopm_put_async(common->gadget); + if (!new_fsg || rc) return rc; - } common->fsg = new_fsg; fsg = common->fsg; diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index aa11cf2f7417..6ba742076baa 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -222,11 +222,6 @@ static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on) "enable phy->fpc_redrive_ldo failed\n"); return rc; } - - dev_dbg(phy->phy.dev, - "fpc redrive ldo: min_vol:%duV max_vol:%duV\n", - phy->redrive_voltage_levels[VOLTAGE_LEVEL_MIN], - phy->redrive_voltage_levels[VOLTAGE_LEVEL_MAX]); } rc = msm_ldo_enable(phy, phy->vdd, phy->vdd_levels, @@ -236,11 +231,6 @@ static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on) goto disable_fpc_redrive; } - dev_dbg(phy->phy.dev, - "vdd ldo: min_vol:%duV max_vol:%duV\n", - phy->vdd_levels[VOLTAGE_LEVEL_MIN], - phy->vdd_levels[VOLTAGE_LEVEL_MAX]); - rc = msm_ldo_enable(phy, phy->core_ldo, phy->core_voltage_levels, USB_SSPHY_HPM_LOAD); if (rc < 0) { @@ -248,11 +238,6 @@ static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on) goto disable_vdd; } - dev_dbg(phy->phy.dev, - "core ldo: min_vol:%duV max_vol:%duV\n", - phy->core_voltage_levels[VOLTAGE_LEVEL_MIN], - phy->core_voltage_levels[VOLTAGE_LEVEL_MAX]); - return 0; disable_regulators: diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c index e60869144339..0ecf1ef92ab3 100644 --- a/drivers/video/fbdev/msm/mdss_debug.c +++ b/drivers/video/fbdev/msm/mdss_debug.c @@ -59,11 +59,13 @@ static int panel_debug_base_open(struct inode *inode, struct file *file) static int panel_debug_base_release(struct inode *inode, struct file *file) { struct mdss_debug_base *dbg = file->private_data; + mutex_lock(&mdss_debug_lock); if (dbg && dbg->buf) { kfree(dbg->buf); dbg->buf_len = 0; dbg->buf = NULL; } + mutex_unlock(&mdss_debug_lock); return 0; } @@ -385,11 +387,13 @@ static int mdss_debug_base_open(struct inode *inode, struct file *file) static int mdss_debug_base_release(struct inode *inode, struct file *file) { struct mdss_debug_base *dbg = file->private_data; + mutex_lock(&mdss_debug_lock); if (dbg && dbg->buf) { kfree(dbg->buf); dbg->buf_len = 0; 
dbg->buf = NULL; } + mutex_unlock(&mdss_debug_lock); return 0; } diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c index 2d499ef903d3..1e878e9a00cb 100644 --- a/drivers/video/fbdev/msm/mdss_dp.c +++ b/drivers/video/fbdev/msm/mdss_dp.c @@ -69,6 +69,11 @@ static int mdss_dp_process_phy_test_pattern_request( static int mdss_dp_send_audio_notification( struct mdss_dp_drv_pdata *dp, int val); +static inline void mdss_dp_reset_sink_count(struct mdss_dp_drv_pdata *dp) +{ + memset(&dp->sink_count, 0, sizeof(dp->sink_count)); +} + static inline void mdss_dp_reset_test_data(struct mdss_dp_drv_pdata *dp) { dp->test_data = (const struct dpcd_test_request){ 0 }; @@ -133,22 +138,77 @@ static int mdss_dp_is_clk_prefix(const char *clk_prefix, const char *clk_name) return !strncmp(clk_name, clk_prefix, strlen(clk_prefix)); } +static void mdss_dp_reset_phy_config_indices(struct mdss_dp_drv_pdata *dp) +{ + int i = 0; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp->aux_cfg[i].current_index = 0; +} + +static void mdss_dp_phy_aux_cfg_reset(struct mdss_dp_drv_pdata *dp) +{ + int i = 0; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) + dp->aux_cfg[i] = (const struct mdss_dp_phy_cfg){ 0 }; +} + +static int mdss_dp_parse_aux_cfg(struct platform_device *pdev, + struct mdss_dp_drv_pdata *dp) +{ + int len = 0, i = 0, j = 0, config_count = 0; + const char *data; + int const minimum_config_count = 1; + + for (i = 0; i < PHY_AUX_CFG_MAX; i++) { + const char *property = mdss_dp_get_phy_aux_config_property(i); + + data = of_get_property(pdev->dev.of_node, property, &len); + if (!data) { + pr_err("Unable to read %s\n", property); + goto error; + } + + config_count = len - 1; + if ((config_count < minimum_config_count) || + (config_count > MDSS_DP_MAX_PHY_CFG_VALUE_CNT)) { + pr_err("Invalid config count (%d) configs for %s\n", + config_count, property); + goto error; + } + + dp->aux_cfg[i].offset = data[0]; + dp->aux_cfg[i].cfg_cnt = config_count; + pr_debug("%s offset=0x%x, cfg_cnt=%d\n", + property, + dp->aux_cfg[i].offset, + dp->aux_cfg[i].cfg_cnt); + for (j = 1; j < len; j++) { + dp->aux_cfg[i].lut[j - 1] = data[j]; + pr_debug("%s lut[%d]=0x%x\n", + property, + i, + dp->aux_cfg[i].lut[j - 1]); + } + } + + return 0; + +error: + mdss_dp_phy_aux_cfg_reset(dp); + return -EINVAL; +} + static int mdss_dp_parse_prop(struct platform_device *pdev, struct mdss_dp_drv_pdata *dp_drv) { int len = 0, i = 0, rc = 0; const char *data; - data = of_get_property(pdev->dev.of_node, - "qcom,aux-cfg-settings", &len); - if ((!data) || (len != AUX_CFG_LEN)) { - pr_err("%s:%d, Unable to read DP AUX CFG settings", - __func__, __LINE__); - return -EINVAL; - } - - for (i = 0; i < len; i++) - dp_drv->aux_cfg[i] = data[i]; + rc = mdss_dp_parse_aux_cfg(pdev, dp_drv); + if (rc) + return rc; data = of_get_property(pdev->dev.of_node, "qcom,logical2physical-lane-map", &len); @@ -958,6 +1018,12 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp) mdss_dp_configuration_ctrl(&dp->ctrl_io, data); } +static inline void mdss_dp_ack_state(struct mdss_dp_drv_pdata *dp, int val) +{ + if (dp && dp->ext_audio_data.intf_ops.notify) + dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, val); +} + static int mdss_dp_wait4video_ready(struct mdss_dp_drv_pdata *dp_drv) { int ret = 0; @@ -989,26 +1055,28 @@ static int mdss_dp_wait4video_ready(struct mdss_dp_drv_pdata *dp_drv) static void mdss_dp_update_cable_status(struct mdss_dp_drv_pdata *dp, bool connected) { - mutex_lock(&dp->pd_msg_mutex); + mutex_lock(&dp->attention_lock); 
pr_debug("cable_connected to %d\n", connected); if (dp->cable_connected != connected) dp->cable_connected = connected; else pr_debug("no change in cable status\n"); - mutex_unlock(&dp->pd_msg_mutex); + mutex_unlock(&dp->attention_lock); } static int dp_get_cable_status(struct platform_device *pdev, u32 vote) { - struct mdss_dp_drv_pdata *dp_ctrl = platform_get_drvdata(pdev); + struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev); u32 hpd; - if (!dp_ctrl) { + if (!dp) { DEV_ERR("%s: invalid input\n", __func__); return -ENODEV; } - hpd = dp_ctrl->cable_connected; + mutex_lock(&dp->attention_lock); + hpd = dp->cable_connected; + mutex_unlock(&dp->attention_lock); return hpd; } @@ -1230,12 +1298,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic) return 0; } /* dp_init_panel_info */ -static inline void mdss_dp_ack_state(struct mdss_dp_drv_pdata *dp, int val) -{ - if (dp && dp->ext_audio_data.intf_ops.notify) - dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, val); -} - /** * mdss_dp_get_lane_mapping() - returns lane mapping based on given orientation * @orientation: usb plug orientation @@ -1434,6 +1496,13 @@ static int mdss_dp_setup_main_link(struct mdss_dp_drv_pdata *dp, bool train) if (ret) goto end; + /* + * Skip the transfer unit setup and the routine to wait for the + * video ready interrupt as link training tests do not require + * video frames to be sent. + */ + if (mdss_dp_is_link_training_requested(dp)) + goto end; send_video: /* * Set up transfer unit values and set controller state to send @@ -1512,8 +1581,14 @@ exit_loop: pr_debug("end\n"); - /* Send a connect notification */ - if (!mdss_dp_is_phy_test_pattern_requested(dp_drv)) + /* + * Send a connect notification to clients except when processing link + * training and electrical compliance tests. There is no need to send + * a notification in these testing use cases as there is no + * expectation of receiving a video signal as part of the test. + */ + if (!mdss_dp_is_phy_test_pattern_requested(dp_drv) && + !mdss_dp_is_link_training_requested(dp_drv)) mdss_dp_notify_clients(dp_drv, NOTIFY_CONNECT_IRQ_HPD); return ret; @@ -1608,7 +1683,19 @@ int mdss_dp_on(struct mdss_panel_data *pdata) dp_drv = container_of(pdata, struct mdss_dp_drv_pdata, panel_data); - if (dp_drv->power_on) { + /* + * If the link already active, then nothing needs to be done here. + * However, it is possible that the the power_on flag could be + * set to true but we would still need to initialize the DP host. + * An example of this use-case is when a multiport dongle is connected + * and subsequently the downstream sink is disconnected. This would + * only go through the IRQ HPD path where we tear down the link but + * the power_on flag remains set to true. 
When the downstream sink + * is subsequently connected again, we need to re-initialize DP + * host + */ + if (dp_drv->power_on && + (dp_drv->new_vic && (dp_drv->new_vic == dp_drv->vic))) { pr_debug("Link already setup, return\n"); return 0; } @@ -1626,6 +1713,23 @@ int mdss_dp_on(struct mdss_panel_data *pdata) return mdss_dp_on_hpd(dp_drv); } +static bool mdss_dp_is_ds_bridge(struct mdss_dp_drv_pdata *dp) +{ + return dp->dpcd.downstream_port.dfp_present; +} + +static bool mdss_dp_is_ds_bridge_sink_count_zero(struct mdss_dp_drv_pdata *dp) +{ + return (mdss_dp_is_ds_bridge(dp) && + (dp->sink_count.count == 0)); +} + +static bool mdss_dp_is_ds_bridge_no_local_edid(struct mdss_dp_drv_pdata *dp) +{ + return (mdss_dp_is_ds_bridge_sink_count_zero(dp) && + !(dp->dpcd.flags & DPCD_PORT_0_EDID_PRESENTED)); +} + static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv) { if (!dp_drv->power_on) { @@ -1643,10 +1747,16 @@ static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv) /* Make sure DP mainlink and audio engines are disabled */ wmb(); - mdss_dp_ack_state(dp_drv, false); + /* + * If downstream device is a brige which no longer has any + * downstream devices connected to it, then we should reset + * the current panel info + */ + if (mdss_dp_is_ds_bridge_sink_count_zero(dp_drv)) + dp_init_panel_info(dp_drv, HDMI_VFRMT_UNKNOWN); + mutex_unlock(&dp_drv->train_mutex); - complete_all(&dp_drv->irq_comp); pr_debug("end\n"); return 0; @@ -1673,11 +1783,11 @@ static int mdss_dp_off_hpd(struct mdss_dp_drv_pdata *dp_drv) mdss_dp_host_deinit(dp_drv); dp_drv->power_on = false; - dp_drv->sink_info_read = false; dp_init_panel_info(dp_drv, HDMI_VFRMT_UNKNOWN); - mdss_dp_ack_state(dp_drv, false); mdss_dp_reset_test_data(dp_drv); + mdss_dp_reset_sink_count(dp_drv); + dp_drv->prev_sink_count = dp_drv->sink_count; mutex_unlock(&dp_drv->train_mutex); pr_debug("DP off done\n"); @@ -1716,8 +1826,9 @@ static int mdss_dp_send_audio_notification( if (mdss_dp_sink_audio_supp(dp) || dp->audio_test_req) { dp->audio_test_req = false; - pr_debug("sending audio notification\n"); flags |= MSM_EXT_DISP_HPD_AUDIO; + pr_debug("sending audio notification = %d, flags = %d\n", val, + flags); if (dp->ext_audio_data.intf_ops.hpd) ret = dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev, @@ -1742,7 +1853,8 @@ static int mdss_dp_send_video_notification( goto end; } - flags |= MSM_EXT_DISP_HPD_VIDEO; + flags |= MSM_EXT_DISP_HPD_ASYNC_VIDEO; + pr_debug("sending video notification = %d, flags = %d\n", val, flags); if (dp->ext_audio_data.intf_ops.hpd) ret = dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev, @@ -1799,6 +1911,8 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata) dp_drv->edid_buf = edid_init_data.buf; dp_drv->edid_buf_size = edid_init_data.buf_size; + mdss_dp_set_default_resolution(dp_drv); + return 0; } @@ -1850,14 +1964,16 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata) mdss_dp_ctrl_reset(&dp_drv->ctrl_io); mdss_dp_phy_reset(&dp_drv->ctrl_io); mdss_dp_aux_reset(&dp_drv->ctrl_io); + mdss_dp_aux_set_limits(&dp_drv->ctrl_io); + mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true); pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n", mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io), mdss_dp_get_phy_hw_version(&dp_drv->phy_io)); - mdss_dp_phy_aux_setup(&dp_drv->phy_io, dp_drv->aux_cfg, - dp_drv->phy_reg_offset); + mdss_dp_reset_phy_config_indices(dp_drv); + mdss_dp_phy_aux_setup(dp_drv); mdss_dp_irq_enable(dp_drv); dp_drv->dp_initialized = true; @@ -1925,10 +2041,11 @@ static int mdss_dp_host_deinit(struct mdss_dp_drv_pdata *dp) 
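Note: the three mdss_dp_is_ds_bridge*() helpers added above key off two DPCD facts: whether the sink is a branch device (a downstream-facing port is present) and how many downstream sinks it currently reports. A compressed standalone restatement of the same predicates, with a hypothetical dp_state struct in place of mdss_dp_drv_pdata:

#include <stdbool.h>

#define PORT_0_EDID_PRESENTED (1u << 4)   /* mirrors DPCD_PORT_0_EDID_PRESENTED */

struct dp_state {                         /* hypothetical stand-in              */
	bool dfp_present;                 /* branch device exposes DFPs         */
	unsigned int sink_count;          /* DPCD sink count (addr 0x200)       */
	unsigned int dpcd_flags;
};

static bool is_ds_bridge(const struct dp_state *s)
{
	return s->dfp_present;
}

/* Branch device (e.g. a multiport dongle) with nothing plugged downstream. */
static bool ds_bridge_sink_count_zero(const struct dp_state *s)
{
	return is_ds_bridge(s) && s->sink_count == 0;
}

/* ...which also does not advertise a local EDID on DPCD port 0. */
static bool ds_bridge_no_local_edid(const struct dp_state *s)
{
	return ds_bridge_sink_count_zero(s) &&
	       !(s->dpcd_flags & PORT_0_EDID_PRESENTED);
}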
static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, enum notification_status status) { - const int irq_comp_timeout = HZ * 2; int ret = 0; + bool notify = false; + bool connect; - mutex_lock(&dp->pd_msg_mutex); + pr_debug("beginning notification\n"); if (status == dp->hpd_notification_status) { pr_debug("No change in status %s --> %s\n", mdss_dp_notification_status_to_string(status), @@ -1941,39 +2058,40 @@ static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, case NOTIFY_CONNECT_IRQ_HPD: if (dp->hpd_notification_status != NOTIFY_DISCONNECT_IRQ_HPD) goto invalid_request; - /* Follow the same programming as for NOTIFY_CONNECT */ - mdss_dp_host_init(&dp->panel_data); - mdss_dp_send_video_notification(dp, true); + notify = true; + connect = true; break; case NOTIFY_CONNECT: - if ((dp->hpd_notification_status == NOTIFY_CONNECT_IRQ_HPD) || - (dp->hpd_notification_status == - NOTIFY_DISCONNECT_IRQ_HPD)) + if (dp->hpd_notification_status == NOTIFY_CONNECT_IRQ_HPD) goto invalid_request; - mdss_dp_host_init(&dp->panel_data); - mdss_dp_send_video_notification(dp, true); + notify = true; + connect = true; break; case NOTIFY_DISCONNECT: - mdss_dp_send_audio_notification(dp, false); - mdss_dp_send_video_notification(dp, false); + /* + * Userspace triggers a disconnect event on boot up, this must + * not be processed as there was no previously connected sink + * device. + */ + if (dp->hpd_notification_status == NOTIFY_UNKNOWN) + goto invalid_request; + if (dp->hpd_notification_status == NOTIFY_DISCONNECT_IRQ_HPD) { + /* + * user modules already turned off. Need to explicitly + * turn off DP core here. + */ + mdss_dp_off_hpd(dp); + } else { + notify = true; + connect = false; + } break; case NOTIFY_DISCONNECT_IRQ_HPD: if (dp->hpd_notification_status == NOTIFY_DISCONNECT) goto invalid_request; - mdss_dp_send_audio_notification(dp, false); - mdss_dp_send_video_notification(dp, false); - if (!IS_ERR_VALUE(ret) && ret) { - reinit_completion(&dp->irq_comp); - ret = wait_for_completion_timeout(&dp->irq_comp, - irq_comp_timeout); - if (ret <= 0) { - pr_warn("irq_comp timed out\n"); - ret = -EINVAL; - } else { - ret = 0; - } - } + notify = true; + connect = false; break; default: pr_err("Invalid notification status = %d\n", status); @@ -1981,7 +2099,7 @@ static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, break; } - goto end; + goto notify; invalid_request: pr_err("Invalid request %s --> %s\n", @@ -1990,16 +2108,34 @@ invalid_request: mdss_dp_notification_status_to_string(status)); ret = -EINVAL; -end: +notify: + if (ret || !notify) { + pr_debug("not sending notification\n"); + goto end; + } + + atomic_set(&dp->notification_pending, 1); + if (connect) { + mdss_dp_host_init(&dp->panel_data); + ret = mdss_dp_send_video_notification(dp, true); + } else { + mdss_dp_send_audio_notification(dp, false); + ret = mdss_dp_send_video_notification(dp, false); + } + if (!ret) { pr_debug("Successfully sent notification %s --> %s\n", mdss_dp_notification_status_to_string( dp->hpd_notification_status), mdss_dp_notification_status_to_string(status)); - dp->hpd_notification_status = status; + } else { + pr_err("%s Notification failed\n", + mdss_dp_notification_status_to_string(status)); + atomic_set(&dp->notification_pending, 0); } - mutex_unlock(&dp->pd_msg_mutex); +end: + dp->hpd_notification_status = status; return ret; } @@ -2008,9 +2144,6 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp) int ret; u32 max_pclk_khz; - if (dp->sink_info_read) - return 0; - 
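Note: most of the reworked mdss_dp_notify_clients() above boils down to two decisions: is the requested status a legal transition from the current one, and does it map to a connect or a disconnect notification. The transition check, condensed into a standalone predicate (enum values assumed to mirror the driver's notification states):

#include <stdbool.h>

enum notify_status {                      /* assumed to mirror the driver enum */
	NOTIFY_UNKNOWN,
	NOTIFY_CONNECT,
	NOTIFY_DISCONNECT,
	NOTIFY_CONNECT_IRQ_HPD,
	NOTIFY_DISCONNECT_IRQ_HPD,
};

/* Return true when the driver acts on the request at all; same-status
 * requests and invalid transitions are dropped.
 */
static bool transition_accepted(enum notify_status cur, enum notify_status req)
{
	if (cur == req)                   /* no change in status: early return */
		return false;

	switch (req) {
	case NOTIFY_CONNECT_IRQ_HPD:      /* only valid after an IRQ disconnect */
		return cur == NOTIFY_DISCONNECT_IRQ_HPD;
	case NOTIFY_CONNECT:
		return cur != NOTIFY_CONNECT_IRQ_HPD;
	case NOTIFY_DISCONNECT:
		/*
		 * Boot-time disconnects (cur == UNKNOWN) are ignored; when
		 * coming from an IRQ disconnect the request is accepted but
		 * the driver powers the core off instead of notifying.
		 */
		return cur != NOTIFY_UNKNOWN;
	case NOTIFY_DISCONNECT_IRQ_HPD:
		return cur != NOTIFY_DISCONNECT;
	default:
		return false;
	}
}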
pr_debug("start\n"); ret = mdss_dp_dpcd_cap_read(dp); @@ -2023,8 +2156,25 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp) */ pr_err("dpcd read failed, set failsafe parameters\n"); mdss_dp_set_default_link_parameters(dp); + goto read_edid; } + /* + * When connected to a multiport adaptor which does not have a + * local EDID present, do not attempt to read the EDID. + * When connected to a multiport adaptor with no downstream device + * connected to it, do not attempt to read the EDID. It is possible + * that the adaptor may advertise the presence of local EDID, but it + * is not guaranteed to work. + */ + if (mdss_dp_is_ds_bridge_sink_count_zero(dp)) { + if (mdss_dp_is_ds_bridge_no_local_edid(dp)) + pr_debug("No local EDID present on DS branch device\n"); + pr_info("no downstream devices, skip client notification\n"); + goto end; + } + +read_edid: ret = mdss_dp_edid_read(dp); if (ret) { pr_err("edid read error, setting default resolution\n"); @@ -2035,14 +2185,18 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp) hdmi_edid_set_max_pclk_rate(dp->panel_data.panel_info.edid_data, min(dp->max_pclk_khz, max_pclk_khz)); + if (dp->dpcd_read_required) { + pr_debug("reading DPCD with updated AUX config\n"); + mdss_dp_dpcd_cap_read(dp); + dp->dpcd_read_required = false; + } + ret = hdmi_edid_parser(dp->panel_data.panel_info.edid_data); if (ret) { pr_err("edid parse failed, setting default resolution\n"); goto notify; } - dp->sink_info_read = true; - notify: if (ret) { /* set failsafe parameters */ @@ -2069,7 +2223,6 @@ notify: end: pr_debug("end\n"); return ret; - } static int mdss_dp_check_params(struct mdss_dp_drv_pdata *dp, void *arg) @@ -2270,12 +2423,16 @@ static ssize_t mdss_dp_rda_connected(struct device *dev, { ssize_t ret; struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev); + bool cable_connected; if (!dp) return -EINVAL; - ret = snprintf(buf, PAGE_SIZE, "%d\n", dp->cable_connected); - pr_debug("%d\n", dp->cable_connected); + mutex_lock(&dp->attention_lock); + cable_connected = dp->cable_connected; + mutex_unlock(&dp->attention_lock); + ret = snprintf(buf, PAGE_SIZE, "%d\n", cable_connected); + pr_debug("%d\n", cable_connected); return ret; } @@ -2444,6 +2601,7 @@ static ssize_t mdss_dp_wta_hpd(struct device *dev, int hpd, rc; ssize_t ret = strnlen(buf, PAGE_SIZE); struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev); + bool cable_connected; if (!dp) { pr_err("invalid data\n"); @@ -2459,9 +2617,13 @@ static ssize_t mdss_dp_wta_hpd(struct device *dev, } dp->hpd = !!hpd; - pr_debug("hpd=%d\n", dp->hpd); + mutex_lock(&dp->attention_lock); + cable_connected = dp->cable_connected; + mutex_unlock(&dp->attention_lock); + pr_debug("hpd=%d cable_connected=%s\n", dp->hpd, + cable_connected ? 
"true" : "false"); - if (dp->hpd && dp->cable_connected) { + if (dp->hpd && cable_connected) { if (dp->alt_mode.current_state & DP_CONFIGURE_DONE) { mdss_dp_host_init(&dp->panel_data); mdss_dp_process_hpd_high(dp); @@ -2791,8 +2953,6 @@ static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata) /* wait until link training is completed */ mutex_lock(&dp_drv->train_mutex); - mdss_dp_aux_set_sink_power_state(dp_drv, SINK_POWER_OFF); - reinit_completion(&dp_drv->idle_comp); mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE); if (!wait_for_completion_timeout(&dp_drv->idle_comp, @@ -2882,28 +3042,33 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata, switch (event) { case MDSS_EVENT_UNBLANK: - mdss_dp_ack_state(dp, true); rc = mdss_dp_on(pdata); break; case MDSS_EVENT_PANEL_ON: mdss_dp_update_hdcp_info(dp); if (dp_is_hdcp_enabled(dp)) { - cancel_delayed_work(&dp->hdcp_cb_work); + cancel_delayed_work_sync(&dp->hdcp_cb_work); dp->hdcp_status = HDCP_STATE_AUTHENTICATING; queue_delayed_work(dp->workq, &dp->hdcp_cb_work, HZ / 2); } break; + case MDSS_EVENT_POST_PANEL_ON: + atomic_set(&dp->notification_pending, 0); + complete_all(&dp->notification_comp); + break; case MDSS_EVENT_PANEL_OFF: rc = mdss_dp_off(pdata); + atomic_set(&dp->notification_pending, 0); + complete_all(&dp->notification_comp); break; case MDSS_EVENT_BLANK: if (dp_is_hdcp_enabled(dp)) { dp->hdcp_status = HDCP_STATE_INACTIVE; - cancel_delayed_work(&dp->hdcp_cb_work); + cancel_delayed_work_sync(&dp->hdcp_cb_work); if (dp->hdcp.ops->off) dp->hdcp.ops->off(dp->hdcp.data); } @@ -3060,6 +3225,7 @@ static int mdss_retrieve_dp_ctrl_resources(struct platform_device *pdev, static void mdss_dp_video_ready(struct mdss_dp_drv_pdata *dp) { pr_debug("dp_video_ready\n"); + mdss_dp_ack_state(dp, true); complete(&dp->video_comp); } @@ -3091,10 +3257,21 @@ static int mdss_dp_event_thread(void *data) ev_data = (struct mdss_dp_event_data *)data; + pr_debug("starting\n"); while (!kthread_should_stop()) { wait_event(ev_data->event_q, (ev_data->pndx != ev_data->gndx) || - kthread_should_stop()); + kthread_should_stop() || + kthread_should_park()); + if (kthread_should_stop()) + return 0; + + if (kthread_should_park()) { + pr_debug("parking event thread\n"); + kthread_parkme(); + continue; + } + spin_lock_irqsave(&ev_data->event_lock, flag); ev = &(ev_data->event_list[ev_data->gndx++]); todo = ev->id; @@ -3186,6 +3363,7 @@ irqreturn_t dp_isr(int irq, void *ptr) spin_lock(&dp->lock); isr1 = dp_read(base + DP_INTR_STATUS); isr2 = dp_read(base + DP_INTR_STATUS2); + pr_debug("isr1=0x%08x, isr2=0x%08x\n", isr1, isr2); mask1 = isr1 & dp->mask1; @@ -3269,6 +3447,27 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp) return 0; } +static void mdss_dp_reset_event_list(struct mdss_dp_drv_pdata *dp) +{ + struct mdss_dp_event_data *ev_data = &dp->dp_event; + + spin_lock(&ev_data->event_lock); + ev_data->pndx = ev_data->gndx = 0; + spin_unlock(&ev_data->event_lock); + + mutex_lock(&dp->attention_lock); + INIT_LIST_HEAD(&dp->attention_head); + mutex_unlock(&dp->attention_lock); +} + +static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp) +{ + pr_debug("enter\n"); + mdss_dp_reset_event_list(dp); + atomic_set(&dp->notification_pending, 0); + complete_all(&dp->notification_comp); +} + static void usbpd_connect_callback(struct usbpd_svid_handler *hdlr) { struct mdss_dp_drv_pdata *dp_drv; @@ -3280,6 +3479,8 @@ static void usbpd_connect_callback(struct usbpd_svid_handler *hdlr) } mdss_dp_update_cable_status(dp_drv, true); + if 
(dp_drv->ev_thread) + kthread_unpark(dp_drv->ev_thread); if (dp_drv->hpd) dp_send_events(dp_drv, EV_USBPD_DISCOVER_MODES); @@ -3299,6 +3500,9 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr) mdss_dp_update_cable_status(dp_drv, false); dp_drv->alt_mode.current_state = UNKNOWN_STATE; + mdss_dp_reset_sw_state(dp_drv); + kthread_park(dp_drv->ev_thread); + /** * Manually turn off the DP controller if we are in PHY * testing mode. @@ -3402,6 +3606,17 @@ static inline void mdss_dp_link_maintenance(struct mdss_dp_drv_pdata *dp, if (mdss_dp_notify_clients(dp, NOTIFY_DISCONNECT_IRQ_HPD)) return; + if (atomic_read(&dp->notification_pending)) { + int ret; + + pr_debug("waiting for the disconnect to finish\n"); + ret = wait_for_completion_timeout(&dp->notification_comp, HZ); + if (ret <= 0) { + pr_warn("NOTIFY_DISCONNECT_IRQ_HPD timed out\n"); + return; + } + } + mdss_dp_on_irq(dp, lt_needed); } @@ -3553,7 +3768,7 @@ static int mdss_dp_process_audio_pattern_request(struct mdss_dp_drv_pdata *dp) return -EINVAL; if (dp_is_hdcp_enabled(dp) && dp->hdcp.ops->off) { - cancel_delayed_work(&dp->hdcp_cb_work); + cancel_delayed_work_sync(&dp->hdcp_cb_work); dp->hdcp.ops->off(dp->hdcp.data); } @@ -3599,10 +3814,46 @@ static int mdss_dp_process_audio_pattern_request(struct mdss_dp_drv_pdata *dp) static int mdss_dp_process_downstream_port_status_change( struct mdss_dp_drv_pdata *dp) { - if (!mdss_dp_is_downstream_port_status_changed(dp)) + bool ds_status_changed = false; + + if (mdss_dp_is_downstream_port_status_changed(dp)) { + pr_debug("downstream port status changed\n"); + ds_status_changed = true; + } + + /* + * Ideally sink should update the downstream port status changed + * whenever it updates the downstream sink count. However, it is + * possible that only the sink count is updated without setting + * the downstream port status changed bit. 
+ */ + if (dp->sink_count.count != dp->prev_sink_count.count) { + pr_debug("downstream sink count changed from %d --> %d\n", + dp->prev_sink_count.count, dp->sink_count.count); + ds_status_changed = true; + } + + if (!ds_status_changed) return -EINVAL; - return mdss_dp_edid_read(dp); + mdss_dp_notify_clients(dp, NOTIFY_DISCONNECT_IRQ_HPD); + if (atomic_read(&dp->notification_pending)) { + int ret; + + pr_debug("waiting for the disconnect to finish\n"); + ret = wait_for_completion_timeout(&dp->notification_comp, HZ); + if (ret <= 0) { + pr_warn("NOTIFY_DISCONNECT_IRQ_HPD timed out\n"); + return -ETIMEDOUT; + } + } + + if (mdss_dp_is_ds_bridge_sink_count_zero(dp)) { + pr_debug("sink count is zero, nothing to do\n"); + return 0; + } + + return mdss_dp_process_hpd_high(dp); } static bool mdss_dp_video_pattern_test_lt_needed(struct mdss_dp_drv_pdata *dp) @@ -3700,19 +3951,19 @@ static int mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp) mdss_dp_aux_parse_sink_status_field(dp); - ret = mdss_dp_process_link_training_request(dp); + ret = mdss_dp_process_downstream_port_status_change(dp); if (!ret) goto exit; - ret = mdss_dp_process_phy_test_pattern_request(dp); + ret = mdss_dp_process_link_training_request(dp); if (!ret) goto exit; - ret = mdss_dp_process_link_status_update(dp); + ret = mdss_dp_process_phy_test_pattern_request(dp); if (!ret) goto exit; - ret = mdss_dp_process_downstream_port_status_change(dp); + ret = mdss_dp_process_link_status_update(dp); if (!ret) goto exit; @@ -3725,7 +3976,6 @@ static int mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp) goto exit; pr_debug("done\n"); - exit: dp->hpd_irq_on = false; return ret; @@ -3771,7 +4021,8 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd, node->vdo = *vdos; mutex_lock(&dp_drv->attention_lock); - list_add_tail(&node->list, &dp_drv->attention_head); + if (dp_drv->cable_connected) + list_add_tail(&node->list, &dp_drv->attention_head); mutex_unlock(&dp_drv->attention_lock); dp_send_events(dp_drv, EV_USBPD_ATTENTION); @@ -3819,11 +4070,21 @@ static void mdss_dp_process_attention(struct mdss_dp_drv_pdata *dp_drv) if (!dp_drv->alt_mode.dp_status.hpd_high) { pr_debug("Attention: HPD low\n"); + if (!dp_drv->power_on) { + pr_debug("HPD already low\n"); + return; + } + if (dp_is_hdcp_enabled(dp_drv) && dp_drv->hdcp.ops->off) { - cancel_delayed_work(&dp_drv->hdcp_cb_work); + cancel_delayed_work_sync(&dp_drv->hdcp_cb_work); dp_drv->hdcp.ops->off(dp_drv->hdcp.data); } + /* + * Reset the sink count before nofifying clients since HPD Low + * indicates that the downstream device has been disconnected. 
+ */ + mdss_dp_reset_sink_count(dp_drv); mdss_dp_notify_clients(dp_drv, NOTIFY_DISCONNECT); pr_debug("Attention: Notified clients\n"); @@ -3851,6 +4112,11 @@ static void mdss_dp_process_attention(struct mdss_dp_drv_pdata *dp_drv) pr_debug("Attention: HPD high\n"); + if (dp_drv->power_on) { + pr_debug("HPD high processed already\n"); + return; + } + dp_drv->alt_mode.current_state |= DP_STATUS_DONE; if (dp_drv->alt_mode.current_state & DP_CONFIGURE_DONE) { @@ -3872,7 +4138,13 @@ static void mdss_dp_handle_attention(struct mdss_dp_drv_pdata *dp) pr_debug("processing item %d in the list\n", ++i); + reinit_completion(&dp->notification_comp); mutex_lock(&dp->attention_lock); + if (!dp->cable_connected) { + pr_debug("cable disconnected, returning\n"); + mutex_unlock(&dp->attention_lock); + goto exit; + } node = list_first_entry(&dp->attention_head, struct mdss_dp_attention_node, list); @@ -3886,9 +4158,25 @@ static void mdss_dp_handle_attention(struct mdss_dp_drv_pdata *dp) mdss_dp_usbpd_ext_dp_status(&dp->alt_mode.dp_status); mdss_dp_process_attention(dp); + if (atomic_read(&dp->notification_pending)) { + pr_debug("waiting for the attention event to finish\n"); + /* + * This wait is intentionally implemented without a + * timeout since this is happens only in possible error + * conditions e.g. if the display framework does not + * power off/on the DisplayPort device in time. Other + * events might already be queued from the sink at this + * point and they cannot be processed until the power + * off/on is complete otherwise we might have problems + * with interleaving of these events e.g. un-clocked + * register access. + */ + wait_for_completion(&dp->notification_comp); + } pr_debug("done processing item %d in the list\n", i); }; +exit: pr_debug("exit\n"); } @@ -3964,7 +4252,6 @@ static int mdss_dp_probe(struct platform_device *pdev) dp_drv->mask1 = EDP_INTR_MASK1; dp_drv->mask2 = EDP_INTR_MASK2; mutex_init(&dp_drv->emutex); - mutex_init(&dp_drv->pd_msg_mutex); mutex_init(&dp_drv->attention_lock); mutex_init(&dp_drv->hdcp_mutex); spin_lock_init(&dp_drv->lock); @@ -4057,8 +4344,9 @@ static int mdss_dp_probe(struct platform_device *pdev) dp_drv->inited = true; dp_drv->hpd_irq_on = false; + atomic_set(&dp_drv->notification_pending, 0); mdss_dp_reset_test_data(dp_drv); - init_completion(&dp_drv->irq_comp); + init_completion(&dp_drv->notification_comp); dp_drv->suspend_vic = HDMI_VFRMT_UNKNOWN; pr_debug("done\n"); diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h index 4decb26ea073..f358aad8a667 100644 --- a/drivers/video/fbdev/msm/mdss_dp.h +++ b/drivers/video/fbdev/msm/mdss_dp.h @@ -77,7 +77,7 @@ #define EDP_INTR_I2C_NACK BIT(18) #define EDP_INTR_I2C_DEFER BIT(21) #define EDP_INTR_PLL_UNLOCKED BIT(24) -#define EDP_INTR_AUX_ERROR BIT(27) +#define EDP_INTR_PHY_AUX_ERR BIT(27) #define EDP_INTR_STATUS1 \ @@ -85,7 +85,7 @@ EDP_INTR_WRONG_ADDR | EDP_INTR_TIMEOUT | \ EDP_INTR_NACK_DEFER | EDP_INTR_WRONG_DATA_CNT | \ EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER | \ - EDP_INTR_PLL_UNLOCKED | EDP_INTR_AUX_ERROR) + EDP_INTR_PLL_UNLOCKED | EDP_INTR_PHY_AUX_ERR) #define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2) @@ -110,6 +110,8 @@ struct edp_buf { int len; /* dara length */ char trans_num; /* transaction number */ char i2c; /* 1 == i2c cmd, 0 == native cmd */ + bool no_send_addr; + bool no_send_stop; }; /* USBPD-TypeC specific Macros */ @@ -186,6 +188,7 @@ struct dp_alt_mode { #define DPCD_MAX_DOWNSPREAD_0_5 BIT(2) #define DPCD_NO_AUX_HANDSHAKE BIT(3) #define DPCD_PORT_0_EDID_PRESENTED 
BIT(4) +#define DPCD_PORT_1_EDID_PRESENTED BIT(5) /* event */ #define EV_EDP_AUX_SETUP BIT(0) @@ -239,6 +242,8 @@ struct downstream_port_config { bool oui_support; }; +#define DP_MAX_DS_PORT_COUNT 2 + struct dpcd_cap { char major; char minor; @@ -249,7 +254,7 @@ struct dpcd_cap { char enhanced_frame; u32 max_link_rate; /* 162, 270 and 540 Mb, divided by 10 */ u32 flags; - u32 rx_port0_buf_size; + u32 rx_port_buf_size[DP_MAX_DS_PORT_COUNT]; u32 training_read_interval;/* us */ struct downstream_port_config downstream_port; }; @@ -426,6 +431,102 @@ struct mdss_dp_crc_data { u32 b_cb; }; +#define MDSS_DP_MAX_PHY_CFG_VALUE_CNT 3 +struct mdss_dp_phy_cfg { + u32 cfg_cnt; + u32 current_index; + u32 offset; + u32 lut[MDSS_DP_MAX_PHY_CFG_VALUE_CNT]; +}; + +/* PHY AUX config registers */ +enum dp_phy_aux_config_type { + PHY_AUX_CFG0, + PHY_AUX_CFG1, + PHY_AUX_CFG2, + PHY_AUX_CFG3, + PHY_AUX_CFG4, + PHY_AUX_CFG5, + PHY_AUX_CFG6, + PHY_AUX_CFG7, + PHY_AUX_CFG8, + PHY_AUX_CFG9, + PHY_AUX_CFG_MAX, +}; + +static inline const char *mdss_dp_get_phy_aux_config_property(u32 cfg_type) +{ + switch (cfg_type) { + case PHY_AUX_CFG0: + return "qcom,aux-cfg0-settings"; + case PHY_AUX_CFG1: + return "qcom,aux-cfg1-settings"; + case PHY_AUX_CFG2: + return "qcom,aux-cfg2-settings"; + case PHY_AUX_CFG3: + return "qcom,aux-cfg3-settings"; + case PHY_AUX_CFG4: + return "qcom,aux-cfg4-settings"; + case PHY_AUX_CFG5: + return "qcom,aux-cfg5-settings"; + case PHY_AUX_CFG6: + return "qcom,aux-cfg6-settings"; + case PHY_AUX_CFG7: + return "qcom,aux-cfg7-settings"; + case PHY_AUX_CFG8: + return "qcom,aux-cfg8-settings"; + case PHY_AUX_CFG9: + return "qcom,aux-cfg9-settings"; + default: + return "unknown"; + } +} + +static inline char *mdss_dp_phy_aux_config_type_to_string(u32 cfg_type) +{ + switch (cfg_type) { + case PHY_AUX_CFG0: + return DP_ENUM_STR(PHY_AUX_CFG0); + case PHY_AUX_CFG1: + return DP_ENUM_STR(PHY_AUX_CFG1); + case PHY_AUX_CFG2: + return DP_ENUM_STR(PHY_AUX_CFG2); + case PHY_AUX_CFG3: + return DP_ENUM_STR(PHY_AUX_CFG3); + case PHY_AUX_CFG4: + return DP_ENUM_STR(PHY_AUX_CFG4); + case PHY_AUX_CFG5: + return DP_ENUM_STR(PHY_AUX_CFG5); + case PHY_AUX_CFG6: + return DP_ENUM_STR(PHY_AUX_CFG6); + case PHY_AUX_CFG7: + return DP_ENUM_STR(PHY_AUX_CFG7); + case PHY_AUX_CFG8: + return DP_ENUM_STR(PHY_AUX_CFG8); + case PHY_AUX_CFG9: + return DP_ENUM_STR(PHY_AUX_CFG9); + default: + return "unknown"; + } +} + +enum dp_aux_transaction { + DP_AUX_WRITE, + DP_AUX_READ +}; + +static inline char *mdss_dp_aux_transaction_to_string(u32 transaction) +{ + switch (transaction) { + case DP_AUX_WRITE: + return DP_ENUM_STR(DP_AUX_WRITE); + case DP_AUX_READ: + return DP_ENUM_STR(DP_AUX_READ); + default: + return "unknown"; + } +} + struct mdss_dp_drv_pdata { /* device driver */ int (*on) (struct mdss_panel_data *pdata); @@ -449,11 +550,11 @@ struct mdss_dp_drv_pdata { bool core_clks_on; bool link_clks_on; bool power_on; - bool sink_info_read; u32 suspend_vic; bool hpd; bool psm_enabled; bool audio_test_req; + bool dpcd_read_required; /* dp specific */ unsigned char *base; @@ -513,10 +614,9 @@ struct mdss_dp_drv_pdata { struct completion aux_comp; struct completion idle_comp; struct completion video_comp; - struct completion irq_comp; + struct completion notification_comp; struct mutex aux_mutex; struct mutex train_mutex; - struct mutex pd_msg_mutex; struct mutex attention_lock; struct mutex hdcp_mutex; bool cable_connected; @@ -540,13 +640,14 @@ struct mdss_dp_drv_pdata { struct dp_statistic dp_stat; bool hpd_irq_on; u32 
hpd_notification_status; + atomic_t notification_pending; struct mdss_dp_event_data dp_event; struct task_struct *ev_thread; /* dt settings */ char l_map[4]; - u32 aux_cfg[AUX_CFG_LEN]; + struct mdss_dp_phy_cfg aux_cfg[PHY_AUX_CFG_MAX]; struct workqueue_struct *workq; struct delayed_work hdcp_cb_work; @@ -562,6 +663,7 @@ struct mdss_dp_drv_pdata { struct dpcd_test_request test_data; struct dpcd_sink_count sink_count; + struct dpcd_sink_count prev_sink_count; struct list_head attention_head; }; @@ -688,6 +790,7 @@ enum dp_aux_error { EDP_AUX_ERR_NACK = -3, EDP_AUX_ERR_DEFER = -4, EDP_AUX_ERR_NACK_DEFER = -5, + EDP_AUX_ERR_PHY = -6, }; static inline char *mdss_dp_get_aux_error(u32 aux_error) diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c index 8566b1d6985a..37209c161366 100644 --- a/drivers/video/fbdev/msm/mdss_dp_aux.c +++ b/drivers/video/fbdev/msm/mdss_dp_aux.c @@ -73,6 +73,21 @@ static int dp_buf_trailing(struct edp_buf *eb) return (int)(eb->end - eb->data); } +static void mdss_dp_aux_clear_hw_interrupts(void __iomem *phy_base) +{ + u32 data; + + data = dp_read(phy_base + DP_PHY_AUX_INTERRUPT_STATUS); + pr_debug("PHY_AUX_INTERRUPT_STATUS=0x%08x\n", data); + + dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f); + dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f); + dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0); + + /* Ensure that all interrupts are cleared and acked */ + wmb(); +} + /* * edp aux dp_buf_add_cmd: * NO native and i2c command mix allowed @@ -123,35 +138,46 @@ static int dp_buf_add_cmd(struct edp_buf *eb, struct edp_cmd *cmd) return cmd->len - 1; } -static int dp_cmd_fifo_tx(struct edp_buf *tp, unsigned char *base) +static int dp_cmd_fifo_tx(struct mdss_dp_drv_pdata *dp) { u32 data; - char *dp; + char *datap; int len, cnt; + struct edp_buf *tp = &dp->txp; + void __iomem *base = dp->base; + len = tp->len; /* total byte to cmd fifo */ if (len == 0) return 0; cnt = 0; - dp = tp->start; + datap = tp->start; while (cnt < len) { - data = *dp; /* data byte */ + data = *datap; /* data byte */ data <<= 8; data &= 0x00ff00; /* index = 0, write */ if (cnt == 0) data |= BIT(31); /* INDEX_WRITE */ dp_write(base + DP_AUX_DATA, data); cnt++; - dp++; + datap++; } + /* clear the current tx request before queuing a new one */ + dp_write(base + DP_AUX_TRANS_CTRL, 0); + + /* clear any previous PHY AUX interrupts */ + mdss_dp_aux_clear_hw_interrupts(dp->phy_io.base); + data = (tp->trans_num - 1); if (tp->i2c) { data |= BIT(8); /* I2C */ - data |= BIT(10); /* NO SEND ADDR */ - data |= BIT(11); /* NO SEND STOP */ + if (tp->no_send_addr) + data |= BIT(10); /* NO SEND ADDR */ + if (tp->no_send_stop) + data |= BIT(11); /* NO SEND STOP */ } data |= BIT(9); /* GO */ @@ -164,7 +190,7 @@ static int dp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base) { u32 data; char *dp; - int i; + int i, actual_i; data = 0; /* index = 0 */ data |= BIT(31); /* INDEX_WRITE */ @@ -177,7 +203,12 @@ static int dp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base) data = dp_read(base + DP_AUX_DATA); for (i = 0; i < len; i++) { data = dp_read(base + DP_AUX_DATA); - *dp++ = (char)((data >> 8) & 0xff); + *dp++ = (char)((data >> 8) & 0xFF); + + actual_i = (data >> 16) & 0xFF; + if (i != actual_i) + pr_warn("Index mismatch: expected %d, found %d\n", + i, actual_i); } rp->len = len; @@ -214,9 +245,16 @@ static int dp_aux_write_cmds(struct mdss_dp_drv_pdata *ep, reinit_completion(&ep->aux_comp); - len = dp_cmd_fifo_tx(&ep->txp, ep->base); + tp->no_send_addr 
= true; + tp->no_send_stop = true; + len = dp_cmd_fifo_tx(ep); - wait_for_completion_timeout(&ep->aux_comp, HZ/4); + if (!wait_for_completion_timeout(&ep->aux_comp, HZ/4)) { + pr_err("aux write timeout\n"); + ep->aux_error_num = EDP_AUX_ERR_TOUT; + /* Reset the AUX controller state machine */ + mdss_dp_aux_reset(&ep->ctrl_io); + } if (ep->aux_error_num == EDP_AUX_ERR_NONE) ret = len; @@ -228,13 +266,6 @@ static int dp_aux_write_cmds(struct mdss_dp_drv_pdata *ep, return ret; } -int dp_aux_write(void *ep, struct edp_cmd *cmd) -{ - int rc = dp_aux_write_cmds(ep, cmd); - - return rc < 0 ? -EINVAL : 0; -} - static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep, struct edp_cmd *cmds) { @@ -242,6 +273,7 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep, struct edp_buf *tp; struct edp_buf *rp; int len, ret; + u32 data; mutex_lock(&ep->aux_mutex); ep->aux_cmd_busy = 1; @@ -270,10 +302,23 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep, reinit_completion(&ep->aux_comp); - dp_cmd_fifo_tx(tp, ep->base); + tp->no_send_addr = true; + tp->no_send_stop = false; + dp_cmd_fifo_tx(ep); - wait_for_completion_timeout(&ep->aux_comp, HZ/4); + if (!wait_for_completion_timeout(&ep->aux_comp, HZ/4)) { + pr_err("aux read timeout\n"); + ep->aux_error_num = EDP_AUX_ERR_TOUT; + /* Reset the AUX controller state machine */ + mdss_dp_aux_reset(&ep->ctrl_io); + ret = ep->aux_error_num; + goto end; + } + /* clear the current rx request before queuing a new one */ + data = dp_read(ep->base + DP_AUX_TRANS_CTRL); + data &= (~BIT(9)); + dp_write(ep->base + DP_AUX_TRANS_CTRL, data); if (ep->aux_error_num == EDP_AUX_ERR_NONE) { ret = dp_cmd_fifo_rx(rp, len, ep->base); @@ -284,58 +329,128 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep, ret = ep->aux_error_num; } +end: ep->aux_cmd_busy = 0; mutex_unlock(&ep->aux_mutex); return ret; } -int dp_aux_read(void *ep, struct edp_cmd *cmds) -{ - int rc = dp_aux_read_cmds(ep, cmds); - - return rc < 0 ? 
-EINVAL : 0; -} - void dp_aux_native_handler(struct mdss_dp_drv_pdata *ep, u32 isr) { - if (isr & EDP_INTR_AUX_I2C_DONE) + pr_debug("isr=0x%08x\n", isr); + if (isr & EDP_INTR_AUX_I2C_DONE) { ep->aux_error_num = EDP_AUX_ERR_NONE; - else if (isr & EDP_INTR_WRONG_ADDR) + } else if (isr & EDP_INTR_WRONG_ADDR) { ep->aux_error_num = EDP_AUX_ERR_ADDR; - else if (isr & EDP_INTR_TIMEOUT) + } else if (isr & EDP_INTR_TIMEOUT) { ep->aux_error_num = EDP_AUX_ERR_TOUT; - if (isr & EDP_INTR_NACK_DEFER) + } else if (isr & EDP_INTR_NACK_DEFER) { ep->aux_error_num = EDP_AUX_ERR_NACK; + } else if (isr & EDP_INTR_PHY_AUX_ERR) { + ep->aux_error_num = EDP_AUX_ERR_PHY; + mdss_dp_aux_clear_hw_interrupts(ep->phy_io.base); + } else { + ep->aux_error_num = EDP_AUX_ERR_NONE; + } complete(&ep->aux_comp); } void dp_aux_i2c_handler(struct mdss_dp_drv_pdata *ep, u32 isr) { + pr_debug("isr=0x%08x\n", isr); if (isr & EDP_INTR_AUX_I2C_DONE) { if (isr & (EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER)) ep->aux_error_num = EDP_AUX_ERR_NACK; else ep->aux_error_num = EDP_AUX_ERR_NONE; } else { - if (isr & EDP_INTR_WRONG_ADDR) + if (isr & EDP_INTR_WRONG_ADDR) { ep->aux_error_num = EDP_AUX_ERR_ADDR; - else if (isr & EDP_INTR_TIMEOUT) + } else if (isr & EDP_INTR_TIMEOUT) { ep->aux_error_num = EDP_AUX_ERR_TOUT; - if (isr & EDP_INTR_NACK_DEFER) + } else if (isr & EDP_INTR_NACK_DEFER) { ep->aux_error_num = EDP_AUX_ERR_NACK_DEFER; - if (isr & EDP_INTR_I2C_NACK) + } else if (isr & EDP_INTR_I2C_NACK) { ep->aux_error_num = EDP_AUX_ERR_NACK; - if (isr & EDP_INTR_I2C_DEFER) + } else if (isr & EDP_INTR_I2C_DEFER) { ep->aux_error_num = EDP_AUX_ERR_DEFER; + } else if (isr & EDP_INTR_PHY_AUX_ERR) { + ep->aux_error_num = EDP_AUX_ERR_PHY; + mdss_dp_aux_clear_hw_interrupts(ep->phy_io.base); + } else { + ep->aux_error_num = EDP_AUX_ERR_NONE; + } } complete(&ep->aux_comp); } -static int dp_aux_write_buf(struct mdss_dp_drv_pdata *ep, u32 addr, - char *buf, int len, int i2c) +static int dp_aux_rw_cmds_retry(struct mdss_dp_drv_pdata *dp, + struct edp_cmd *cmd, enum dp_aux_transaction transaction) +{ + int const retry_count = 5; + int adjust_count = 0; + int i; + u32 aux_cfg1_config_count; + int ret; + + aux_cfg1_config_count = mdss_dp_phy_aux_get_config_cnt(dp, + PHY_AUX_CFG1); +retry: + i = 0; + ret = 0; + do { + struct edp_cmd cmd1 = *cmd; + + dp->aux_error_num = EDP_AUX_ERR_NONE; + pr_debug("Trying %s, iteration count: %d\n", + mdss_dp_aux_transaction_to_string(transaction), + i + 1); + if (transaction == DP_AUX_READ) + ret = dp_aux_read_cmds(dp, &cmd1); + else if (transaction == DP_AUX_WRITE) + ret = dp_aux_write_cmds(dp, &cmd1); + + i++; + } while ((i < retry_count) && (ret < 0)); + + if (ret >= 0) /* rw success */ + goto end; + + if (adjust_count >= aux_cfg1_config_count) { + pr_err("PHY_AUX_CONFIG1 calibration failed\n"); + goto end; + } + + /* Adjust AUX configuration and retry */ + pr_debug("AUX failure (%d), adjust AUX settings\n", ret); + mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1); + adjust_count++; + goto retry; + +end: + return ret; +} + +/** + * dp_aux_write_buf_retry() - send a AUX write command + * @dp: display port driver data + * @addr: AUX address (in hex) to write the command to + * @buf: the buffer containing the actual payload + * @len: the length of the buffer @buf + * @i2c: indicates if it is an i2c-over-aux transaction + * @retry: specifies if retries should be attempted upon failures + * + * Send an AUX write command with the specified payload over the AUX + * channel. 
This function can send both native AUX command or an + * i2c-over-AUX command. In addition, if specified, it can also retry + * when failures are detected. The retry logic would adjust AUX PHY + * parameters on the fly. + */ +static int dp_aux_write_buf_retry(struct mdss_dp_drv_pdata *dp, u32 addr, + char *buf, int len, int i2c, bool retry) { struct edp_cmd cmd; @@ -346,11 +461,42 @@ static int dp_aux_write_buf(struct mdss_dp_drv_pdata *ep, u32 addr, cmd.len = len & 0x0ff; cmd.next = 0; - return dp_aux_write_cmds(ep, &cmd); + if (retry) + return dp_aux_rw_cmds_retry(dp, &cmd, DP_AUX_WRITE); + else + return dp_aux_write_cmds(dp, &cmd); } -static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr, - int len, int i2c) +static int dp_aux_write_buf(struct mdss_dp_drv_pdata *dp, u32 addr, + char *buf, int len, int i2c) +{ + return dp_aux_write_buf_retry(dp, addr, buf, len, i2c, true); +} + +int dp_aux_write(void *dp, struct edp_cmd *cmd) +{ + int rc = dp_aux_write_cmds(dp, cmd); + + return rc < 0 ? -EINVAL : 0; +} + +/** + * dp_aux_read_buf_retry() - send a AUX read command + * @dp: display port driver data + * @addr: AUX address (in hex) to write the command to + * @buf: the buffer containing the actual payload + * @len: the length of the buffer @buf + * @i2c: indicates if it is an i2c-over-aux transaction + * @retry: specifies if retries should be attempted upon failures + * + * Send an AUX write command with the specified payload over the AUX + * channel. This function can send both native AUX command or an + * i2c-over-AUX command. In addition, if specified, it can also retry + * when failures are detected. The retry logic would adjust AUX PHY + * parameters on the fly. + */ +static int dp_aux_read_buf_retry(struct mdss_dp_drv_pdata *dp, u32 addr, + int len, int i2c, bool retry) { struct edp_cmd cmd = {0}; @@ -361,7 +507,23 @@ static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr, cmd.len = len & 0x0ff; cmd.next = 0; - return dp_aux_read_cmds(ep, &cmd); + if (retry) + return dp_aux_rw_cmds_retry(dp, &cmd, DP_AUX_READ); + else + return dp_aux_read_cmds(dp, &cmd); +} + +static int dp_aux_read_buf(struct mdss_dp_drv_pdata *dp, u32 addr, + int len, int i2c) +{ + return dp_aux_read_buf_retry(dp, addr, len, i2c, true); +} + +int dp_aux_read(void *dp, struct edp_cmd *cmds) +{ + int rc = dp_aux_read_cmds(dp, cmds); + + return rc < 0 ? -EINVAL : 0; } /* @@ -733,16 +895,68 @@ static void dp_aux_send_checksum(struct mdss_dp_drv_pdata *dp, u32 checksum) dp_aux_write_buf(dp, 0x260, data, 1, 0); } +int mdss_dp_aux_read_edid(struct mdss_dp_drv_pdata *dp, + u8 *buf, int size, int blk_num) +{ + int max_size_bytes = 16; + int rc, read_size; + int ret = 0; + u8 offset_lut[] = {0x0, 0x80}; + u8 offset; + + if (dp->test_data.test_requested == TEST_EDID_READ) + max_size_bytes = 128; + + /* + * Calculate the offset of the desired EDID block to be read. 
+ * For even blocks, offset starts at 0x0 + * For odd blocks, offset starts at 0x80 + */ + if (blk_num % 2) + offset = offset_lut[1]; + else + offset = offset_lut[0]; + + do { + struct edp_cmd cmd = {0}; + + read_size = min(size, max_size_bytes); + cmd.read = 1; + cmd.addr = EDID_START_ADDRESS; + cmd.len = read_size; + cmd.out_buf = buf; + cmd.i2c = 1; + + /* Write the offset first prior to reading the data */ + pr_debug("offset=0x%x, size=%d\n", offset, size); + dp_aux_write_buf_retry(dp, EDID_START_ADDRESS, &offset, 1, 1, + false); + rc = dp_aux_read(dp, &cmd); + if (rc < 0) { + pr_err("aux read failed\n"); + return rc; + } + + print_hex_dump(KERN_DEBUG, "DP:EDID: ", DUMP_PREFIX_NONE, 16, 1, + buf, read_size, false); + buf += read_size; + offset += read_size; + size -= read_size; + ret += read_size; + } while (size > 0); + + return ret; +} + int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp) { - struct edp_buf *rp = &dp->rxp; int rlen, ret = 0; int edid_blk = 0, blk_num = 0, retries = 10; bool edid_parsing_done = false; - const u8 cea_tag = 0x02, start_ext_blk = 0x1; u32 const segment_addr = 0x30; u32 checksum = 0; - char segment = 0x1; + bool phy_aux_update_requested = false; + bool ext_block_parsing_done = false; ret = dp_aux_chan_ready(dp); if (ret) { @@ -750,72 +964,91 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp) return ret; } + memset(dp->edid_buf, 0, dp->edid_buf_size); + /** * Parse the test request vector to see whether there is a * TEST_EDID_READ test request. */ dp_sink_parse_test_request(dp); - do { - rlen = dp_aux_read_buf(dp, EDID_START_ADDRESS + - (blk_num * EDID_BLOCK_SIZE), - EDID_BLOCK_SIZE, 1); + while (retries) { + u8 segment; + u8 edid_buf[EDID_BLOCK_SIZE] = {0}; + + /* + * Write the segment first. + * Segment = 0, for blocks 0 and 1 + * Segment = 1, for blocks 2 and 3 + * Segment = 2, for blocks 3 and 4 + * and so on ... + */ + segment = blk_num >> 1; + dp_aux_write_buf_retry(dp, segment_addr, &segment, 1, 1, false); + + rlen = mdss_dp_aux_read_edid(dp, edid_buf, EDID_BLOCK_SIZE, + blk_num); if (rlen != EDID_BLOCK_SIZE) { - pr_err("Read failed. rlen=%d\n", rlen); + pr_err("Read failed. 
rlen=%s\n", + mdss_dp_get_aux_error(rlen)); + mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1); + phy_aux_update_requested = true; + retries--; continue; } - pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen); - - if (dp_edid_is_valid_header(rp->data)) { - ret = dp_edid_buf_error(rp->data, rp->len); + print_hex_dump(KERN_DEBUG, "DP:EDID: ", DUMP_PREFIX_NONE, 16, 1, + edid_buf, EDID_BLOCK_SIZE, false); + if (dp_edid_is_valid_header(edid_buf)) { + ret = dp_edid_buf_error(edid_buf, rlen); if (ret) { pr_err("corrupt edid block detected\n"); + mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1); + phy_aux_update_requested = true; + retries--; continue; } if (edid_parsing_done) { + pr_debug("block 0 parsed already\n"); blk_num++; + retries--; continue; } - dp_extract_edid_manufacturer(&dp->edid, rp->data); - dp_extract_edid_product(&dp->edid, rp->data); - dp_extract_edid_version(&dp->edid, rp->data); - dp_extract_edid_ext_block_cnt(&dp->edid, rp->data); - dp_extract_edid_video_support(&dp->edid, rp->data); - dp_extract_edid_feature(&dp->edid, rp->data); + dp_extract_edid_manufacturer(&dp->edid, edid_buf); + dp_extract_edid_product(&dp->edid, edid_buf); + dp_extract_edid_version(&dp->edid, edid_buf); + dp_extract_edid_ext_block_cnt(&dp->edid, edid_buf); + dp_extract_edid_video_support(&dp->edid, edid_buf); + dp_extract_edid_feature(&dp->edid, edid_buf); dp_extract_edid_detailed_timing_description(&dp->edid, - rp->data); + edid_buf); edid_parsing_done = true; + } else if (!edid_parsing_done) { + pr_debug("Invalid edid block 0 header\n"); + /* Retry block 0 with adjusted phy aux settings */ + mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1); + phy_aux_update_requested = true; + retries--; + continue; } else { edid_blk++; blk_num++; - - /* fix dongle byte shift issue */ - if (edid_blk == 1 && rp->data[0] != cea_tag) { - u8 tmp[EDID_BLOCK_SIZE - 1]; - - memcpy(tmp, rp->data, EDID_BLOCK_SIZE - 1); - rp->data[0] = cea_tag; - memcpy(rp->data + 1, tmp, EDID_BLOCK_SIZE - 1); - } } memcpy(dp->edid_buf + (edid_blk * EDID_BLOCK_SIZE), - rp->data, EDID_BLOCK_SIZE); + edid_buf, EDID_BLOCK_SIZE); - checksum = rp->data[rp->len - 1]; + checksum = edid_buf[rlen - 1]; /* break if no more extension blocks present */ - if (edid_blk == dp->edid.ext_block_cnt) + if (edid_blk >= dp->edid.ext_block_cnt) { + ext_block_parsing_done = true; break; - - /* write segment number to read block 3 onwards */ - if (edid_blk == start_ext_blk) - dp_aux_write_buf(dp, segment_addr, &segment, 1, 1); - } while (retries--); + } + } if (dp->test_data.test_requested == TEST_EDID_READ) { pr_debug("sending checksum %d\n", checksum); @@ -823,6 +1056,18 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp) dp->test_data = (const struct dpcd_test_request){ 0 }; } + /* + * Trigger the reading of DPCD if there was a change in the AUX + * configuration caused by a failure while reading the EDID. + * This is required to ensure the integrity and validity + * of the sink capabilities read that will subsequently be used + * to establish the mainlink. 
+ */ + if (edid_parsing_done && ext_block_parsing_done + && phy_aux_update_requested) { + dp->dpcd_read_required = true; + } + return ret; } @@ -834,6 +1079,10 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep) struct dpcd_cap *cap; struct edp_buf *rp; int rlen; + int i; + + cap = &ep->dpcd; + memset(cap, 0, sizeof(*cap)); rlen = dp_aux_read_buf(ep, 0, len, 0); if (rlen <= 0) { @@ -848,11 +1097,8 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep) } rp = &ep->rxp; - cap = &ep->dpcd; bp = rp->data; - memset(cap, 0, sizeof(*cap)); - data = *bp++; /* byte 0 */ cap->major = (data >> 4) & 0x0f; cap->minor = data & 0x0f; @@ -909,6 +1155,11 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep) data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */ cap->downstream_port.dfp_count = data & 0x7; + if (cap->downstream_port.dfp_count > DP_MAX_DS_PORT_COUNT) { + pr_debug("DS port count %d greater that max (%d) supported\n", + cap->downstream_port.dfp_count, DP_MAX_DS_PORT_COUNT); + cap->downstream_port.dfp_count = DP_MAX_DS_PORT_COUNT; + } cap->downstream_port.msa_timing_par_ignored = data & BIT(6); cap->downstream_port.oui_support = data & BIT(7); pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n", @@ -916,17 +1167,23 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep) cap->downstream_port.msa_timing_par_ignored); pr_debug("oui_support = %d\n", cap->downstream_port.oui_support); - data = *bp++; /* byte 8 */ - if (data & BIT(1)) { - cap->flags |= DPCD_PORT_0_EDID_PRESENTED; - pr_debug("edid presented\n"); - } - - data = *bp++; /* byte 9 */ - cap->rx_port0_buf_size = (data + 1) * 32; - pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size); + for (i = 0; i < DP_MAX_DS_PORT_COUNT; i++) { + data = *bp++; /* byte 8 + i*2 */ + pr_debug("parsing capabilities for DS port %d\n", i); + if (data & BIT(1)) { + if (i == 0) + cap->flags |= DPCD_PORT_0_EDID_PRESENTED; + else + cap->flags |= DPCD_PORT_1_EDID_PRESENTED; + pr_debug("local edid present\n"); + } else { + pr_debug("local edid absent\n"); + } - bp += 2; /* skip 10, 11 port1 capability */ + data = *bp++; /* byte 9 + i*2 */ + cap->rx_port_buf_size[i] = (data + 1) * 32; + pr_debug("lane_buf_size=%d\n", cap->rx_port_buf_size[i]); + } data = *bp++; /* byte 12 */ cap->i2c_speed_ctrl = data; @@ -1258,6 +1515,8 @@ static void dp_sink_parse_sink_count(struct mdss_dp_drv_pdata *ep) int const param_len = 0x1; int const sink_count_addr = 0x200; + ep->prev_sink_count = ep->sink_count; + rlen = dp_aux_read_buf(ep, sink_count_addr, param_len, 0); if (rlen < param_len) { pr_err("failed to read sink count\n"); @@ -2363,8 +2622,8 @@ clear: void mdss_dp_aux_parse_sink_status_field(struct mdss_dp_drv_pdata *ep) { dp_sink_parse_sink_count(ep); - dp_sink_parse_test_request(ep); mdss_dp_aux_link_status_read(ep, 6); + dp_sink_parse_test_request(ep); } int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *ep) diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c index ea492f54054c..0d9cf7b72b4d 100644 --- a/drivers/video/fbdev/msm/mdss_dp_util.c +++ b/drivers/video/fbdev/msm/mdss_dp_util.c @@ -858,6 +858,48 @@ void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate, pr_debug("dp_tu=0x%x\n", dp_tu); } +void mdss_dp_aux_set_limits(struct dss_io_data *ctrl_io) +{ + u32 const max_aux_timeout_count = 0xFFFFF; + u32 const max_aux_limits = 0xFFFFFFFF; + + pr_debug("timeout=0x%x, limits=0x%x\n", + max_aux_timeout_count, max_aux_limits); + + writel_relaxed(max_aux_timeout_count, + ctrl_io->base + 
DP_AUX_TIMEOUT_COUNT); + writel_relaxed(max_aux_limits, ctrl_io->base + DP_AUX_LIMITS); +} + +void mdss_dp_phy_aux_update_config(struct mdss_dp_drv_pdata *dp, + enum dp_phy_aux_config_type config_type) +{ + u32 new_index; + struct dss_io_data *phy_io = &dp->phy_io; + struct mdss_dp_phy_cfg *cfg = mdss_dp_phy_aux_get_config(dp, + config_type); + + if (!cfg) { + pr_err("invalid config type %s", + mdss_dp_phy_aux_config_type_to_string(config_type)); + return; + } + + new_index = (cfg->current_index + 1) % cfg->cfg_cnt; + + pr_debug("Updating %s from 0x%08x to 0x%08x\n", + mdss_dp_phy_aux_config_type_to_string(config_type), + cfg->lut[cfg->current_index], cfg->lut[new_index]); + writel_relaxed(cfg->lut[new_index], phy_io->base + cfg->offset); + cfg->current_index = new_index; + + /* Make sure the new HW configuration takes effect */ + wmb(); + + /* Reset the AUX controller before any subsequent transactions */ + mdss_dp_aux_reset(&dp->ctrl_io); +} + void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, char *l_map) { u8 bits_per_lane = 2; @@ -870,26 +912,24 @@ void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, char *l_map) ctrl_io->base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING); } -void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io, u32 *aux_cfg, - u32 phy_reg_offset) +void mdss_dp_phy_aux_setup(struct mdss_dp_drv_pdata *dp) { - void __iomem *adjusted_phy_io_base = phy_io->base + phy_reg_offset; + int i; + void __iomem *adjusted_phy_io_base = dp->phy_io.base + + dp->phy_reg_offset; writel_relaxed(0x3d, adjusted_phy_io_base + DP_PHY_PD_CTL); - /* DP AUX CFG register programming */ - writel_relaxed(aux_cfg[0], adjusted_phy_io_base + DP_PHY_AUX_CFG0); - writel_relaxed(aux_cfg[1], adjusted_phy_io_base + DP_PHY_AUX_CFG1); - writel_relaxed(aux_cfg[2], adjusted_phy_io_base + DP_PHY_AUX_CFG2); - writel_relaxed(aux_cfg[3], adjusted_phy_io_base + DP_PHY_AUX_CFG3); - writel_relaxed(aux_cfg[4], adjusted_phy_io_base + DP_PHY_AUX_CFG4); - writel_relaxed(aux_cfg[5], adjusted_phy_io_base + DP_PHY_AUX_CFG5); - writel_relaxed(aux_cfg[6], adjusted_phy_io_base + DP_PHY_AUX_CFG6); - writel_relaxed(aux_cfg[7], adjusted_phy_io_base + DP_PHY_AUX_CFG7); - writel_relaxed(aux_cfg[8], adjusted_phy_io_base + DP_PHY_AUX_CFG8); - writel_relaxed(aux_cfg[9], adjusted_phy_io_base + DP_PHY_AUX_CFG9); - - writel_relaxed(0x1f, adjusted_phy_io_base + DP_PHY_AUX_INTERRUPT_MASK); + for (i = 0; i < PHY_AUX_CFG_MAX; i++) { + struct mdss_dp_phy_cfg *cfg = mdss_dp_phy_aux_get_config(dp, i); + + pr_debug("%s: offset=0x%08x, value=0x%08x\n", + mdss_dp_phy_aux_config_type_to_string(i), cfg->offset, + cfg->lut[cfg->current_index]); + writel_relaxed(cfg->lut[cfg->current_index], + dp->phy_io.base + cfg->offset); + }; + writel_relaxed(0x1e, adjusted_phy_io_base + DP_PHY_AUX_INTERRUPT_MASK); } int mdss_dp_irq_setup(struct mdss_dp_drv_pdata *dp_drv) diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h index 8f19e7cdf3cf..4c93e48e97dc 100644 --- a/drivers/video/fbdev/msm/mdss_dp_util.h +++ b/drivers/video/fbdev/msm/mdss_dp_util.h @@ -35,6 +35,8 @@ #define DP_AUX_CTRL (0x00000230) #define DP_AUX_DATA (0x00000234) #define DP_AUX_TRANS_CTRL (0x00000238) +#define DP_AUX_TIMEOUT_COUNT (0x0000023C) +#define DP_AUX_LIMITS (0x00000240) #define DP_AUX_STATUS (0x00000244) #define DP_DPCD_CP_IRQ (0x201) @@ -163,6 +165,7 @@ #define DP_PHY_AUX_CFG9 (0x00000040) #define DP_PHY_AUX_INTERRUPT_MASK (0x00000044) #define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048) +#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000B8) 
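Note: mdss_dp_phy_aux_update_config() above steps through the devicetree LUT for a given AUX config register, and dp_aux_rw_cmds_retry() earlier in the patch retries a failed transaction a fixed number of times per LUT entry before rotating to the next one. A loose standalone sketch of that loop shape; do_transfer() is a hypothetical stand-in for the real AUX read/write, and the driver additionally makes one more pass after wrapping back to the first value:

#include <stdio.h>

#define RETRIES_PER_CFG 5                 /* matches retry_count in the patch */

struct aux_cfg { unsigned int cfg_cnt, current_index, lut[3]; };

/* Advance to the next LUT entry, wrapping around, as the PHY update does. */
static void aux_cfg_rotate(struct aux_cfg *cfg)
{
	cfg->current_index = (cfg->current_index + 1) % cfg->cfg_cnt;
}

/* Hypothetical transfer that only succeeds for one particular config value. */
static int do_transfer(unsigned int cfg_value)
{
	return cfg_value == 0x23 ? 0 : -1;
}

static int aux_rw_retry(struct aux_cfg *cfg)
{
	unsigned int adjust;
	int i, ret = -1;

	for (adjust = 0; adjust < cfg->cfg_cnt; adjust++) {
		for (i = 0; i < RETRIES_PER_CFG; i++) {
			ret = do_transfer(cfg->lut[cfg->current_index]);
			if (ret >= 0)
				return ret;       /* transfer succeeded      */
		}
		aux_cfg_rotate(cfg);              /* try the next LUT value  */
	}
	return ret;                               /* calibration exhausted   */
}

int main(void)
{
	struct aux_cfg cfg1 = { .cfg_cnt = 3, .current_index = 0,
				.lut = { 0x13, 0x23, 0x1d } };

	printf("aux transfer %s\n", aux_rw_retry(&cfg1) >= 0 ? "ok" : "failed");
	return 0;
}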
#define DP_PHY_SPARE0 0x00A8 @@ -271,6 +274,19 @@ static const struct dp_vc_tu_mapping_table tu_table[] = { 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, }; +static inline struct mdss_dp_phy_cfg *mdss_dp_phy_aux_get_config( + struct mdss_dp_drv_pdata *dp, enum dp_phy_aux_config_type cfg_type) +{ + return &dp->aux_cfg[cfg_type]; +} + +static inline u32 mdss_dp_phy_aux_get_config_cnt( + struct mdss_dp_drv_pdata *dp, enum dp_phy_aux_config_type cfg_type) +{ + return dp->aux_cfg[cfg_type].cfg_cnt; +} + +void mdss_dp_aux_set_limits(struct dss_io_data *ctrl_io); int dp_aux_read(void *ep, struct edp_cmd *cmds); int dp_aux_write(void *ep, struct edp_cmd *cmd); void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data); @@ -285,8 +301,9 @@ void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert); void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate, u8 ln_cnt, u32 res, struct mdss_panel_info *pinfo); void mdss_dp_config_misc(struct mdss_dp_drv_pdata *dp, u32 bd, u32 cc); -void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io, u32 *aux_cfg, - u32 phy_reg_offset); +void mdss_dp_phy_aux_setup(struct mdss_dp_drv_pdata *dp); +void mdss_dp_phy_aux_update_config(struct mdss_dp_drv_pdata *dp, + enum dp_phy_aux_config_type config_type); void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable); void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable); void mdss_dp_mainlink_ctrl(struct dss_io_data *ctrl_io, bool enable); diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index 17722eac3006..b1552829508d 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -2713,10 +2713,7 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata, rc = mdss_dsi_reconfig(pdata, mode); break; case MDSS_EVENT_DSI_PANEL_STATUS: - if (ctrl_pdata->check_status) - rc = ctrl_pdata->check_status(ctrl_pdata); - else - rc = true; + rc = mdss_dsi_check_panel_status(ctrl_pdata, arg); break; case MDSS_EVENT_PANEL_TIMING_SWITCH: rc = mdss_dsi_panel_timing_switch(ctrl_pdata, arg); diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h index 2a76466abf3e..00f23380591b 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.h +++ b/drivers/video/fbdev/msm/mdss_dsi.h @@ -704,6 +704,7 @@ void mdss_dsi_cfg_lane_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off, u32 mask, u32 val); int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl); +int mdss_dsi_check_panel_status(struct mdss_dsi_ctrl_pdata *ctrl, void *arg); static inline const char *__mdss_dsi_pm_name(enum dsi_pm_type module) { diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c index 9f4b7eb52492..3b48bb642792 100644 --- a/drivers/video/fbdev/msm/mdss_dsi_host.c +++ b/drivers/video/fbdev/msm/mdss_dsi_host.c @@ -778,6 +778,12 @@ static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event) u32 loop = 10, u_dly = 200; pr_debug("%s: MDSS DSI CTRL and PHY reset. 
ctrl-num = %d\n", __func__, ctrl->ndx); + + if (ctrl->panel_mode == DSI_CMD_MODE) { + pr_warn("ctl_phy_reset not applicable for cmd mode\n"); + return; + } + if (event == DSI_EV_DLNx_FIFO_OVERFLOW) { mask = BIT(20); /* clock lane only for overflow recovery */ } else if (event == DSI_EV_LP_RX_TIMEOUT) { @@ -792,15 +798,6 @@ static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event) ctrl0 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_0); ctrl1 = mdss_dsi_get_ctrl_by_index(DSI_CTRL_1); - if (ctrl0->recovery) { - rc = ctrl0->recovery->fxn(ctrl0->recovery->data, - MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW); - if (rc < 0) { - pr_debug("%s: Target is in suspend/shutdown\n", - __func__); - return; - } - } /* * Disable PHY contention detection and receive. * Configure the strength ctrl 1 register. @@ -874,6 +871,15 @@ static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event) MIPI_OUTP(ctrl0->ctrl_base + 0x0ac, ln_ctrl0 & ~mask); MIPI_OUTP(ctrl1->ctrl_base + 0x0ac, ln_ctrl1 & ~mask); + if (ctrl0->recovery) { + rc = ctrl0->recovery->fxn(ctrl0->recovery->data, + MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW); + if (rc < 0) { + pr_debug("%s: Target is in suspend/shutdown\n", + __func__); + return; + } + } /* Enable Video mode for DSI controller */ MIPI_OUTP(ctrl0->ctrl_base + 0x004, data0); MIPI_OUTP(ctrl1->ctrl_base + 0x004, data1); @@ -890,15 +896,6 @@ static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event) */ udelay(200); } else { - if (ctrl->recovery) { - rc = ctrl->recovery->fxn(ctrl->recovery->data, - MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW); - if (rc < 0) { - pr_debug("%s: Target is in suspend/shutdown\n", - __func__); - return; - } - } /* Disable PHY contention detection and receive */ MIPI_OUTP((ctrl->phy_io.base) + 0x0188, 0); @@ -951,6 +948,15 @@ static void mdss_dsi_ctl_phy_reset(struct mdss_dsi_ctrl_pdata *ctrl, u32 event) __func__, ln0); MIPI_OUTP(ctrl->ctrl_base + 0x0ac, ln_ctrl0 & ~mask); + if (ctrl->recovery) { + rc = ctrl->recovery->fxn(ctrl->recovery->data, + MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW); + if (rc < 0) { + pr_debug("%s: Target is in suspend/shutdown\n", + __func__); + return; + } + } /* Enable Video mode for DSI controller */ MIPI_OUTP(ctrl->ctrl_base + 0x004, data0); /* Enable PHY contention detection and receiver */ @@ -1311,6 +1317,31 @@ void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl) } +static void mdss_dsi_split_link_setup(struct mdss_dsi_ctrl_pdata *ctrl_pdata) +{ + u32 data = 0; + struct mdss_panel_info *pinfo; + + if (!ctrl_pdata) + return; + + pinfo = &ctrl_pdata->panel_data.panel_info; + if (!pinfo->split_link_enabled) + return; + + pr_debug("%s: enable split link\n", __func__); + + data = MIPI_INP((ctrl_pdata->ctrl_base) + 0x330); + /* DMA_LINK_SEL */ + data |= 0x3 << 12; + /* MDP0_LINK_SEL */ + data |= 0x5 << 20; + /* EN */ + data |= 0x1; + /* DSI_SPLIT_LINK_CTRL */ + MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x330, data); +} + static void mdss_dsi_mode_setup(struct mdss_panel_data *pdata) { struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL; @@ -1429,6 +1460,8 @@ static void mdss_dsi_mode_setup(struct mdss_panel_data *pdata) } mdss_dsi_dsc_config(ctrl_pdata, dsc); + + mdss_dsi_split_link_setup(ctrl_pdata); } void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl) @@ -2998,7 +3031,10 @@ bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl) * warning message is ignored. 
*/ if (ctrl->panel_data.panel_info.esd_check_enabled && - (ctrl->status_mode == ESD_BTA) && (status & 0x1008000)) + ((ctrl->status_mode == ESD_BTA) || + (ctrl->status_mode == ESD_REG) || + (ctrl->status_mode == ESD_REG_NT35596)) && + (status & 0x1008000)) return false; pr_err("%s: status=%x\n", __func__, status); @@ -3229,8 +3265,10 @@ irqreturn_t mdss_dsi_isr(int irq, void *ptr) * cleared. */ if (ctrl->panel_data.panel_info.esd_check_enabled && - (ctrl->status_mode == ESD_BTA) && - (ctrl->panel_mode == DSI_VIDEO_MODE)) { + ((ctrl->status_mode == ESD_BTA) || + (ctrl->status_mode == ESD_REG) || + (ctrl->status_mode == ESD_REG_NT35596)) && + (ctrl->panel_mode == DSI_VIDEO_MODE)) { isr &= ~DSI_INTR_ERROR; /* clear only overflow */ mdss_dsi_set_reg(ctrl, 0x0c, 0x44440000, 0x44440000); diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c index 6f20c0ed0455..9faa1531c256 100644 --- a/drivers/video/fbdev/msm/mdss_dsi_panel.c +++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c @@ -1350,6 +1350,44 @@ static int mdss_dsi_parse_hdr_settings(struct device_node *np, return 0; } +static int mdss_dsi_parse_split_link_settings(struct device_node *np, + struct mdss_panel_info *pinfo) +{ + u32 tmp; + int rc = 0; + + if (!np) { + pr_err("%s: device node pointer is NULL\n", __func__); + return -EINVAL; + } + + if (!pinfo) { + pr_err("%s: panel info is NULL\n", __func__); + return -EINVAL; + } + + pinfo->split_link_enabled = of_property_read_bool(np, + "qcom,split-link-enabled"); + + if (pinfo->split_link_enabled) { + rc = of_property_read_u32(np, + "qcom,sublinks-count", &tmp); + /* default num of sublink is 1*/ + pinfo->mipi.num_of_sublinks = (!rc ? tmp : 1); + + rc = of_property_read_u32(np, + "qcom,lanes-per-sublink", &tmp); + /* default num of lanes per sublink is 1 */ + pinfo->mipi.lanes_per_sublink = (!rc ? tmp : 1); + } + + pr_info("%s: enable %d sublinks-count %d lanes per sublink %d\n", + __func__, pinfo->split_link_enabled, + pinfo->mipi.num_of_sublinks, + pinfo->mipi.lanes_per_sublink); + return 0; +} + static int mdss_dsi_parse_dsc_version(struct device_node *np, struct mdss_panel_timing *timing) { @@ -2734,9 +2772,15 @@ static int mdss_panel_parse_dt(struct device_node *np, pinfo->mipi.data_lane3 = of_property_read_bool(np, "qcom,mdss-dsi-lane-3-state"); + /* parse split link properties */ + rc = mdss_dsi_parse_split_link_settings(np, pinfo); + if (rc) + return rc; + rc = mdss_panel_parse_display_timings(np, &ctrl_pdata->panel_data); if (rc) return rc; + rc = mdss_dsi_parse_hdr_settings(np, pinfo); if (rc) return rc; diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c index 4208c2c43efb..0f24f66dbcc6 100644 --- a/drivers/video/fbdev/msm/mdss_dsi_status.c +++ b/drivers/video/fbdev/msm/mdss_dsi_status.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -39,6 +39,35 @@ static uint32_t interval = STATUS_CHECK_INTERVAL_MS; static int32_t dsi_status_disable = DSI_STATUS_CHECK_INIT; struct dsi_status_data *pstatus_data; +int mdss_dsi_check_panel_status(struct mdss_dsi_ctrl_pdata *ctrl, void *arg) +{ + struct mdss_mdp_ctl *ctl = NULL; + struct msm_fb_data_type *mfd = arg; + int ret = 0; + + if (!mfd) + return -EINVAL; + + ctl = mfd_to_ctl(mfd); + + if (!ctl || !ctrl) + return -EINVAL; + + mutex_lock(&ctl->offlock); + /* + * if check_status method is not defined + * then no need to fail this function, + * instead return a positive value. + */ + if (ctrl->check_status) + ret = ctrl->check_status(ctrl); + else + ret = 1; + mutex_unlock(&ctl->offlock); + + return ret; +} + /* * check_dsi_ctrl_status() - Reads MFD structure and * calls platform specific DSI ctrl Status function. diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index fc47de7692e7..698c5633cf6a 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -374,7 +374,8 @@ static int mdss_fb_get_panel_xres(struct mdss_panel_info *pinfo) xres = pinfo->xres; if (pdata->next && pdata->next->active) xres += mdss_fb_get_panel_xres(&pdata->next->panel_info); - + if (pinfo->split_link_enabled) + xres = xres * pinfo->mipi.num_of_sublinks; return xres; } @@ -652,7 +653,7 @@ static ssize_t mdss_fb_get_panel_status(struct device *dev, ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n", "suspend"); } else { panel_status = mdss_fb_send_panel_event(mfd, - MDSS_EVENT_DSI_PANEL_STATUS, NULL); + MDSS_EVENT_DSI_PANEL_STATUS, mfd); ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n", panel_status > 0 ? "alive" : "dead"); } @@ -1595,13 +1596,30 @@ static int mdss_fb_resume(struct platform_device *pdev) static int mdss_fb_pm_suspend(struct device *dev) { struct msm_fb_data_type *mfd = dev_get_drvdata(dev); + int rc = 0; if (!mfd) return -ENODEV; dev_dbg(dev, "display pm suspend\n"); - return mdss_fb_suspend_sub(mfd); + rc = mdss_fb_suspend_sub(mfd); + + /* + * Call MDSS footswitch control to ensure GDSC is + * off after pm suspend call. There are cases when + * mdss runtime call doesn't trigger even when clock + * ref count is zero after fb pm suspend. 
+ */ + if (!rc) { + if (mfd->mdp.footswitch_ctrl) + mfd->mdp.footswitch_ctrl(false); + } else { + pr_err("fb pm suspend failed, rc: %d\n", rc); + } + + return rc; + } static int mdss_fb_pm_resume(struct device *dev) @@ -1621,6 +1639,9 @@ static int mdss_fb_pm_resume(struct device *dev) pm_runtime_set_suspended(dev); pm_runtime_enable(dev); + if (mfd->mdp.footswitch_ctrl) + mfd->mdp.footswitch_ctrl(true); + return mdss_fb_resume_sub(mfd); } #endif diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h index f046ff08cbf7..518c24810acd 100644 --- a/drivers/video/fbdev/msm/mdss_fb.h +++ b/drivers/video/fbdev/msm/mdss_fb.h @@ -232,6 +232,7 @@ struct msm_mdp_interface { int (*configure_panel)(struct msm_fb_data_type *mfd, int mode, int dest_ctrl); int (*input_event_handler)(struct msm_fb_data_type *mfd); + void (*footswitch_ctrl)(bool on); int (*pp_release_fnc)(struct msm_fb_data_type *mfd); void *private1; }; @@ -392,6 +393,12 @@ static inline void mdss_fb_update_notify_update(struct msm_fb_data_type *mfd) } } +/* Function returns true for split link */ +static inline bool is_panel_split_link(struct msm_fb_data_type *mfd) +{ + return mfd && mfd->panel_info && mfd->panel_info->split_link_enabled; +} + /* Function returns true for either any kind of dual display */ static inline bool is_panel_split(struct msm_fb_data_type *mfd) { diff --git a/drivers/video/fbdev/msm/mdss_hdcp_1x.c b/drivers/video/fbdev/msm/mdss_hdcp_1x.c index 834726e84bda..2dc9c8f96c5b 100644 --- a/drivers/video/fbdev/msm/mdss_hdcp_1x.c +++ b/drivers/video/fbdev/msm/mdss_hdcp_1x.c @@ -1381,7 +1381,8 @@ int hdcp_1x_authenticate(void *input) flush_delayed_work(&hdcp->hdcp_auth_work); - if (!hdcp_1x_state(HDCP_STATE_INACTIVE)) { + if (!hdcp_1x_state(HDCP_STATE_INACTIVE) && + !hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) { pr_err("invalid state\n"); return -EINVAL; } @@ -1443,7 +1444,6 @@ int hdcp_1x_reauthenticate(void *input) DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit); - hdcp->hdcp_state = HDCP_STATE_INACTIVE; hdcp_1x_authenticate(hdcp); return ret; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c index 37c4be6135aa..599f6cb44c63 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c @@ -2604,6 +2604,11 @@ void hdmi_edid_set_video_resolution(void *input, u32 resolution, bool reset) return; } + if (resolution == HDMI_VFRMT_UNKNOWN) { + pr_debug("%s: Default video resolution not set\n", __func__); + return; + } + edid_ctrl->video_resolution = resolution; if (reset) { diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index 171f44815430..a645a3495593 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -233,7 +233,6 @@ static struct mdss_mdp_irq mdp_irq_map[] = { static struct intr_callback *mdp_intr_cb; -static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on); static int mdss_mdp_parse_dt(struct platform_device *pdev); static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev); static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev); @@ -5172,7 +5171,7 @@ static void mdss_mdp_notify_idle_pc(struct mdss_data_type *mdata) * active (but likely in an idle state), the vote for the CX and the batfet * rails should not be released. 
*/ -static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on) +void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on) { int ret; int active_cnt = 0; diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h index 2fd047edd3e8..db037ed263b4 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.h +++ b/drivers/video/fbdev/msm/mdss_mdp.h @@ -1267,6 +1267,8 @@ static inline u32 get_panel_width(struct mdss_mdp_ctl *ctl) width = get_panel_xres(&ctl->panel_data->panel_info); if (ctl->panel_data->next && is_pingpong_split(ctl->mfd)) width += get_panel_xres(&ctl->panel_data->next->panel_info); + else if (is_panel_split_link(ctl->mfd)) + width *= (ctl->panel_data->panel_info.mipi.num_of_sublinks); return width; } @@ -1638,6 +1640,7 @@ int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num, void (*fnc_ptr)(void *), void *arg); u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num); +void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on); void mdss_mdp_footswitch_ctrl_splash(int on); void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable); void mdss_mdp_set_clk_rate(unsigned long min_clk_rate, bool locked); diff --git a/drivers/video/fbdev/msm/mdss_mdp_cdm.c b/drivers/video/fbdev/msm/mdss_mdp_cdm.c index f1d1bdd301e3..10928e6bceaa 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_cdm.c +++ b/drivers/video/fbdev/msm/mdss_mdp_cdm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -377,6 +377,17 @@ int mdss_mdp_cdm_destroy(struct mdss_mdp_cdm *cdm) return -EINVAL; } + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); + mutex_lock(&cdm->lock); + /* Disable HDMI packer */ + writel_relaxed(0x0, cdm->base + MDSS_MDP_REG_CDM_HDMI_PACK_OP_MODE); + + /* Put CDM in bypass */ + writel_relaxed(0x0, cdm->mdata->mdp_base + MDSS_MDP_MDP_OUT_CTL_0); + + mutex_unlock(&cdm->lock); + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); + kref_put(&cdm->kref, mdss_mdp_cdm_free); return rc; diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c index 49348e5e16a9..a66ecb7a57b7 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c +++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c @@ -709,6 +709,8 @@ int mdss_mdp_get_panel_params(struct mdss_mdp_pipe *pipe, *h_total += mdss_panel_get_htotal( &mixer->ctl->panel_data->next->panel_info, false); + else if (is_panel_split_link(mixer->ctl->mfd)) + *h_total *= pinfo->mipi.num_of_sublinks; } else { *v_total = mixer->height; *xres = mixer->width; @@ -4090,6 +4092,13 @@ static void mdss_mdp_ctl_split_display_enable(int enable, if (is_pingpong_split(main_ctl->mfd)) lower |= BIT(2); upper = lower; + + /* + * align command mode stream0 output for + * intferace 1 and 2 to start of frame. 
+ */ + if (main_ctl->mdata->mdp_rev >= MDSS_MDP_HW_REV_320) + lower |= BIT(12); } else { /* interface controlling sw trigger (video mode) */ if (main_ctl->intf_num == MDSS_MDP_INTF2) { @@ -4101,6 +4110,9 @@ static void mdss_mdp_ctl_split_display_enable(int enable, } } } + + if (is_panel_split_link(main_ctl->mfd)) + upper = lower = 0; writel_relaxed(upper, main_ctl->mdata->mdp_base + MDSS_MDP_REG_SPLIT_DISPLAY_UPPER_PIPE_CTRL); writel_relaxed(lower, main_ctl->mdata->mdp_base + @@ -4269,7 +4281,8 @@ void mdss_mdp_ctl_restore(bool locked) if (sctl) { mdss_mdp_ctl_restore_sub(sctl); mdss_mdp_ctl_split_display_enable(1, ctl, sctl); - } else if (is_pingpong_split(ctl->mfd)) { + } else if (is_pingpong_split(ctl->mfd) || + is_panel_split_link(ctl->mfd)) { mdss_mdp_ctl_pp_split_display_enable(1, ctl); } @@ -4396,6 +4409,8 @@ int mdss_mdp_ctl_start(struct mdss_mdp_ctl *ctl, bool handoff) } else if (is_pingpong_split(ctl->mfd)) { ctl->slave_intf_num = (ctl->intf_num + 1); mdss_mdp_ctl_pp_split_display_enable(true, ctl); + } else if (is_panel_split_link(ctl->mfd)) { + mdss_mdp_ctl_pp_split_display_enable(true, ctl); } } diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h index 7d495232c198..d9e2b042bfc3 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h +++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h @@ -850,4 +850,8 @@ enum mdss_mdp_pingpong_index { #define MDSS_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4)) #define MDSS_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR 4 +#define MDSS_MDP_REG_SPLIT_LINK 0x00060 +#define MDSS_MDP_REG_SPLIT_LINK_LEFT_LINK_EN BIT(1) +#define MDSS_MDP_REG_SPLIT_LINK_RIGHT_LINK_EN BIT(2) + #endif diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c index a3511a1a07ef..fe258d85b667 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c +++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c @@ -1071,6 +1071,13 @@ static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl, int panel_power_state) { int intfs_num, ret = 0; + if (ctl->cdm) { + if (!mdss_mdp_cdm_destroy(ctl->cdm)) + mdss_mdp_ctl_write(ctl, + MDSS_MDP_REG_CTL_FLUSH, BIT(26)); + ctl->cdm = NULL; + } + intfs_num = ctl->intf_num - MDSS_MDP_INTF0; ret = mdss_mdp_video_intfs_stop(ctl, ctl->panel_data, intfs_num); if (IS_ERR_VALUE(ret)) { @@ -1083,10 +1090,6 @@ static int mdss_mdp_video_stop(struct mdss_mdp_ctl *ctl, int panel_power_state) mdss_mdp_ctl_reset(ctl, false); ctl->intf_ctx[MASTER_CTX] = NULL; - if (ctl->cdm) { - mdss_mdp_cdm_destroy(ctl->cdm); - ctl->cdm = NULL; - } return 0; } @@ -1682,6 +1685,16 @@ static int mdss_mdp_video_display(struct mdss_mdp_ctl *ctl, void *arg) mdss_bus_bandwidth_ctrl(true); + /* configure the split link to both sublinks */ + if (is_panel_split_link(ctl->mfd)) { + mdp_video_write(ctx, MDSS_MDP_REG_SPLIT_LINK, 0x3); + /* + * ensure split link register is written before + * enabling timegen + */ + wmb(); + } + mdp_video_write(ctx, MDSS_MDP_REG_INTF_TIMING_ENGINE_EN, 1); wmb(); @@ -1796,7 +1809,9 @@ int mdss_mdp_video_reconfigure_splash_done(struct mdss_mdp_ctl *ctl, mdss_mdp_video_timegen_flush(ctl, sctx); /* wait for 1 VSYNC for the pipe to be unstaged */ - msleep(20); + mdss_mdp_video_wait4comp(ctl, NULL); + if (sctl) + mdss_mdp_video_wait4comp(sctl, NULL); ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_CONT_SPLASH_FINISH, NULL, diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c index 87ed56028edd..9b63499e64b0 100644 --- 
a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c +++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c @@ -792,7 +792,9 @@ static int mdss_mdp_writeback_stop(struct mdss_mdp_ctl *ctl, } if (ctl->cdm) { - mdss_mdp_cdm_destroy(ctl->cdm); + if (!mdss_mdp_cdm_destroy(ctl->cdm)) + mdss_mdp_ctl_write(ctl, + MDSS_MDP_REG_CTL_FLUSH, BIT(26)); ctl->cdm = NULL; } return 0; diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 8eb12d764be3..8c612e2b83fb 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -2601,6 +2601,18 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd, mdss_mdp_splash_cleanup(mfd, true); /* + * Wait for pingpong done only during resume for + * command mode panels. Ensure that one commit is + * sent before kickoff completes so that backlight + * update happens after it. + */ + if (mdss_fb_is_power_off(mfd) && + mfd->panel_info->type == MIPI_CMD_PANEL) { + pr_debug("wait for pp done after resume for cmd mode\n"); + mdss_mdp_display_wait4pingpong(ctl, true); + } + + /* * Configure Timing Engine, if new fps was set. * We need to do this after the wait for vsync * to guarantee that mdp flush bit and dsi flush @@ -6279,6 +6291,13 @@ int mdss_mdp_input_event_handler(struct msm_fb_data_type *mfd) return rc; } +void mdss_mdp_footswitch_ctrl_handler(bool on) +{ + struct mdss_data_type *mdata = mdss_mdp_get_mdata(); + + mdss_mdp_footswitch_ctrl(mdata, on); +} + int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd) { struct device *dev = mfd->fbi->dev; @@ -6321,6 +6340,14 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd) mdp5_interface->configure_panel = mdss_mdp_update_panel_info; mdp5_interface->input_event_handler = mdss_mdp_input_event_handler; + /* + * Register footswitch control only for primary fb pm + * suspend/resume calls. 
+ */ + if (mfd->panel_info->is_prim_panel) + mdp5_interface->footswitch_ctrl = + mdss_mdp_footswitch_ctrl_handler; + if (mfd->panel_info->type == WRITEBACK_PANEL) { mdp5_interface->atomic_validate = mdss_mdp_layer_atomic_validate_wfd; diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c index 8d7bd60318ad..724913f376a7 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c +++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c @@ -2292,6 +2292,9 @@ static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe, mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, addr[2]); } + MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num, + pipe->play_cnt, addr[0], addr[1], addr[2], addr[3]); + return 0; } @@ -2734,9 +2737,6 @@ int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe, goto update_nobuf; } - MDSS_XLOG(pipe->num, pipe->multirect.num, pipe->mixer_left->num, - pipe->play_cnt, 0x222); - if (params_changed) { pipe->params_changed = 0; diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h index 92413e078244..fa1df94976f9 100644 --- a/drivers/video/fbdev/msm/mdss_panel.h +++ b/drivers/video/fbdev/msm/mdss_panel.h @@ -533,6 +533,8 @@ struct mipi_panel_info { char lp11_init; u32 init_delay; u32 post_init_delay; + u32 num_of_sublinks; + u32 lanes_per_sublink; }; struct edp_panel_info { @@ -847,6 +849,7 @@ struct mdss_panel_info { bool is_lpm_mode; bool is_split_display; /* two DSIs in one display, pp split or not */ bool use_pingpong_split; + bool split_link_enabled; /* * index[0] = left layer mixer, value of 0 not valid diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c index 62e25500060e..f56158446c0d 100644 --- a/drivers/video/fbdev/msm/mdss_smmu.c +++ b/drivers/video/fbdev/msm/mdss_smmu.c @@ -600,24 +600,30 @@ int mdss_smmu_fault_handler(struct iommu_domain *domain, struct device *dev, (struct mdss_smmu_client *)user_data; u32 fsynr1, mid, i; - if (!mdss_smmu || !mdss_smmu->mmu_base) + if (!mdss_smmu) goto end; - fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1); - mid = fsynr1 & 0xff; - pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n", - iova, flags, fsynr1, mid); + if (mdss_smmu->mmu_base) { + fsynr1 = readl_relaxed(mdss_smmu->mmu_base + SMMU_CBN_FSYNR1); + mid = fsynr1 & 0xff; + pr_err("mdss_smmu: iova:0x%lx flags:0x%x fsynr1: 0x%x mid: 0x%x\n", + iova, flags, fsynr1, mid); - /* get domain id information */ - for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) { - if (mdss_smmu == mdss_smmu_get_cb(i)) - break; - } + /* get domain id information */ + for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) { + if (mdss_smmu == mdss_smmu_get_cb(i)) + break; + } - if (i == MDSS_IOMMU_MAX_DOMAIN) - goto end; + if (i == MDSS_IOMMU_MAX_DOMAIN) + goto end; - mdss_mdp_debug_mid(mid); + mdss_mdp_debug_mid(mid); + } else { + pr_err("mdss_smmu: iova:0x%lx flags:0x%x\n", + iova, flags); + MDSS_XLOG_TOUT_HANDLER("mdp"); + } end: return -ENOSYS; } @@ -844,14 +850,13 @@ int mdss_smmu_probe(struct platform_device *pdev) mdss_smmu->base.dev = dev; + iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain, + mdss_smmu_fault_handler, mdss_smmu); address = of_get_address_by_name(pdev->dev.of_node, "mmu_cb", 0, 0); if (address) { size = address + 1; mdss_smmu->mmu_base = ioremap(be32_to_cpu(*address), be32_to_cpu(*size)); - if (mdss_smmu->mmu_base) - iommu_set_fault_handler(mdss_smmu->mmu_mapping->domain, - mdss_smmu_fault_handler, mdss_smmu); } else { pr_debug("unable to map context 
bank base\n"); } diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c index 03e78733d168..9c156af6b63c 100644 --- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c +++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c @@ -38,16 +38,23 @@ #define MDSS_DSI_DSIPHY_GLBL_TEST_CTRL 0x1d4 #define MDSS_DSI_DSIPHY_CTRL_0 0x170 #define MDSS_DSI_DSIPHY_CTRL_1 0x174 +#define MDSS_DSI_DSIPHY_CMN_CLK_CFG0 0x0010 +#define MDSS_DSI_DSIPHY_CMN_CLK_CFG1 0x0014 + +#define MDSS_DSI_NUM_DATA_LANES 0x04 +#define MDSS_DSI_NUM_CLK_LANES 0x01 #define SW_RESET BIT(2) #define SW_RESET_PLL BIT(0) #define PWRDN_B BIT(7) /* 8996 */ -#define DATALANE_OFFSET_FROM_BASE_8996 0x100 -#define DSIPHY_CMN_PLL_CNTRL 0x0048 +#define DATALANE_OFFSET_FROM_BASE_8996 0x100 +#define CLKLANE_OFFSET_FROM_BASE_8996 0x300 #define DATALANE_SIZE_8996 0x80 +#define CLKLANE_SIZE_8996 0x80 +#define DSIPHY_CMN_PLL_CNTRL 0x0048 #define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018 #define DSIPHY_CMN_CTRL_0 0x001c #define DSIPHY_CMN_CTRL_1 0x0020 @@ -55,6 +62,24 @@ #define DSIPHY_PLL_CLKBUFLR_EN 0x041c #define DSIPHY_PLL_PLL_BANDGAP 0x0508 +#define DSIPHY_LANE_STRENGTH_CTRL_NUM 0x0002 +#define DSIPHY_LANE_STRENGTH_CTRL_OFFSET 0x0004 +#define DSIPHY_LANE_STRENGTH_CTRL_BASE 0x0038 + +#define DSIPHY_LANE_CFG_NUM 0x0004 +#define DSIPHY_LANE_CFG_OFFSET 0x0004 +#define DSIPHY_LANE_CFG_BASE 0x0000 + +#define DSIPHY_LANE_VREG_NUM 0x0001 +#define DSIPHY_LANE_VREG_OFFSET 0x0004 +#define DSIPHY_LANE_VREG_BASE 0x0064 + +#define DSIPHY_LANE_TIMING_CTRL_NUM 0x0008 +#define DSIPHY_LANE_TIMING_CTRL_OFFSET 0x0004 +#define DSIPHY_LANE_TIMING_CTRL_BASE 0x0018 + +#define DSIPHY_LANE_TEST_STR 0x0014 + #define DSIPHY_LANE_STRENGTH_CTRL_1 0x003c #define DSIPHY_LANE_VREG_CNTRL 0x0064 @@ -131,6 +156,8 @@ #define DSIPHY_PLL_RESETSM_CNTRL5 0x043c +#define DSIPHY_CMN_CLK_CFG1_SPLIT_LINK 0x1 + #define PLL_CALC_DATA(addr0, addr1, data0, data1) \ (((data1) << 24) | ((((addr1)/4) & 0xFF) << 16) | \ ((data0) << 8) | (((addr0)/4) & 0xFF)) @@ -911,35 +938,59 @@ static void mdss_dsi_8996_phy_regulator_enable( int j, off, ln, cnt, ln_off; char *ip; void __iomem *base; + struct mdss_panel_info *panel_info; + + if (!ctrl) { + pr_warn("%s: null ctrl pdata\n", __func__); + return; + } + panel_info = &((ctrl->panel_data).panel_info); pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db); - if (pd->regulator_len != 5) { + if (pd->regulator_len != (MDSS_DSI_NUM_DATA_LANES + + MDSS_DSI_NUM_CLK_LANES)) { pr_warn("%s: invalid regulator settings\n", __func__); return; } - /* 4 lanes + clk lane configuration */ - for (ln = 0; ln < 5; ln++) { - /* - * data lane offset frome base: 0x100 - * data lane size: 0x80 - */ - base = ctrl->phy_io.base + - DATALANE_OFFSET_FROM_BASE_8996; - base += (ln * DATALANE_SIZE_8996); /* lane base */ - - /* vreg ctrl, 1 * 5 */ - cnt = 1; + /* + * data lane offset from base: 0x100 + * data lane size: 0x80 + */ + base = ctrl->phy_io.base + DATALANE_OFFSET_FROM_BASE_8996; + /* data lanes configuration */ + for (ln = 0; ln < MDSS_DSI_NUM_DATA_LANES; ln++) { + /* vreg ctrl, 1 * MDSS_DSI_NUM_DATA_LANES */ + cnt = DSIPHY_LANE_VREG_NUM; + off = DSIPHY_LANE_VREG_BASE; ln_off = cnt * ln; ip = &pd->regulator[ln_off]; - off = 0x64; - for (j = 0; j < cnt; j++, off += 4) + for (j = 0; j < cnt; j++) { MIPI_OUTP(base + off, *ip++); + off += DSIPHY_LANE_VREG_OFFSET; + } + base += DATALANE_SIZE_8996; /* next lane */ } - wmb(); /* make sure registers committed */ + /* + * clk lane offset from base: 0x300 + * clk lane size: 0x80 + */ + base = ctrl->phy_io.base 
+ CLKLANE_OFFSET_FROM_BASE_8996; + /* + * clk lane configuration for vreg ctrl + * for split link there are two clock lanes, one + * clock lane per sublink needs to be configured + */ + off = DSIPHY_LANE_VREG_BASE; + ln_off = MDSS_DSI_NUM_DATA_LANES; + ip = &pd->regulator[ln_off]; + MIPI_OUTP(base + off, *ip); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + off, *ip); + wmb(); /* make sure registers committed */ } static void mdss_dsi_8996_phy_power_off( @@ -948,31 +999,51 @@ static void mdss_dsi_8996_phy_power_off( int ln; void __iomem *base; u32 data; + struct mdss_panel_info *panel_info; + + if (ctrl) { + panel_info = &((ctrl->panel_data).panel_info); + } else { + pr_warn("%s: null ctrl pdata\n", __func__); + return; + } /* Turn off PLL power */ data = MIPI_INP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0); MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, data & ~BIT(7)); - /* 4 lanes + clk lane configuration */ - for (ln = 0; ln < 5; ln++) { - base = ctrl->phy_io.base + - DATALANE_OFFSET_FROM_BASE_8996; - base += (ln * DATALANE_SIZE_8996); /* lane base */ - + /* data lanes configuration */ + base = ctrl->phy_io.base + DATALANE_OFFSET_FROM_BASE_8996; + for (ln = 0; ln < MDSS_DSI_NUM_DATA_LANES; ln++) { /* turn off phy ldo */ - MIPI_OUTP(base + DSIPHY_LANE_VREG_CNTRL, 0x1c); + MIPI_OUTP(base + DSIPHY_LANE_VREG_BASE, 0x1c); + base += DATALANE_SIZE_8996; /* next lane */ } - MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c); - /* 4 lanes + clk lane configuration */ - for (ln = 0; ln < 5; ln++) { - base = ctrl->phy_io.base + - DATALANE_OFFSET_FROM_BASE_8996; - base += (ln * DATALANE_SIZE_8996); /* lane base */ + /* clk lane configuration */ + base = ctrl->phy_io.base + CLKLANE_OFFSET_FROM_BASE_8996; + /* turn off phy ldo */ + MIPI_OUTP(base + DSIPHY_LANE_VREG_BASE, 0x1c); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + + DSIPHY_LANE_VREG_BASE, 0x1c); + + MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c); + /* data lanes configuration */ + base = ctrl->phy_io.base + DATALANE_OFFSET_FROM_BASE_8996; + for (ln = 0; ln < MDSS_DSI_NUM_DATA_LANES; ln++) { MIPI_OUTP(base + DSIPHY_LANE_STRENGTH_CTRL_1, 0x0); + base += DATALANE_SIZE_8996; /* next lane */ } + /* clk lane configuration */ + base = ctrl->phy_io.base + CLKLANE_OFFSET_FROM_BASE_8996; + MIPI_OUTP(base + DSIPHY_LANE_STRENGTH_CTRL_1, 0x0); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + + DSIPHY_LANE_STRENGTH_CTRL_1, 0x0); + wmb(); /* make sure registers committed */ } @@ -1008,22 +1079,46 @@ static void mdss_dsi_8996_phy_power_on( struct mdss_dsi_phy_ctrl *pd; char *ip; u32 data; + struct mdss_panel_info *panel_info; - pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db); + if (ctrl) { + panel_info = &((ctrl->panel_data).panel_info); + } else { + pr_warn("%s: null ctrl pdata\n", __func__); + return; + } - /* 4 lanes + clk lane configuration */ - for (ln = 0; ln < 5; ln++) { - base = ctrl->phy_io.base + - DATALANE_OFFSET_FROM_BASE_8996; - base += (ln * DATALANE_SIZE_8996); /* lane base */ + pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db); - /* strength, 2 * 5 */ - cnt = 2; + /* data lanes configuration */ + base = ctrl->phy_io.base + DATALANE_OFFSET_FROM_BASE_8996; + for (ln = 0; ln < MDSS_DSI_NUM_DATA_LANES; ln++) { + /* strength, 2 * MDSS_DSI_NUM_DATA_LANES */ + cnt = DSIPHY_LANE_STRENGTH_CTRL_NUM; ln_off = cnt * ln; ip = &pd->strength[ln_off]; - off = 0x38; - for (j = 0; j < cnt; j++, off += 4) + off = 
DSIPHY_LANE_STRENGTH_CTRL_BASE; + for (j = 0; j < cnt; j++, + off += DSIPHY_LANE_STRENGTH_CTRL_OFFSET) MIPI_OUTP(base + off, *ip++); + base += DATALANE_SIZE_8996; /* next lane */ + } + + /* + * clk lane configuration for strength ctrl + * for split link there are two clock lanes, one + * clock lane per sublink needs to be configured + */ + base = ctrl->phy_io.base + CLKLANE_OFFSET_FROM_BASE_8996; + cnt = DSIPHY_LANE_STRENGTH_CTRL_NUM; + ln_off = MDSS_DSI_NUM_DATA_LANES; + ip = &pd->strength[ln_off]; + off = DSIPHY_LANE_STRENGTH_CTRL_BASE; + for (j = 0; j < cnt; j++, + off += DSIPHY_LANE_STRENGTH_CTRL_OFFSET) { + MIPI_OUTP(base + off, *ip); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + off, *ip); } mdss_dsi_8996_phy_regulator_enable(ctrl); @@ -1051,67 +1146,126 @@ static void mdss_dsi_8996_phy_config(struct mdss_dsi_ctrl_pdata *ctrl) int j, off, ln, cnt, ln_off; char *ip; void __iomem *base; + struct mdss_panel_info *panel_info; + int num_of_lanes = 0; + + if (ctrl) { + panel_info = &((ctrl->panel_data).panel_info); + } else { + pr_warn("%s: null ctrl pdata\n", __func__); + return; + } pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db); + num_of_lanes = MDSS_DSI_NUM_DATA_LANES + MDSS_DSI_NUM_CLK_LANES; MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c); /* clk_en */ MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_GLBL_TEST_CTRL, 0x1); - if (pd->lanecfg_len != 20) { + if (pd->lanecfg_len != (num_of_lanes * DSIPHY_LANE_CFG_NUM)) { pr_err("%s: wrong lane cfg\n", __func__); return; } - if (pd->strength_len != 10) { + if (pd->strength_len != (num_of_lanes * + DSIPHY_LANE_STRENGTH_CTRL_NUM)) { pr_err("%s: wrong strength ctrl\n", __func__); return; } - if (pd->regulator_len != 5) { + if (pd->regulator_len != (num_of_lanes * DSIPHY_LANE_VREG_NUM)) { pr_err("%s: wrong regulator setting\n", __func__); return; } - /* 4 lanes + clk lane configuration */ - for (ln = 0; ln < 5; ln++) { - /* - * data lane offset frome base: 0x100 - * data lane size: 0x80 - */ - base = ctrl->phy_io.base + - DATALANE_OFFSET_FROM_BASE_8996; - base += (ln * DATALANE_SIZE_8996); /* lane base */ - - /* lane cfg, 4 * 5 */ - cnt = 4; + /* data lanes configuration */ + base = ctrl->phy_io.base + DATALANE_OFFSET_FROM_BASE_8996; + for (ln = 0; ln < MDSS_DSI_NUM_DATA_LANES; ln++) { + /* lane cfg, 4 * MDSS_DSI_NUM_DATA_LANES */ + cnt = DSIPHY_LANE_CFG_NUM; + off = DSIPHY_LANE_CFG_BASE; ln_off = cnt * ln; ip = &pd->lanecfg[ln_off]; - off = 0x0; for (j = 0; j < cnt; j++) { MIPI_OUTP(base + off, *ip++); - off += 4; + off += DSIPHY_LANE_CFG_OFFSET; } /* test str */ - MIPI_OUTP(base + 0x14, 0x0088); /* fixed */ + MIPI_OUTP(base + DSIPHY_LANE_TEST_STR, 0x88); /* fixed */ - /* phy timing, 8 * 5 */ - cnt = 8; + /* phy timing, 8 * MDSS_DSI_NUM_DATA_LANES */ + cnt = DSIPHY_LANE_TIMING_CTRL_NUM; + off = DSIPHY_LANE_TIMING_CTRL_BASE; ln_off = cnt * ln; ip = &pd->timing_8996[ln_off]; - off = 0x18; - for (j = 0; j < cnt; j++, off += 4) + for (j = 0; j < cnt; j++) { MIPI_OUTP(base + off, *ip++); + off += DSIPHY_LANE_TIMING_CTRL_OFFSET; + } - /* strength, 2 * 5 */ - cnt = 2; + /* strength, 2 * MDSS_DSI_NUM_DATA_LANES */ + cnt = DSIPHY_LANE_STRENGTH_CTRL_NUM; + off = DSIPHY_LANE_STRENGTH_CTRL_BASE; ln_off = cnt * ln; ip = &pd->strength[ln_off]; - off = 0x38; - for (j = 0; j < cnt; j++, off += 4) + for (j = 0; j < cnt; j++) { MIPI_OUTP(base + off, *ip++); + off += DSIPHY_LANE_STRENGTH_CTRL_OFFSET; + } + + base += DATALANE_SIZE_8996; /* next lane */ + } + + /* + * clk lane configuration + * for split link there 
are two clock lanes, one + * clock lane per sublink needs to be configured + */ + base = ctrl->phy_io.base + CLKLANE_OFFSET_FROM_BASE_8996; + cnt = DSIPHY_LANE_CFG_NUM; + off = DSIPHY_LANE_CFG_BASE; + ln_off = cnt * MDSS_DSI_NUM_DATA_LANES; + ip = &pd->lanecfg[ln_off]; + for (j = 0; j < cnt; j++, *ip++) { + MIPI_OUTP(base + off, *ip); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + off, *ip); + off += DSIPHY_LANE_CFG_OFFSET; + } + + /* test str */ + MIPI_OUTP(base + DSIPHY_LANE_TEST_STR, 0x88); /* fixed */ + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + off, 0x88); + + cnt = DSIPHY_LANE_TIMING_CTRL_NUM; + off = DSIPHY_LANE_TIMING_CTRL_BASE; + ln_off = cnt * MDSS_DSI_NUM_DATA_LANES; + ip = &pd->timing_8996[ln_off]; + for (j = 0; j < cnt; j++, *ip++) { + MIPI_OUTP(base + off, *ip); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + off, *ip); + off += DSIPHY_LANE_TIMING_CTRL_OFFSET; + } + + /* + * clk lane configuration for timing + * for split link there are two clock lanes, one + * clock lane per sublink needs to be configured + */ + cnt = DSIPHY_LANE_STRENGTH_CTRL_NUM; + off = DSIPHY_LANE_STRENGTH_CTRL_BASE; + ln_off = cnt * MDSS_DSI_NUM_DATA_LANES; + ip = &pd->strength[ln_off]; + for (j = 0; j < cnt; j++, *ip++) { + MIPI_OUTP(base + off, *ip); + if (panel_info->split_link_enabled) + MIPI_OUTP(base + CLKLANE_SIZE_8996 + off, *ip); + off += DSIPHY_LANE_STRENGTH_CTRL_OFFSET; } wmb(); /* make sure registers committed */ @@ -1665,6 +1819,9 @@ int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info, u32 dsi_pclk_rate; u8 lanes = 0, bpp; + if (!panel_info) + return -EINVAL; + if (panel_info->mipi.data_lane3) lanes += 1; if (panel_info->mipi.data_lane2) @@ -1690,6 +1847,8 @@ int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info, } h_period = mdss_panel_get_htotal(panel_info, true); + if (panel_info->split_link_enabled) + h_period *= panel_info->mipi.num_of_sublinks; v_period = mdss_panel_get_vtotal(panel_info); if (ctrl_pdata->refresh_clk_rate || is_diff_frame_rate(panel_info, @@ -1710,7 +1869,12 @@ int mdss_dsi_clk_div_config(struct mdss_panel_info *panel_info, clk_rate = panel_info->clk_rate; do_div(clk_rate, 8 * bpp); - dsi_pclk_rate = (u32) clk_rate * lanes; + + if (panel_info->split_link_enabled) + dsi_pclk_rate = (u32) clk_rate * + panel_info->mipi.lanes_per_sublink; + else + dsi_pclk_rate = (u32) clk_rate * lanes; if ((dsi_pclk_rate < 3300000) || (dsi_pclk_rate > 250000000)) dsi_pclk_rate = 35000000; @@ -2320,6 +2484,32 @@ int mdss_dsi_pre_clkoff_cb(void *priv, return rc; } +static void mdss_dsi_split_link_clk_cfg(struct mdss_dsi_ctrl_pdata *ctrl, + int enable) +{ + struct mdss_panel_data *pdata = NULL; + void __iomem *base; + u32 data = 0; + + if (ctrl) + pdata = &ctrl->panel_data; + else { + pr_err("%s: ctrl pdata is NULL\n", __func__); + return; + } + + /* + * for split link there are two clock lanes, and + * both clock lanes needs to be enabled + */ + if (pdata->panel_info.split_link_enabled) { + base = ctrl->phy_io.base; + data = MIPI_INP(base + MDSS_DSI_DSIPHY_CMN_CLK_CFG1); + data |= (enable << DSIPHY_CMN_CLK_CFG1_SPLIT_LINK); + MIPI_OUTP(base + MDSS_DSI_DSIPHY_CMN_CLK_CFG1, data); + } +} + int mdss_dsi_post_clkon_cb(void *priv, enum mdss_dsi_clk_type clk, enum mdss_dsi_clk_state curr_state) @@ -2393,6 +2583,9 @@ int mdss_dsi_post_clkon_cb(void *priv, } if (pdata->panel_info.mipi.force_clk_lane_hs) mdss_dsi_cfg_lane_ctrl(ctrl, BIT(28), 1); + + /* enable split link for cmn clk cfg1 */ 
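/*
 * Illustrative note (assumed example values, not from the patch): with
 * split link enabled, mdss_dsi_clk_div_config() above multiplies h_period
 * by mipi.num_of_sublinks and derives dsi_pclk_rate from
 * mipi.lanes_per_sublink instead of the total data-lane count. For an
 * assumed panel with qcom,sublinks-count = 2 and qcom,lanes-per-sublink = 2,
 * the four physical data lanes are unchanged but the pixel clock is
 * computed over the two lanes of one sublink. The call below then sets the
 * split-link bit in DSIPHY_CMN_CLK_CFG1 so that both sublink clock lanes
 * are enabled, as the comment in mdss_dsi_split_link_clk_cfg() describes.
 */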
+ mdss_dsi_split_link_clk_cfg(ctrl, 1); } error: return rc; } diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 162689227a23..1cf907ecded4 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect) return "4:3"; case HDMI_PICTURE_ASPECT_16_9: return "16:9"; + case HDMI_PICTURE_ASPECT_64_27: + return "64:27"; + case HDMI_PICTURE_ASPECT_256_135: + return "256:135"; case HDMI_PICTURE_ASPECT_RESERVED: return "Reserved"; }
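The two added cases extend the name lookup to the wider 64:27 and 256:135 picture-aspect codes. A minimal sketch of where those strings surface, assuming the stock linux/hdmi.h infoframe helpers; the device pointer, helper name and choice of aspect are placeholders, not code from this patch:

#include <linux/device.h>
#include <linux/hdmi.h>
#include <linux/printk.h>

static void log_ultrawide_aspect(struct device *dev)
{
	union hdmi_infoframe frame;

	/* build a default AVI infoframe and tag it as 64:27 content */
	if (hdmi_avi_infoframe_init(&frame.avi))
		return;
	frame.avi.picture_aspect = HDMI_PICTURE_ASPECT_64_27;

	/*
	 * hdmi_infoframe_log() reports the aspect through
	 * hdmi_picture_aspect_get_name(), which now returns "64:27"
	 */
	hdmi_infoframe_log(KERN_DEBUG, dev, &frame);
}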

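Taken together, the split-link support threads three new pieces of panel state (split_link_enabled, mipi.num_of_sublinks and mipi.lanes_per_sublink, parsed from the qcom,split-link-enabled, qcom,sublinks-count and qcom,lanes-per-sublink properties) through the fb, MDP and DSI layers. The standalone sketch below models only the width and lane arithmetic those layers apply; the struct, helper names and example values are hypothetical, not taken from the driver:

#include <stdbool.h>
#include <stdio.h>

/* mirrors only the panel fields the patch adds or consumes */
struct split_link_info {
	bool enabled;			/* qcom,split-link-enabled */
	unsigned int sublinks;		/* qcom,sublinks-count */
	unsigned int lanes_per_sublink;	/* qcom,lanes-per-sublink */
};

/*
 * Panel width seen by the frame buffer and layer mixers: per-sublink xres
 * scaled by the sublink count (as in mdss_fb_get_panel_xres() and
 * get_panel_width()).
 */
static unsigned int effective_xres(unsigned int xres,
				   const struct split_link_info *sl)
{
	return sl->enabled ? xres * sl->sublinks : xres;
}

/*
 * Lane count used for the pixel-clock derivation: lanes_per_sublink when
 * split link is on, otherwise the total data-lane count (as in
 * mdss_dsi_clk_div_config()).
 */
static unsigned int pclk_lanes(unsigned int total_lanes,
			       const struct split_link_info *sl)
{
	return sl->enabled ? sl->lanes_per_sublink : total_lanes;
}

int main(void)
{
	/* example values only: 1080-wide sublinks, 2 sublinks, 2+2 lanes */
	struct split_link_info sl = { true, 2, 2 };

	printf("effective xres=%u, pclk lanes=%u\n",
	       effective_xres(1080, &sl), pclk_lanes(4, &sl));
	return 0;
}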