Diffstat (limited to 'drivers')
-rw-r--r--  drivers/android/binder.c | 170
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 317
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 3
-rw-r--r--  drivers/mfd/wcd934x-regmap.c | 9
-rw-r--r--  drivers/misc/hdcp.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.c | 18
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.h | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 114
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 8
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c | 10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 10
-rw-r--r--  drivers/power/power_supply_sysfs.c | 2
-rw-r--r--  drivers/power/qcom-charger/qpnp-qnovo.c | 15
-rw-r--r--  drivers/power/qcom-charger/qpnp-smb2.c | 158
-rw-r--r--  drivers/power/qcom-charger/smb-lib.c | 121
-rw-r--r--  drivers/power/qcom-charger/smb-lib.h | 22
-rw-r--r--  drivers/power/qcom-charger/smb-reg.h | 3
-rw-r--r--  drivers/soc/qcom/Kconfig | 9
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/irq-helper.c | 179
-rw-r--r--  drivers/soc/qcom/socinfo.c | 9
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 87
-rw-r--r--  drivers/thermal/msm-tsens.c | 60
-rw-r--r--  drivers/thermal/thermal_core.c | 42
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 261
-rw-r--r--  drivers/usb/gadget/function/f_cdev.c | 10
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c | 1
39 files changed, 1303 insertions, 442 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 4c67945ef36f..20d17906fc9b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -379,6 +379,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
struct files_struct *files = proc->files;
unsigned long rlim_cur;
unsigned long irqs;
+ int ret;
if (files == NULL)
return -ESRCH;
@@ -389,7 +390,11 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
unlock_task_sighand(proc->tsk, &irqs);
- return __alloc_fd(files, 0, rlim_cur, flags);
+ preempt_enable_no_resched();
+ ret = __alloc_fd(files, 0, rlim_cur, flags);
+ preempt_disable();
+
+ return ret;
}
/*
@@ -398,8 +403,11 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
static void task_fd_install(
struct binder_proc *proc, unsigned int fd, struct file *file)
{
- if (proc->files)
+ if (proc->files) {
+ preempt_enable_no_resched();
__fd_install(proc->files, fd, file);
+ preempt_disable();
+ }
}
/*
@@ -427,6 +435,7 @@ static inline void binder_lock(const char *tag)
{
trace_binder_lock(tag);
mutex_lock(&binder_main_lock);
+ preempt_disable();
trace_binder_locked(tag);
}
@@ -434,8 +443,62 @@ static inline void binder_unlock(const char *tag)
{
trace_binder_unlock(tag);
mutex_unlock(&binder_main_lock);
+ preempt_enable();
+}
+
+static inline void *kzalloc_preempt_disabled(size_t size)
+{
+ void *ptr;
+
+ ptr = kzalloc(size, GFP_NOWAIT);
+ if (ptr)
+ return ptr;
+
+ preempt_enable_no_resched();
+ ptr = kzalloc(size, GFP_KERNEL);
+ preempt_disable();
+
+ return ptr;
+}
+
+static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+{
+ long ret;
+
+ preempt_enable_no_resched();
+ ret = copy_to_user(to, from, n);
+ preempt_disable();
+ return ret;
+}
+
+static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+{
+ long ret;
+
+ preempt_enable_no_resched();
+ ret = copy_from_user(to, from, n);
+ preempt_disable();
+ return ret;
}
+#define get_user_preempt_disabled(x, ptr) \
+({ \
+ int __ret; \
+ preempt_enable_no_resched(); \
+ __ret = get_user(x, ptr); \
+ preempt_disable(); \
+ __ret; \
+})
+
+#define put_user_preempt_disabled(x, ptr) \
+({ \
+ int __ret; \
+ preempt_enable_no_resched(); \
+ __ret = put_user(x, ptr); \
+ preempt_disable(); \
+ __ret; \
+})
+
static void binder_set_nice(long nice)
{
long min_nice;
@@ -568,6 +631,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
else
mm = get_task_mm(proc->tsk);
+ preempt_enable_no_resched();
+
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
@@ -622,6 +687,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
up_write(&mm->mmap_sem);
mmput(mm);
}
+
+ preempt_disable();
+
return 0;
free_range:
@@ -644,6 +712,9 @@ err_no_vma:
up_write(&mm->mmap_sem);
mmput(mm);
}
+
+ preempt_disable();
+
return -ENOMEM;
}
@@ -903,7 +974,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
return NULL;
}
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc_preempt_disabled(sizeof(*node));
if (node == NULL)
return NULL;
binder_stats_created(BINDER_STAT_NODE);
@@ -1040,7 +1111,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ new_ref = kzalloc_preempt_disabled(sizeof(*ref));
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
@@ -1438,14 +1509,14 @@ static void binder_transaction(struct binder_proc *proc,
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
+ t = kzalloc_preempt_disabled(sizeof(*t));
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
- tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
@@ -1502,14 +1573,14 @@ static void binder_transaction(struct binder_proc *proc,
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
- if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+ if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
- if (copy_from_user(offp, (const void __user *)(uintptr_t)
+ if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
@@ -1778,7 +1849,7 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
- if (get_user(cmd, (uint32_t __user *)ptr))
+ if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
@@ -1796,7 +1867,7 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_ref *ref;
const char *debug_string;
- if (get_user(target, (uint32_t __user *)ptr))
+ if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (target == 0 && binder_context_mgr_node &&
@@ -1846,10 +1917,10 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t cookie;
struct binder_node *node;
- if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
@@ -1907,7 +1978,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
- if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -1949,7 +2020,7 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_REPLY: {
struct binder_transaction_data tr;
- if (copy_from_user(&tr, ptr, sizeof(tr)))
+ if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
@@ -1999,10 +2070,10 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_ref *ref;
struct binder_ref_death *death;
- if (get_user(target, (uint32_t __user *)ptr))
+ if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target);
@@ -2031,7 +2102,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid);
break;
}
- death = kzalloc(sizeof(*death), GFP_KERNEL);
+ death = kzalloc_preempt_disabled(sizeof(*death));
if (death == NULL) {
thread->return_error = BR_ERROR;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2085,8 +2156,7 @@ static int binder_thread_write(struct binder_proc *proc,
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
-
- if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
@@ -2118,7 +2188,8 @@ static int binder_thread_write(struct binder_proc *proc,
wake_up_interruptible(&proc->wait);
}
}
- } break;
+ }
+ break;
default:
pr_err("%d:%d unknown command %d\n",
@@ -2167,7 +2238,7 @@ static int binder_thread_read(struct binder_proc *proc,
int wait_for_proc_work;
if (*consumed == 0) {
- if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
@@ -2178,7 +2249,7 @@ retry:
if (thread->return_error != BR_OK && ptr < end) {
if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error2);
@@ -2186,7 +2257,7 @@ retry:
goto done;
thread->return_error2 = BR_OK;
}
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error);
@@ -2264,7 +2335,7 @@ retry:
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2306,14 +2377,14 @@ retry:
node->has_weak_ref = 0;
}
if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
+ if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *)
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
+ if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *)
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -2357,11 +2428,10 @@ retry:
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
+ if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
binder_stat_br(proc, thread, cmd);
@@ -2428,10 +2498,10 @@ retry:
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
@@ -2473,7 +2543,7 @@ done:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
- if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
}
@@ -2548,7 +2618,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
break;
}
if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ thread = kzalloc_preempt_disabled(sizeof(*thread));
if (thread == NULL)
return NULL;
binder_stats_created(BINDER_STAT_THREAD);
@@ -2652,7 +2722,7 @@ static int binder_ioctl_write_read(struct file *filp,
ret = -EINVAL;
goto out;
}
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -2670,7 +2740,7 @@ static int binder_ioctl_write_read(struct file *filp,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -2684,7 +2754,7 @@ static int binder_ioctl_write_read(struct file *filp,
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -2694,7 +2764,7 @@ static int binder_ioctl_write_read(struct file *filp,
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -2772,7 +2842,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
break;
case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
@@ -2795,9 +2865,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
goto err;
}
- if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
- &ver->protocol_version)) {
- ret = -EINVAL;
+ if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) {
+ ret = -EINVAL;
goto err;
}
break;
@@ -2858,6 +2927,7 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
+
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
@@ -2918,7 +2988,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ /* binder_update_page_range assumes preemption is disabled */
+ preempt_disable();
+ ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+ preempt_enable_no_resched();
+ if (ret) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
@@ -3188,8 +3262,12 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
- binder_lock(__func__);
+ trace_binder_lock(__func__);
+ mutex_lock(&binder_main_lock);
+ trace_binder_locked(__func__);
+
mutex_lock(&binder_deferred_lock);
+ preempt_disable();
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
@@ -3215,7 +3293,9 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- binder_unlock(__func__);
+ trace_binder_unlock(__func__);
+ mutex_unlock(&binder_main_lock);
+ preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
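
The binder changes above bracket every operation that can sleep (GFP_KERNEL allocation, copy_to_user/copy_from_user, get_user/put_user, __alloc_fd, __fd_install) with preempt_enable_no_resched()/preempt_disable(), because binder_lock() now keeps preemption disabled for the whole critical section. A minimal sketch of that wrapper pattern, assuming a hypothetical do_sleeping_work() callback that is not part of the patch:

/*
 * Sketch only: temporarily re-enable preemption around a call that may
 * sleep, then restore the preempt-disabled state the caller expects.
 */
static inline int call_sleeping_preempt_disabled(int (*do_sleeping_work)(void *arg),
						 void *arg)
{
	int ret;

	preempt_enable_no_resched();	/* safe to sleep from here on */
	ret = do_sleeping_work(arg);
	preempt_disable();		/* restore the state binder assumes */

	return ret;
}
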
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index dea0448c365c..ae2df4f7ff0d 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -149,12 +149,12 @@
#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
-#define FG_ADC_RR_DIE_TEMP_SLOPE 2000
-#define FG_ADC_RR_DIE_TEMP_OFFSET_DEGC 25
+#define FG_ADC_RR_DIE_TEMP_SLOPE 2
+#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000
-#define FG_ADC_RR_CHG_TEMP_SLOPE 4000
-#define FG_ADC_RR_CHG_TEMP_OFFSET_DEGC 27
+#define FG_ADC_RR_CHG_TEMP_SLOPE 4
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2
@@ -162,6 +162,9 @@
#define FG_ADC_KELVINMIL_CELSIUSMIL 273150
#define FG_ADC_RR_GPIO_FS_RANGE 5000
+#define FG_RR_ADC_COHERENT_CHECK_RETRY 5
+#define FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN 16
+#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
/*
* The channel number is not a physical index in hardware,
@@ -171,21 +174,20 @@
* the RR ADC before RR_ADC_MAX.
*/
enum rradc_channel_id {
- RR_ADC_BATT_ID_5 = 0,
- RR_ADC_BATT_ID_15,
- RR_ADC_BATT_ID_150,
- RR_ADC_BATT_ID,
+ RR_ADC_BATT_ID = 0,
RR_ADC_BATT_THERM,
RR_ADC_SKIN_TEMP,
- RR_ADC_USBIN_V,
RR_ADC_USBIN_I,
- RR_ADC_DCIN_V,
+ RR_ADC_USBIN_V,
RR_ADC_DCIN_I,
+ RR_ADC_DCIN_V,
RR_ADC_DIE_TEMP,
RR_ADC_CHG_TEMP,
RR_ADC_GPIO,
- RR_ADC_ATEST,
- RR_ADC_TM_ADC,
+ RR_ADC_CHG_HOT_TEMP,
+ RR_ADC_CHG_TOO_HOT_TEMP,
+ RR_ADC_SKIN_HOT_TEMP,
+ RR_ADC_SKIN_TOO_HOT_TEMP,
RR_ADC_MAX
};
@@ -205,51 +207,75 @@ struct rradc_channels {
long info_mask;
u8 lsb;
u8 msb;
+ u8 sts;
int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
u16 adc_code, int *result);
};
struct rradc_chan_prop {
enum rradc_channel_id channel;
+ uint32_t channel_data;
int (*scale)(struct rradc_chip *chip, struct rradc_chan_prop *prop,
u16 adc_code, int *result);
};
static int rradc_read(struct rradc_chip *rr_adc, u16 offset, u8 *data, int len)
{
- int rc = 0;
+ int rc = 0, retry_cnt = 0, i = 0;
+ u8 data_check[FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN];
+ bool coherent_err = false;
+
+ if (len > FG_RR_ADC_MAX_CONTINUOUS_BUFFER_LEN) {
+ pr_err("Increase the buffer length\n");
+ return -EINVAL;
+ }
+
+ while (retry_cnt < FG_RR_ADC_COHERENT_CHECK_RETRY) {
+ rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+ data, len);
+ if (rc < 0) {
+ pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+ return rc;
+ }
+
+ rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset,
+ data_check, len);
+ if (rc < 0) {
+ pr_err("rr_adc reg 0x%x failed :%d\n", offset, rc);
+ return rc;
+ }
+
+ for (i = 0; i < len; i++) {
+ if (data[i] != data_check[i])
+ coherent_err = true;
+ }
+
+ if (coherent_err) {
+ retry_cnt++;
+ coherent_err = false;
+ pr_debug("retry_cnt:%d\n", retry_cnt);
+ } else {
+ break;
+ }
+ }
- rc = regmap_bulk_read(rr_adc->regmap, rr_adc->base + offset, data, len);
- if (rc < 0)
- pr_err("rr adc read reg %d failed with %d\n", offset, rc);
+ if (retry_cnt == FG_RR_ADC_COHERENT_CHECK_RETRY)
+ pr_err("Retry exceeded for coherrency check\n");
return rc;
}
static int rradc_post_process_batt_id(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
- int *result_mohms)
+ int *result_ohms)
{
uint32_t current_value;
int64_t r_id;
- switch (prop->channel) {
- case RR_ADC_BATT_ID_5:
- current_value = FG_ADC_RR_BATT_ID_5_MA;
- break;
- case RR_ADC_BATT_ID_15:
- current_value = FG_ADC_RR_BATT_ID_15_MA;
- break;
- case RR_ADC_BATT_ID_150:
- current_value = FG_ADC_RR_BATT_ID_150_MA;
- break;
- default:
- return -EINVAL;
- }
-
+ current_value = prop->channel_data;
r_id = ((int64_t)adc_code * FG_ADC_RR_FS_VOLTAGE_MV);
r_id = div64_s64(r_id, (FG_MAX_ADC_READINGS * current_value));
- *result_mohms = (r_id * FG_ADC_SCALE_MILLI_FACTOR);
+ *result_ohms = (r_id * FG_ADC_SCALE_MILLI_FACTOR);
return 0;
}
@@ -270,30 +296,30 @@ static int rradc_post_process_therm(struct rradc_chip *chip,
static int rradc_post_process_volt(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
- int *result_mv)
+ int *result_uv)
{
- int64_t mv = 0;
+ int64_t uv = 0;
/* 8x input attenuation; 2.5V ADC full scale */
- mv = ((int64_t)adc_code * FG_ADC_RR_VOLT_INPUT_FACTOR);
- mv *= FG_ADC_RR_FS_VOLTAGE_MV;
- mv = div64_s64(mv, FG_MAX_ADC_READINGS);
- *result_mv = mv;
+ uv = ((int64_t)adc_code * FG_ADC_RR_VOLT_INPUT_FACTOR);
+ uv *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+ uv = div64_s64(uv, FG_MAX_ADC_READINGS);
+ *result_uv = uv;
return 0;
}
static int rradc_post_process_curr(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
- int *result_ma)
+ int *result_ua)
{
- int64_t ma = 0;
+ int64_t ua = 0;
/* 0.5 V/A; 2.5V ADC full scale */
- ma = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
- ma *= FG_ADC_RR_FS_VOLTAGE_MV;
- ma = div64_s64(ma, FG_MAX_ADC_READINGS);
- *result_ma = ma;
+ ua = ((int64_t)adc_code * FG_ADC_RR_CURR_INPUT_FACTOR);
+ ua *= (FG_ADC_RR_FS_VOLTAGE_MV * FG_ADC_SCALE_MILLI_FACTOR);
+ ua = div64_s64(ua, FG_MAX_ADC_READINGS);
+ *result_ua = ua;
return 0;
}
@@ -309,8 +335,41 @@ static int rradc_post_process_die_temp(struct rradc_chip *chip,
FG_MAX_ADC_READINGS));
temp -= FG_ADC_RR_DIE_TEMP_OFFSET;
temp = div64_s64(temp, FG_ADC_RR_DIE_TEMP_SLOPE);
- temp += FG_ADC_RR_DIE_TEMP_OFFSET_DEGC;
- *result_millidegc = (temp * FG_ADC_SCALE_MILLI_FACTOR);
+ temp += FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = temp;
+
+ return 0;
+}
+
+static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u16 adc_code,
+ int *result_millidegc)
+{
+ int64_t temp = 0;
+
+ temp = (int64_t) adc_code * 4;
+ temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+ temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ FG_MAX_ADC_READINGS));
+ temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
+ temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
+ temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = temp;
+
+ return 0;
+}
+
+static int rradc_post_process_skin_temp_hot(struct rradc_chip *chip,
+ struct rradc_chan_prop *prop, u16 adc_code,
+ int *result_millidegc)
+{
+ int64_t temp = 0;
+
+ temp = (int64_t) adc_code;
+ temp = div64_s64(temp, 2);
+ temp = temp - 30;
+ temp *= FG_ADC_SCALE_MILLI_FACTOR;
+ *result_millidegc = temp;
return 0;
}
@@ -326,8 +385,8 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
FG_MAX_ADC_READINGS));
temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_DEGC;
- *result_millidegc = (temp * FG_ADC_SCALE_MILLI_FACTOR);
+ temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = temp;
return 0;
}
@@ -346,63 +405,80 @@ static int rradc_post_process_gpio(struct rradc_chip *chip,
return 0;
}
-#define RR_ADC_CHAN(_dname, _type, _mask, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN(_dname, _type, _mask, _scale, _lsb, _msb, _sts) \
{ \
- .datasheet_name = __stringify(_dname), \
+ .datasheet_name = (_dname), \
.type = _type, \
.info_mask = _mask, \
.scale = _scale, \
.lsb = _lsb, \
.msb = _msb, \
+ .sts = _sts, \
}, \
-#define RR_ADC_CHAN_TEMP(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_TEMP(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_TEMP, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED), \
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
-#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_VOLT(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_VOLTAGE, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
-#define RR_ADC_CHAN_CURRENT(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_CURRENT(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_CURRENT, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
-#define RR_ADC_CHAN_RESISTANCE(_dname, _scale, _lsb, _msb) \
+#define RR_ADC_CHAN_RESISTANCE(_dname, _scale, _lsb, _msb, _sts) \
RR_ADC_CHAN(_dname, IIO_RESISTANCE, \
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_PROCESSED),\
- _scale, _lsb, _msb) \
+ _scale, _lsb, _msb, _sts) \
static const struct rradc_channels rradc_chans[] = {
- RR_ADC_CHAN_RESISTANCE("batt_id_5", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB)
- RR_ADC_CHAN_RESISTANCE("batt_id_15", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_15_LSB, FG_ADC_RR_BATT_ID_15_MSB)
- RR_ADC_CHAN_RESISTANCE("batt_id_150", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_150_LSB, FG_ADC_RR_BATT_ID_150_MSB)
RR_ADC_CHAN_RESISTANCE("batt_id", rradc_post_process_batt_id,
- FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB)
+ FG_ADC_RR_BATT_ID_5_LSB, FG_ADC_RR_BATT_ID_5_MSB,
+ FG_ADC_RR_BATT_ID_STS)
RR_ADC_CHAN_TEMP("batt_therm", &rradc_post_process_therm,
- FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB)
+ FG_ADC_RR_BATT_THERM_LSB, FG_ADC_RR_BATT_THERM_MSB,
+ FG_ADC_RR_BATT_THERM_STS)
RR_ADC_CHAN_TEMP("skin_temp", &rradc_post_process_therm,
- FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB)
+ FG_ADC_RR_SKIN_TEMP_LSB, FG_ADC_RR_SKIN_TEMP_MSB,
+ FG_ADC_RR_AUX_THERM_STS)
RR_ADC_CHAN_CURRENT("usbin_i", &rradc_post_process_curr,
- FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB)
+ FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB,
+ FG_ADC_RR_USB_IN_I_STS)
RR_ADC_CHAN_VOLT("usbin_v", &rradc_post_process_volt,
- FG_ADC_RR_USB_IN_I_LSB, FG_ADC_RR_USB_IN_I_MSB)
+ FG_ADC_RR_USB_IN_V_LSB, FG_ADC_RR_USB_IN_V_MSB,
+ FG_ADC_RR_USB_IN_V_STS)
RR_ADC_CHAN_CURRENT("dcin_i", &rradc_post_process_curr,
- FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB)
+ FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB,
+ FG_ADC_RR_DC_IN_I_STS)
RR_ADC_CHAN_VOLT("dcin_v", &rradc_post_process_volt,
- FG_ADC_RR_DC_IN_I_LSB, FG_ADC_RR_DC_IN_I_MSB)
+ FG_ADC_RR_DC_IN_V_LSB, FG_ADC_RR_DC_IN_V_MSB,
+ FG_ADC_RR_DC_IN_V_STS)
RR_ADC_CHAN_TEMP("die_temp", &rradc_post_process_die_temp,
- FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB)
+ FG_ADC_RR_PMI_DIE_TEMP_LSB, FG_ADC_RR_PMI_DIE_TEMP_MSB,
+ FG_ADC_RR_PMI_DIE_TEMP_STS)
RR_ADC_CHAN_TEMP("chg_temp", &rradc_post_process_chg_temp,
- FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB)
+ FG_ADC_RR_CHARGER_TEMP_LSB, FG_ADC_RR_CHARGER_TEMP_MSB,
+ FG_ADC_RR_CHARGER_TEMP_STS)
RR_ADC_CHAN_VOLT("gpio", &rradc_post_process_gpio,
- FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB)
+ FG_ADC_RR_GPIO_LSB, FG_ADC_RR_GPIO_MSB,
+ FG_ADC_RR_GPIO_STS)
+ RR_ADC_CHAN_TEMP("chg_temp_hot", &rradc_post_process_chg_temp_hot,
+ FG_ADC_RR_CHARGER_HOT, FG_ADC_RR_CHARGER_HOT,
+ FG_ADC_RR_CHARGER_TEMP_STS)
+ RR_ADC_CHAN_TEMP("chg_temp_too_hot", &rradc_post_process_chg_temp_hot,
+ FG_ADC_RR_CHARGER_TOO_HOT, FG_ADC_RR_CHARGER_TOO_HOT,
+ FG_ADC_RR_CHARGER_TEMP_STS)
+ RR_ADC_CHAN_TEMP("skin_temp_hot", &rradc_post_process_skin_temp_hot,
+ FG_ADC_RR_SKIN_HOT, FG_ADC_RR_SKIN_HOT,
+ FG_ADC_RR_AUX_THERM_STS)
+ RR_ADC_CHAN_TEMP("skin_temp_too_hot", &rradc_post_process_skin_temp_hot,
+ FG_ADC_RR_SKIN_TOO_HOT, FG_ADC_RR_SKIN_TOO_HOT,
+ FG_ADC_RR_AUX_THERM_STS)
};
static int rradc_do_conversion(struct rradc_chip *chip,
@@ -411,15 +487,44 @@ static int rradc_do_conversion(struct rradc_chip *chip,
int rc = 0, bytes_to_read = 0;
u8 buf[6];
u16 offset = 0, batt_id_5 = 0, batt_id_15 = 0, batt_id_150 = 0;
+ u16 status = 0;
mutex_lock(&chip->lock);
+ if ((prop->channel != RR_ADC_BATT_ID) &&
+ (prop->channel != RR_ADC_CHG_HOT_TEMP) &&
+ (prop->channel != RR_ADC_CHG_TOO_HOT_TEMP) &&
+ (prop->channel != RR_ADC_SKIN_HOT_TEMP) &&
+ (prop->channel != RR_ADC_SKIN_TOO_HOT_TEMP)) {
+ /* BATT_ID STS bit does not get set initially */
+ status = rradc_chans[prop->channel].sts;
+ rc = rradc_read(chip, status, buf, 1);
+ if (rc < 0) {
+ pr_err("status read failed:%d\n", rc);
+ goto fail;
+ }
+
+ buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
+ if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
+ pr_warn("%s is not ready; nothing to read\n",
+ rradc_chans[prop->channel].datasheet_name);
+ rc = -ENODATA;
+ goto fail;
+ }
+ }
+
offset = rradc_chans[prop->channel].lsb;
if (prop->channel == RR_ADC_BATT_ID)
bytes_to_read = 6;
+ else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+ (prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP))
+ bytes_to_read = 1;
else
bytes_to_read = 2;
+ buf[0] = 0;
rc = rradc_read(chip, offset, buf, bytes_to_read);
if (rc) {
pr_err("read data failed\n");
@@ -427,19 +532,33 @@ static int rradc_do_conversion(struct rradc_chip *chip,
}
if (prop->channel == RR_ADC_BATT_ID) {
- batt_id_150 = (buf[4] << 8) | buf[5];
- batt_id_15 = (buf[2] << 8) | buf[3];
- batt_id_5 = (buf[0] << 8) | buf[1];
+ batt_id_150 = (buf[5] << 8) | buf[4];
+ batt_id_15 = (buf[3] << 8) | buf[2];
+ batt_id_5 = (buf[1] << 8) | buf[0];
+ if ((!batt_id_150) && (!batt_id_15) && (!batt_id_5)) {
+ pr_err("Invalid batt_id values with all zeros\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
if (batt_id_150 <= FG_ADC_RR_BATT_ID_RANGE) {
pr_debug("Batt_id_150 is chosen\n");
*data = batt_id_150;
+ prop->channel_data = FG_ADC_RR_BATT_ID_150_MA;
} else if (batt_id_15 <= FG_ADC_RR_BATT_ID_RANGE) {
pr_debug("Batt_id_15 is chosen\n");
*data = batt_id_15;
+ prop->channel_data = FG_ADC_RR_BATT_ID_15_MA;
} else {
pr_debug("Batt_id_5 is chosen\n");
*data = batt_id_5;
+ prop->channel_data = FG_ADC_RR_BATT_ID_5_MA;
}
+ } else if ((prop->channel == RR_ADC_CHG_HOT_TEMP) ||
+ (prop->channel == RR_ADC_CHG_TOO_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_HOT_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TOO_HOT_TEMP)) {
+ *data = buf[0];
} else {
*data = (buf[1] << 8) | buf[0];
}
@@ -458,6 +577,11 @@ static int rradc_read_raw(struct iio_dev *indio_dev,
u16 adc_code;
int rc = 0;
+ if (chan->address >= RR_ADC_MAX) {
+ pr_err("Invalid channel index:%ld\n", chan->address);
+ return -EINVAL;
+ }
+
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
prop = &chip->chan_props[chan->address];
@@ -477,10 +601,6 @@ static int rradc_read_raw(struct iio_dev *indio_dev,
*val = (int) adc_code;
return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- *val = 0;
- *val2 = 1000;
- return IIO_VAL_INT_PLUS_MICRO;
default:
rc = -EINVAL;
break;
@@ -498,15 +618,11 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
{
const struct rradc_channels *rradc_chan;
struct iio_chan_spec *iio_chan;
- struct device_node *child;
- unsigned int index = 0, chan, base;
+ unsigned int i = 0, base;
int rc = 0;
struct rradc_chan_prop prop;
- chip->nchannels = of_get_available_child_count(node);
- if (!chip->nchannels || (chip->nchannels >= RR_ADC_MAX))
- return -EINVAL;
-
+ chip->nchannels = RR_ADC_MAX;
chip->iio_chans = devm_kcalloc(chip->dev, chip->nchannels,
sizeof(*chip->iio_chans), GFP_KERNEL);
if (!chip->iio_chans)
@@ -529,30 +645,21 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
chip->base = base;
iio_chan = chip->iio_chans;
- for_each_available_child_of_node(node, child) {
- rc = of_property_read_u32(child, "channel", &chan);
- if (rc) {
- dev_err(chip->dev, "invalid channel number %d\n", chan);
- return rc;
- }
-
- if (chan > RR_ADC_MAX || chan < RR_ADC_BATT_ID_5) {
- dev_err(chip->dev, "invalid channel number %d\n", chan);
- return -EINVAL;
- }
-
- prop.channel = chan;
- prop.scale = rradc_chans[chan].scale;
- chip->chan_props[index] = prop;
+ for (i = 0; i < RR_ADC_MAX; i++) {
+ prop.channel = i;
+ prop.scale = rradc_chans[i].scale;
+ /* Private channel data used for selecting batt_id */
+ prop.channel_data = 0;
+ chip->chan_props[i] = prop;
- rradc_chan = &rradc_chans[chan];
+ rradc_chan = &rradc_chans[i];
iio_chan->channel = prop.channel;
iio_chan->datasheet_name = rradc_chan->datasheet_name;
+ iio_chan->extend_name = rradc_chan->datasheet_name;
iio_chan->info_mask_separate = rradc_chan->info_mask;
iio_chan->type = rradc_chan->type;
- iio_chan->indexed = 1;
- iio_chan->address = index++;
+ iio_chan->address = i;
iio_chan++;
}
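
The reworked rradc_read() above guards against torn multi-byte results by reading the register block twice and retrying while the two passes disagree. A self-contained sketch of that double-read-and-compare loop, with read_block() as a hypothetical stand-in for regmap_bulk_read():

#include <string.h>

#define COHERENT_RETRIES 5
#define MAX_BLOCK_LEN    16

/* Hypothetical accessor standing in for regmap_bulk_read(). */
int read_block(unsigned int offset, unsigned char *buf, int len);

static int read_coherent(unsigned int offset, unsigned char *buf, int len)
{
	unsigned char check[MAX_BLOCK_LEN];
	int attempt, rc;

	if (len > MAX_BLOCK_LEN)
		return -1;

	for (attempt = 0; attempt < COHERENT_RETRIES; attempt++) {
		rc = read_block(offset, buf, len);
		if (rc < 0)
			return rc;

		rc = read_block(offset, check, len);
		if (rc < 0)
			return rc;

		if (memcmp(buf, check, len) == 0)
			return 0;	/* both passes agree */
	}

	return -1;	/* still incoherent after all retries */
}
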
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 022473473971..190d294197a7 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -588,9 +588,6 @@ static int gic_populate_rdist(void)
u64 offset = ptr - gic_data.redist_regions[i].redist_base;
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
- pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
- smp_processor_id(), mpidr, i,
- &gic_data_rdist()->phys_base);
return 0;
}
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index 9a09f87d8472..398f0086537a 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1848,6 +1848,15 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
if (reg_tbl && reg_tbl[reg_offset] == WCD934X_READ)
return true;
+ /*
+ * Need to mark volatile for registers that are writable but
+ * only few bits are read-only
+ */
+ switch (reg) {
+ case WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL:
+ return true;
+ }
+
return false;
}
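
wcd934x_is_volatile_register() above is the driver's regmap .volatile_reg callback; returning true for WCD934X_CPE_SS_SOC_SW_COLLAPSE_CTL keeps that register out of the cache so its partially read-only bits are always fetched from hardware. A hedged sketch of how such a predicate is typically wired up through struct regmap_config (the field values below are illustrative, not the wcd934x ones):

static const struct regmap_config example_regmap_config = {
	.reg_bits     = 16,
	.val_bits     = 8,
	.cache_type   = REGCACHE_RBTREE,
	.volatile_reg = wcd934x_is_volatile_register,
};
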
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 7d1b9e9121a9..0c6f1de2465b 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -456,6 +456,7 @@ struct hdcp_lib_handle {
atomic_t hdcp_off;
uint32_t session_id;
bool legacy_app;
+ enum hdcp_device_type device_type;
struct task_struct *thread;
struct completion topo_wait;
@@ -901,7 +902,7 @@ static int hdcp_lib_session_init(struct hdcp_lib_handle *handle)
req_buf =
(struct hdcp_lib_session_init_req *)handle->qseecom_handle->sbuf;
req_buf->commandid = HDCP_SESSION_INIT;
- req_buf->deviceid = HDCP_TXMTR_HDMI;
+ req_buf->deviceid = handle->device_type;
rsp_buf = (struct hdcp_lib_session_init_rsp *)
(handle->qseecom_handle->sbuf +
QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_init_req)));
@@ -2060,6 +2061,7 @@ int hdcp_library_register(struct hdcp_register_data *data)
handle->tethered = data->tethered;
handle->hdcp_app_init = NULL;
handle->hdcp_txmtr_init = NULL;
+ handle->device_type = data->device_type;
pr_debug("tethered %d\n", handle->tethered);
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 208a4ce1e40e..7a9307294a6d 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -369,6 +369,24 @@ int ipa_reset_endpoint(u32 clnt_hdl)
}
EXPORT_SYMBOL(ipa_reset_endpoint);
+/**
+* ipa_disable_endpoint() - Disable an endpoint from IPA perspective
+* @clnt_hdl: [in] IPA client handle
+*
+* Returns: 0 on success, negative on failure
+*
+* Note: Should not be called from atomic context
+*/
+int ipa_disable_endpoint(u32 clnt_hdl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_disable_endpoint);
+
/**
* ipa_cfg_ep - IPA end-point configuration
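
The new ipa_disable_endpoint() wrapper above dispatches through IPA_API_DISPATCH_RETURN, the function-pointer indirection used by every exported IPA API so the same symbol works on both IPA v2 and v3 hardware. A rough sketch of that dispatch idea (the real macro in ipa_api.c may differ; ipa_api_ctrl is assumed here to be the bound controller table):

/* Call the HW-specific hook bound by ipa2/ipa3_bind_api_controller(). */
#define EXAMPLE_API_DISPATCH_RETURN(api, ...)			\
	do {							\
		if (!ipa_api_ctrl || !ipa_api_ctrl->api)	\
			ret = -EPERM;				\
		else						\
			ret = ipa_api_ctrl->api(__VA_ARGS__);	\
	} while (0)
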
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 862bdc475025..3c2471dd11dd 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -26,6 +26,8 @@ struct ipa_api_controller {
int (*ipa_clear_endpoint_delay)(u32 clnt_hdl);
+ int (*ipa_disable_endpoint)(u32 clnt_hdl);
+
int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index 64246ac4eec0..66e329a03df7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -560,22 +560,30 @@ int ipa2_disconnect(u32 clnt_hdl)
if (!ep->keep_ipa_awake)
IPA_ACTIVE_CLIENTS_INC_EP(client_type);
- /* Set Disconnect in Progress flag. */
- spin_lock(&ipa_ctx->disconnect_lock);
- ep->disconnect_in_progress = true;
- spin_unlock(&ipa_ctx->disconnect_lock);
-
- /* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
- if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
- ipa_uc_monitor_holb(ep->client, false);
- IPADBG("Disabling holb monitor for client: %d\n", ep->client);
- }
+ /* For USB 2.0 controller, first the ep will be disabled.
+ * so this sequence is not needed again when disconnecting the pipe.
+ */
+ if (!ep->ep_disabled) {
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* Notify uc to stop monitoring holb on USB BAM
+ * Producer pipe.
+ */
+ if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+ ipa_uc_monitor_holb(ep->client, false);
+ IPADBG("Disabling holb monitor for client: %d\n",
+ ep->client);
+ }
- result = ipa_disable_data_path(clnt_hdl);
- if (result) {
- IPAERR("disable data path failed res=%d clnt=%d.\n", result,
- clnt_hdl);
- return -EPERM;
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n",
+ result, clnt_hdl);
+ return -EPERM;
+ }
}
result = sps_disconnect(ep->ep_hdl);
@@ -784,6 +792,82 @@ int ipa2_clear_endpoint_delay(u32 clnt_hdl)
}
/**
+ * ipa2_disable_endpoint() - low-level IPA client disable endpoint
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to
+ * disable the pipe from IPA in BAM-BAM mode.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl)
+{
+ int result;
+ struct ipa_ep_context *ep;
+ enum ipa_client_type client_type;
+ unsigned long bam;
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm.\n");
+ return -EINVAL;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ client_type = ipa2_get_client_mapping(clnt_hdl);
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+ /* Set Disconnect in Progress flag. */
+ spin_lock(&ipa_ctx->disconnect_lock);
+ ep->disconnect_in_progress = true;
+ spin_unlock(&ipa_ctx->disconnect_lock);
+
+ /* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
+ if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+ ipa_uc_monitor_holb(ep->client, false);
+ IPADBG("Disabling holb monitor for client: %d\n", ep->client);
+ }
+
+ result = ipa_disable_data_path(clnt_hdl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ clnt_hdl);
+ goto fail;
+ }
+
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ bam = ep->connect.source;
+ else
+ bam = ep->connect.destination;
+
+ result = sps_pipe_reset(bam, clnt_hdl);
+ if (result) {
+ IPAERR("SPS pipe reset failed.\n");
+ goto fail;
+ }
+
+ ep->ep_disabled = true;
+
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+ IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+ return 0;
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+ return -EPERM;
+}
+
+
+/**
* ipa_sps_connect_safe() - connect endpoint from BAM prespective
* @h: [in] sps pipe handle
* @connect: [in] sps connect parameters
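
ipa2_disable_endpoint() above sets ep->ep_disabled so that a later ipa2_disconnect() can skip the disable/HOLB sequence it already performed. An illustrative teardown order for a USB BAM client (error handling trimmed; assumes the existing ipa_disconnect() wrapper):

static int example_usb_pipe_teardown(u32 clnt_hdl)
{
	int rc;

	rc = ipa_disable_endpoint(clnt_hdl);	/* stop data path, reset SPS pipe */
	if (rc)
		return rc;

	return ipa_disconnect(clnt_hdl);	/* sps_disconnect() and resource teardown */
}
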
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 50caeb9a19ee..0bb863037772 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -3169,7 +3169,7 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
IPA_GENERIC_RX_BUFF_SZ(
ipa_adjust_ra_buff_base_sz(
in->ipa_ep_cfg.aggr.
- aggr_byte_limit));
+ aggr_byte_limit - IPA_HEADROOM));
in->ipa_ep_cfg.aggr.
aggr_byte_limit =
sys->rx_buff_sz < in->
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 790a0b41147e..62e026262663 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -581,7 +581,8 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
bad_len:
- hdr_entry->ref_cnt--;
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt--;
entry->cookie = 0;
kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry);
return -EPERM;
@@ -761,7 +762,7 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
}
if (release_hdr)
- __ipa_release_hdr(entry->hdr->id);
+ __ipa_del_hdr(entry->hdr->id);
/* move the offset entry to appropriate free list */
list_move(&entry->offset_entry->link,
@@ -1089,12 +1090,19 @@ int ipa2_reset_hdr(void)
&ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
/* do not remove the default header */
- if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME))
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ if (entry->is_hdr_proc_ctx) {
+ mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
+ IPAERR("default header is proc ctx\n");
+ return -EFAULT;
+ }
continue;
+ }
if (ipa_id_find(entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
if (entry->is_hdr_proc_ctx) {
@@ -1147,8 +1155,8 @@ int ipa2_reset_hdr(void)
link) {
if (ipa_id_find(ctx_entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
list_del(&ctx_entry->link);
@@ -1311,8 +1319,8 @@ int ipa2_put_hdr(u32 hdr_hdl)
goto bail;
}
- if (entry == NULL || entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("invalid header entry\n");
result = -EINVAL;
goto bail;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 0a0b23815ce3..6a5b779b24f8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -242,7 +242,7 @@ struct ipa_rt_tbl {
* @is_partial: flag indicating if header table entry is partial
* @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
* true - hdr entry resides in DDR and pointed to by proc ctx
- * @phys_base: physical address of entry in SRAM when is_hdr_proc_ctx is true,
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
* else 0
* @proc_ctx: processing context header
* @offset_entry: entry's offset
@@ -553,6 +553,7 @@ struct ipa_ep_context {
bool switch_to_intr;
int inactive_cycles;
u32 eot_in_poll_err;
+ bool ep_disabled;
/* sys MUST be the last element of this struct */
struct ipa_sys_context *sys;
@@ -1431,6 +1432,11 @@ int ipa2_reset_endpoint(u32 clnt_hdl);
int ipa2_clear_endpoint_delay(u32 clnt_hdl);
/*
+ * Disable ep
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl);
+
+/*
* Configuration
*/
int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
index 7c10c4cee150..e8f25c9c23d3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
@@ -276,8 +276,6 @@ fail_ep_exists:
*/
int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
{
- struct ipa_ep_context *ep;
-
IPA_MHI_FUNC_ENTRY();
if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
@@ -290,7 +288,8 @@ int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
return -EINVAL;
}
- ep->valid = 0;
+ ipa_ctx->ep[clnt_hdl].valid = 0;
+
ipa_delete_dflt_flt_rules(clnt_hdl);
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -302,14 +301,13 @@ int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
bool LPTransitionRejected, bool brstmode_enabled,
union __packed gsi_channel_scratch ch_scratch, u8 index)
{
- int i;
int res;
IPA_MHI_FUNC_ENTRY();
res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected);
if (res) {
- IPA_MHI_ERR("failed to suspend channel %d error %d\n",
- i, res);
+ IPA_MHI_ERR("failed to suspend channel %u error %d\n",
+ index, res);
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index 9d4704ded0c3..15476f38cf44 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1008,6 +1008,10 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
return 0;
ipa_insert_failed:
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
list_del(&entry->link);
kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
error:
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 69052eb289bb..1d88082352c6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -4949,6 +4949,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_disconnect = ipa2_disconnect;
api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
+ api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 50e820992f29..2420dd78b4c0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1144,14 +1144,16 @@ static void apps_ipa_tx_complete_notify(void *priv,
struct net_device *dev = (struct net_device *)priv;
struct wwan_private *wwan_ptr;
- if (evt != IPA_WRITE_DONE) {
- IPAWANDBG("unsupported event on Tx callback\n");
+ if (dev != ipa_netdevs[0]) {
+ IPAWANDBG("Received pre-SSR packet completion\n");
+ dev_kfree_skb_any(skb);
return;
}
- if (dev != ipa_netdevs[0]) {
- IPAWANDBG("Received pre-SSR packet completion\n");
+ if (evt != IPA_WRITE_DONE) {
+ IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 3dd9738f67c7..33066e8b9c19 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1899,44 +1899,43 @@ static int ipa3_q6_clean_q6_tables(void)
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
- goto bail_desc;
+ return -EFAULT;
}
/* Flush rules cache */
desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
if (!desc) {
IPAERR("failed to allocate memory\n");
- retval = -ENOMEM;
- goto bail_dma;
+ return -ENOMEM;
}
flush.v4_flt = true;
@@ -1953,6 +1952,7 @@ static int ipa3_q6_clean_q6_tables(void)
&reg_write_cmd, false);
if (!cmd_pyld) {
IPAERR("fail construct register_write imm cmd\n");
+ retval = -EFAULT;
goto bail_desc;
}
desc->opcode =
@@ -1969,9 +1969,9 @@ static int ipa3_q6_clean_q6_tables(void)
}
ipahal_destroy_imm_cmd(cmd_pyld);
+
bail_desc:
kfree(desc);
-bail_dma:
IPADBG("Done - retval = %d\n", retval);
return retval;
}
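
The ipa.c hunk above returns directly from every failure that occurs before desc is allocated and keeps the bail_desc label only for paths that must free it, dropping the now-unused bail_dma label. A minimal sketch of the resulting idiom, with prepare_step()/late_step() as hypothetical stand-ins:

int prepare_step(void);	/* hypothetical */
int late_step(void);	/* hypothetical */

static int example_cleanup_idiom(void)
{
	void *desc;
	int retval = 0;

	if (prepare_step())
		return -EFAULT;		/* nothing allocated yet: plain return */

	desc = kzalloc(32, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	if (late_step()) {
		retval = -EFAULT;
		goto bail_desc;		/* desc must be freed from here on */
	}

bail_desc:
	kfree(desc);
	return retval;
}
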
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 029647213531..11da023c9d6a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -418,7 +418,8 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
bad_len:
- hdr_entry->ref_cnt--;
+ if (add_ref_hdr)
+ hdr_entry->ref_cnt--;
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
return -EPERM;
@@ -589,7 +590,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
}
if (release_hdr)
- __ipa3_release_hdr(entry->hdr->id);
+ __ipa3_del_hdr(entry->hdr->id);
/* move the offset entry to appropriate free list */
list_move(&entry->offset_entry->link,
@@ -893,12 +894,19 @@ int ipa3_reset_hdr(void)
&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
/* do not remove the default header */
- if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME))
+ if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+ if (entry->is_hdr_proc_ctx) {
+ IPAERR("default header is proc ctx\n");
+ mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
+ return -EFAULT;
+ }
continue;
+ }
if (ipa3_id_find(entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
if (entry->is_hdr_proc_ctx) {
@@ -951,8 +959,8 @@ int ipa3_reset_hdr(void)
link) {
if (ipa3_id_find(ctx_entry->id) == NULL) {
- WARN_ON(1);
mutex_unlock(&ipa3_ctx->lock);
+ WARN_ON(1);
return -EFAULT;
}
list_del(&ctx_entry->link);
@@ -1115,8 +1123,8 @@ int ipa3_put_hdr(u32 hdr_hdl)
goto bail;
}
- if (entry == NULL || entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_COOKIE) {
+ IPAERR("invalid header entry\n");
result = -EINVAL;
goto bail;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index cce05cf31b3c..97a3117d44e9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -271,7 +271,7 @@ struct ipa3_rt_tbl {
* @is_partial: flag indicating if header table entry is partial
* @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
* true - hdr entry resides in DDR and pointed to by proc ctx
- * @phys_base: physical address of entry in SRAM when is_hdr_proc_ctx is true,
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
* else 0
* @proc_ctx: processing context header
* @offset_entry: entry's offset
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 14e2f1f4c510..e83c249ad425 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -549,11 +549,6 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
return res;
}
}
- if (res) {
- IPA_MHI_ERR("failed to resume channel error %d\n",
- res);
- return res;
- }
res = gsi_start_channel(ep->gsi_chan_hdl);
if (res) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 138db3dbde84..b06e33a8258a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -957,6 +957,10 @@ static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
return 0;
ipa_insert_failed:
+ if (entry->hdr)
+ entry->hdr->ref_cnt--;
+ else if (entry->proc_ctx)
+ entry->proc_ctx->ref_cnt--;
idr_remove(&tbl->rule_ids, entry->rule_id);
list_del(&entry->link);
kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 2c2708c4e2f3..5499eba92b1c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3046,6 +3046,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_disconnect = ipa3_disconnect;
api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
+ api_ctrl->ipa_disable_endpoint = NULL;
api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 4f6097c6da35..6c4d14b093c3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1215,7 +1215,10 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
(!phys_base && !hdr_base_addr) ||
!hdr_base_addr ||
((is_hdr_proc_ctx == false) && !offset_entry)) {
- IPAHAL_ERR("failed on validating params");
+ IPAHAL_ERR(
+ "invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+ , hdr_len, &phys_base, hdr_base_addr
+ , is_hdr_proc_ctx, offset_entry);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a4eab02cb571..aebdaab3ac77 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1157,14 +1157,16 @@ static void apps_ipa_tx_complete_notify(void *priv,
struct net_device *dev = (struct net_device *)priv;
struct ipa3_wwan_private *wwan_ptr;
- if (evt != IPA_WRITE_DONE) {
- IPAWANDBG("unsupported event on Tx callback\n");
+ if (dev != IPA_NETDEV()) {
+ IPAWANDBG("Received pre-SSR packet completion\n");
+ dev_kfree_skb_any(skb);
return;
}
- if (dev != IPA_NETDEV()) {
- IPAWANDBG("Received pre-SSR packet completion\n");
+ if (evt != IPA_WRITE_DONE) {
+ IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
return;
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 61f611296ad6..a45a5d103040 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -265,6 +265,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(typec_power_role),
POWER_SUPPLY_ATTR(pd_allowed),
POWER_SUPPLY_ATTR(pd_active),
+ POWER_SUPPLY_ATTR(charger_temp),
+ POWER_SUPPLY_ATTR(charger_temp_max),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
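
power_supply_attrs[] above is indexed by enum power_supply_property, so the two new attribute names must sit in the same relative position as the enum values they expose. A hedged sketch of the matching additions expected in include/linux/power_supply.h (surrounding entries abbreviated and inferred from the attribute names):

enum power_supply_property {
	/* ... existing properties ... */
	POWER_SUPPLY_PROP_PD_ACTIVE,
	POWER_SUPPLY_PROP_CHARGER_TEMP,		/* new */
	POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,	/* new */
	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
	/* ... */
};
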
diff --git a/drivers/power/qcom-charger/qpnp-qnovo.c b/drivers/power/qcom-charger/qpnp-qnovo.c
index d50188a5efbf..2418b112d670 100644
--- a/drivers/power/qcom-charger/qpnp-qnovo.c
+++ b/drivers/power/qcom-charger/qpnp-qnovo.c
@@ -153,7 +153,7 @@ struct qnovo {
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
- struct votable *fcc_votable;
+ struct votable *fcc_max_votable;
struct votable *fv_votable;
};
@@ -243,8 +243,9 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
vote(chip->fv_votable, QNOVO_VOTER, false, 0);
}
if (chip->fcc_uA_request != -EINVAL) {
- if (chip->fcc_votable)
- vote(chip->fcc_votable, QNOVO_VOTER, false, 0);
+ if (chip->fcc_max_votable)
+ vote(chip->fcc_max_votable, QNOVO_VOTER,
+ false, 0);
}
}
@@ -265,10 +266,10 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
true, chip->fv_uV_request);
}
if (chip->fcc_uA_request != -EINVAL) {
- if (!chip->fcc_votable)
- chip->fcc_votable = find_votable("FCC");
- if (chip->fcc_votable)
- vote(chip->fcc_votable, QNOVO_VOTER,
+ if (!chip->fcc_max_votable)
+ chip->fcc_max_votable = find_votable("FCC_MAX");
+ if (chip->fcc_max_votable)
+ vote(chip->fcc_max_votable, QNOVO_VOTER,
true, chip->fcc_uA_request);
}
}
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index a3a4591f05ed..08e64973d588 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -146,7 +146,7 @@ static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
struct device_node *node = chg->dev->of_node;
- int rc;
+ int rc, byte_len;
if (!node) {
pr_err("device tree node missing\n");
@@ -181,6 +181,25 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chip->dt.wipower_max_uw = SMB2_DEFAULT_WPWR_UW;
+ if (of_find_property(node, "qcom,thermal-mitigation", &byte_len)) {
+ chg->thermal_mitigation = devm_kzalloc(chg->dev, byte_len,
+ GFP_KERNEL);
+
+ if (chg->thermal_mitigation == NULL)
+ return -ENOMEM;
+
+ chg->thermal_levels = byte_len / sizeof(u32);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chg->thermal_mitigation,
+ chg->thermal_levels);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't read threm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
return 0;
}
@@ -350,6 +369,105 @@ static int smb2_init_usb_psy(struct smb2 *chip)
}
/*************************
+ * DC PSY REGISTRATION *
+ *************************/
+
+static enum power_supply_property smb2_dc_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smb2_dc_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smblib_get_prop_dc_present(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ rc = smblib_get_prop_dc_online(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_get_prop_dc_current_max(chg, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smb2_dc_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = smblib_set_prop_dc_current_max(chg, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smb2_dc_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int rc;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc dc_psy_desc = {
+ .name = "dc",
+ .type = POWER_SUPPLY_TYPE_WIPOWER,
+ .properties = smb2_dc_props,
+ .num_properties = ARRAY_SIZE(smb2_dc_props),
+ .get_property = smb2_dc_get_prop,
+ .set_property = smb2_dc_set_prop,
+ .property_is_writeable = smb2_dc_prop_is_writeable,
+};
+
+static int smb2_init_dc_psy(struct smb2 *chip)
+{
+ struct power_supply_config dc_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ dc_cfg.drv_data = chip;
+ dc_cfg.of_node = chg->dev->of_node;
+ chg->dc_psy = devm_power_supply_register(chg->dev,
+ &dc_psy_desc,
+ &dc_cfg);
+ if (IS_ERR(chg->dc_psy)) {
+ pr_err("Couldn't register USB power supply\n");
+ return PTR_ERR(chg->dc_psy);
+ }
+
+ return 0;
+}
+
+/*************************
* BATT PSY REGISTRATION *
*************************/
@@ -360,6 +478,7 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -387,9 +506,11 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
smblib_get_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smblib_get_prop_system_temp_level(chg, val);
+ break;
default:
- pr_err("batt power supply prop %d not supported\n",
- psp);
+ pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
}
@@ -400,17 +521,21 @@ static int smb2_batt_set_prop(struct power_supply *psy,
enum power_supply_property prop,
const union power_supply_propval *val)
{
+ int rc = 0;
struct smb_charger *chg = power_supply_get_drvdata(psy);
switch (prop) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
- smblib_set_prop_input_suspend(chg, val);
+ rc = smblib_set_prop_input_suspend(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ rc = smblib_set_prop_system_temp_level(chg, val);
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+ return rc;
}
static int smb2_batt_prop_is_writeable(struct power_supply *psy,
@@ -418,6 +543,7 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
return 1;
default:
break;
@@ -608,7 +734,7 @@ static int smb2_init_hw(struct smb2 *chip)
DEFAULT_VOTER, chip->dt.suspend_input, 0);
vote(chg->dc_suspend_votable,
DEFAULT_VOTER, chip->dt.suspend_input, 0);
- vote(chg->fcc_votable,
+ vote(chg->fcc_max_votable,
DEFAULT_VOTER, true, chip->dt.fcc_ua);
vote(chg->fv_votable,
DEFAULT_VOTER, true, chip->dt.fv_uv);
@@ -617,12 +743,16 @@ static int smb2_init_hw(struct smb2 *chip)
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
- /* configure charge enable for software control; active high */
+ /*
+ * Configure charge enable for software control; active high, and end
+ * the charge cycle while the battery is OV.
+ */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
- CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
+ CHG_EN_POLARITY_BIT |
+ CHG_EN_SRC_BIT |
+ BAT_OV_ECC_BIT, BAT_OV_ECC_BIT);
if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure charge enable source rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
return rc;
}
@@ -903,6 +1033,12 @@ static int smb2_probe(struct platform_device *pdev)
goto cleanup;
}
+ rc = smb2_init_dc_psy(chip);
+ if (rc < 0) {
+ pr_err("Couldn't initialize dc psy rc=%d\n", rc);
+ goto cleanup;
+ }
+
rc = smb2_init_usb_psy(chip);
if (rc < 0) {
pr_err("Couldn't initialize usb psy rc=%d\n", rc);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 43c360d98b69..8fe882e078f0 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -400,6 +400,14 @@ static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
return smblib_set_dc_suspend(chg, suspend);
}
+static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
+ int fcc_ua, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ return vote(chg->fcc_votable, FCC_MAX_RESULT, true, fcc_ua);
+}
+
static int smblib_fcc_vote_callback(struct votable *votable, void *data,
int fcc_ua, const char *client)
{
@@ -850,6 +858,13 @@ done:
return rc;
}
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = chg->system_temp_level;
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -877,6 +892,101 @@ int smblib_set_prop_input_suspend(struct smb_charger *chg,
return rc;
}
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ if (val->intval < 0)
+ return -EINVAL;
+
+ if (chg->thermal_levels <= 0)
+ return -EINVAL;
+
+ if (val->intval > chg->thermal_levels)
+ return -EINVAL;
+
+ chg->system_temp_level = val->intval;
+ if (chg->system_temp_level == chg->thermal_levels)
+ return vote(chg->chg_disable_votable, THERMAL_DAEMON, true, 0);
+
+ vote(chg->chg_disable_votable, THERMAL_DAEMON, false, 0);
+ if (chg->system_temp_level == 0)
+ return vote(chg->fcc_votable, THERMAL_DAEMON, false, 0);
+
+ vote(chg->fcc_votable, THERMAL_DAEMON, true,
+ chg->thermal_mitigation[chg->system_temp_level]);
+ return 0;
+}
+
+/*******************
+ * DC PSY GETTERS *
+ *******************/
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ rc = smblib_read(chg, DC_INT_RT_STS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read DC_INT_RT_STS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "DC_INT_RT_STS_REG = 0x%02x\n",
+ stat);
+
+ val->intval = (bool)(stat & DCIN_PLUGIN_RT_STS_BIT);
+
+ return rc;
+}
+
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc = 0;
+ u8 stat;
+
+ if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
+ val->intval = false;
+ return rc;
+ }
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+ smblib_dbg(chg, PR_REGISTER, "POWER_PATH_STATUS = 0x%02x\n",
+ stat);
+
+ val->intval = (stat & USE_DCIN_BIT) &&
+ (stat & VALID_INPUT_POWER_SOURCE_BIT);
+
+ return rc;
+}
+
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = get_effective_result_locked(chg->dc_icl_votable);
+ return 0;
+}
+
+/*******************
+ * DC PSY SETTERS  *
+ *******************/
+
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ rc = vote(chg->dc_icl_votable, USER_VOTER, true, val->intval);
+ return rc;
+}
+
/*******************
* USB PSY GETTERS *
*******************/
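Note on the system_temp_level setter added above: it is a small decision ladder in which the highest level disables charging outright, level 0 drops the thermal FCC vote, and any intermediate level votes the matching qcom,thermal-mitigation entry. The following is a minimal host-side sketch of that ladder; the mitigation table values and helper names are invented for illustration and are not the driver's actual data.

#include <assert.h>
#include <stdio.h>

/* Hypothetical mitigation table in uA; real values come from devicetree. */
static const int mitigation_ua[] = { 3000000, 2000000, 1500000, 1000000 };
static const int num_levels = 4;

enum action { CHG_DISABLE, VOTE_REMOVED, VOTE_LIMIT };

/* Sketch of the ladder in smblib_set_prop_system_temp_level(). */
static int apply_temp_level(int level, enum action *act, int *limit_ua)
{
	if (level < 0 || num_levels <= 0 || level > num_levels)
		return -1;			/* -EINVAL in the driver */
	if (level == num_levels) {
		*act = CHG_DISABLE;		/* vote chg_disable_votable */
		return 0;
	}
	if (level == 0) {
		*act = VOTE_REMOVED;		/* drop the THERMAL_DAEMON fcc vote */
		return 0;
	}
	*act = VOTE_LIMIT;			/* vote mitigation_ua[level] */
	*limit_ua = mitigation_ua[level];
	return 0;
}

int main(void)
{
	enum action act;
	int ua = 0;

	assert(apply_temp_level(2, &act, &ua) == 0 && act == VOTE_LIMIT);
	printf("level 2 -> limit %d uA\n", ua);		/* 1500000 */
	assert(apply_temp_level(4, &act, &ua) == 0 && act == CHG_DISABLE);
	assert(apply_temp_level(5, &act, &ua) < 0);	/* out of range */
	return 0;
}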
@@ -1695,7 +1805,15 @@ int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->fcc_votable = create_votable("FCC", VOTE_MAX,
+ chg->fcc_max_votable = create_votable("FCC_MAX", VOTE_MAX,
+ smblib_fcc_max_vote_callback,
+ chg);
+ if (IS_ERR(chg->fcc_max_votable)) {
+ rc = PTR_ERR(chg->fcc_max_votable);
+ return rc;
+ }
+
+ chg->fcc_votable = create_votable("FCC", VOTE_MIN,
smblib_fcc_vote_callback,
chg);
if (IS_ERR(chg->fcc_votable)) {
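For context, the FCC plumbing above now runs through two votables: FCC_MAX aggregates with VOTE_MAX and its callback forwards the winning value into FCC, which aggregates with VOTE_MIN so clients such as THERMAL_DAEMON can only lower the effective limit. The following is a self-contained toy model of that max-then-min aggregation; the semantics assumed for VOTE_MAX/VOTE_MIN are an interpretation of the pmic-voter behaviour, not the driver code itself.

#include <stdio.h>

/* Toy model: each client either abstains (enabled == 0) or votes a value. */
struct vote { int enabled; int value_ua; };

static int aggregate(const struct vote *v, int n, int want_max)
{
	int i, res = -1;

	for (i = 0; i < n; i++) {
		if (!v[i].enabled)
			continue;
		if (res < 0 || (want_max ? v[i].value_ua > res
					 : v[i].value_ua < res))
			res = v[i].value_ua;
	}
	return res;	/* -1 means no effective vote */
}

int main(void)
{
	/* FCC_MAX (VOTE_MAX): e.g. DEFAULT_VOTER and QNOVO_VOTER ceilings. */
	struct vote fcc_max[] = { {1, 2500000}, {1, 3000000} };
	/* FCC (VOTE_MIN): FCC_MAX_RESULT plus a thermal clamp. */
	struct vote fcc[] = { {1, 0 /* filled below */}, {1, 1500000} };

	fcc[0].value_ua = aggregate(fcc_max, 2, 1);	/* callback forwards the max */
	printf("effective FCC = %d uA\n", aggregate(fcc, 2, 0));
	/* Prints 1500000: the thermal vote wins because FCC takes the minimum. */
	return 0;
}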
@@ -1805,6 +1923,7 @@ int smblib_deinit(struct smb_charger *chg)
{
destroy_votable(chg->usb_suspend_votable);
destroy_votable(chg->dc_suspend_votable);
+ destroy_votable(chg->fcc_max_votable);
destroy_votable(chg->fcc_votable);
destroy_votable(chg->fv_votable);
destroy_votable(chg->usb_icl_votable);
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 06a4428ffd13..1521fdb3fccf 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -31,6 +31,8 @@ enum print_reason {
#define CHG_STATE_VOTER "CHG_STATE_VOTER"
#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
#define TAPER_END_VOTER "TAPER_END_VOTER"
+#define FCC_MAX_RESULT "FCC_MAX_RESULT"
+#define THERMAL_DAEMON "THERMAL_DAEMON"
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -93,6 +95,7 @@ struct smb_charger {
/* power supplies */
struct power_supply *batt_psy;
struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct power_supply_desc usb_psy_desc;
/* parallel charging */
@@ -106,6 +109,7 @@ struct smb_charger {
/* votables */
struct votable *usb_suspend_votable;
struct votable *dc_suspend_votable;
+ struct votable *fcc_max_votable;
struct votable *fcc_votable;
struct votable *fv_votable;
struct votable *usb_icl_votable;
@@ -126,6 +130,10 @@ struct smb_charger {
int voltage_max_uv;
bool pd_active;
bool vbus_present;
+
+ int system_temp_level;
+ int thermal_levels;
+ int *thermal_mitigation;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -172,8 +180,22 @@ int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_system_temp_level(struct smb_charger *chg,
+ union power_supply_propval *val);
+
int smblib_set_prop_input_suspend(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_system_temp_level(struct smb_charger *chg,
+ const union power_supply_propval *val);
+
+int smblib_get_prop_dc_present(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_online(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_prop_dc_current_max(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_set_prop_dc_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_get_prop_usb_present(struct smb_charger *chg,
union power_supply_propval *val);
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index 5af01c229f01..b03e8a7e0403 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -641,6 +641,9 @@ enum {
#define WIPWR_RANGE_STATUS_REG (DCIN_BASE + 0x08)
#define WIPWR_RANGE_STATUS_MASK GENMASK(4, 0)
+#define DC_INT_RT_STS_REG (DCIN_BASE + 0x10)
+#define DCIN_PLUGIN_RT_STS_BIT BIT(4)
+
/* DCIN Interrupt Bits */
#define WIPWR_VOLTAGE_RANGE_RT_STS_BIT BIT(7)
#define DCIN_ICL_CHANGE_RT_STS_BIT BIT(6)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 45dc329a776e..3f8aa534c220 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -374,6 +374,15 @@ config QCOM_WATCHDOG_V2
deadlocks. It does not run during the bootup process, so it will
not catch any early lockups.
+config QCOM_IRQ_HELPER
+ bool "QCOM Irq Helper"
+ help
+ This enables the irq helper module. It exposes two APIs,
+ int irq_blacklist_on(void) and int irq_blacklist_off(void),
+ to other kernel modules.
+ These two APIs are used to control the blacklist used
+ by the irq balancer.
+
config QCOM_MEMORY_DUMP
bool "Qualcomm Memory Dump Support"
help
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index adbf2dc7a166..f8450a4868ad 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
obj-$(CONFIG_QCOM_DCC) += dcc.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o
+obj-$(CONFIG_QCOM_IRQ_HELPER) += irq-helper.o
obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o
obj-$(CONFIG_SOC_BUS) += socinfo.o
diff --git a/drivers/soc/qcom/irq-helper.c b/drivers/soc/qcom/irq-helper.c
new file mode 100644
index 000000000000..270a1ba9ba19
--- /dev/null
+++ b/drivers/soc/qcom/irq-helper.c
@@ -0,0 +1,179 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+
+struct irq_helper {
+ bool enable;
+ bool deploy;
+ uint32_t count;
+ struct kobject kobj;
+ /* spinlock to protect reference count variable 'count' */
+ spinlock_t lock;
+};
+
+struct irq_helper_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define IRQ_HELPER_ATTR(_name, _mode, _show, _store) \
+ struct irq_helper_attr irq_helper_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define to_irq_helper(kobj) \
+ container_of(kobj, struct irq_helper, kobj)
+
+#define to_irq_helper_attr(_attr) \
+ container_of(_attr, struct irq_helper_attr, attr)
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct irq_helper_attr *irq_attr = to_irq_helper_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (irq_attr->show)
+ ret = irq_attr->show(kobj, attr, buf);
+
+ return ret;
+}
+
+static const struct sysfs_ops irq_helper_sysfs_ops = {
+ .show = attr_show,
+};
+
+static struct kobj_type irq_helper_ktype = {
+ .sysfs_ops = &irq_helper_sysfs_ops,
+};
+
+static ssize_t show_deploy(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct irq_helper *irq = to_irq_helper(kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", irq->deploy);
+}
+IRQ_HELPER_ATTR(irq_blacklist_on, 0444, show_deploy, NULL);
+
+static struct irq_helper *irq_h;
+
+int irq_blacklist_on(void)
+{
+ bool flag = false;
+
+ if (!irq_h) {
+ pr_err("%s: init function is not called", __func__);
+ return -EPERM;
+ }
+ if (!irq_h->enable) {
+ pr_err("%s: enable bit is not set up", __func__);
+ return -EPERM;
+ }
+ spin_lock(&irq_h->lock);
+ irq_h->count++;
+ if (!irq_h->deploy) {
+ irq_h->deploy = true;
+ flag = true;
+ }
+ spin_unlock(&irq_h->lock);
+ if (flag)
+ sysfs_notify(&irq_h->kobj, NULL, "irq_blacklist_on");
+ return 0;
+}
+EXPORT_SYMBOL(irq_blacklist_on);
+
+int irq_blacklist_off(void)
+{
+ bool flag = false;
+
+ if (!irq_h) {
+ pr_err("%s: init function is not called", __func__);
+ return -EPERM;
+ }
+ if (!irq_h->enable) {
+ pr_err("%s: enable bit is not set up", __func__);
+ return -EPERM;
+ }
+ spin_lock(&irq_h->lock);
+ if (irq_h->count == 0) {
+ pr_err("%s: ref-count is 0, cannot call irq blacklist off.",
+ __func__);
+ spin_unlock(&irq_h->lock);
+ return -EPERM;
+ }
+ irq_h->count--;
+ if (irq_h->count == 0) {
+ irq_h->deploy = false;
+ flag = true;
+ }
+ spin_unlock(&irq_h->lock);
+
+ if (flag)
+ sysfs_notify(&irq_h->kobj, NULL, "irq_blacklist_on");
+ return 0;
+}
+EXPORT_SYMBOL(irq_blacklist_off);
+
+static int __init irq_helper_init(void)
+{
+ int ret;
+
+ irq_h = kzalloc(sizeof(struct irq_helper), GFP_KERNEL);
+ if (!irq_h)
+ return -ENOMEM;
+
+ ret = kobject_init_and_add(&irq_h->kobj, &irq_helper_ktype,
+ kernel_kobj, "%s", "irq_helper");
+ if (ret) {
+ pr_err("%s:Error in creation kobject_add\n", __func__);
+ goto out_free_irq;
+ }
+
+ ret = sysfs_create_file(&irq_h->kobj,
+ &irq_helper_irq_blacklist_on.attr);
+ if (ret) {
+ pr_err("%s:Error in sysfs_create_file\n", __func__);
+ goto out_put_kobj;
+ }
+
+ spin_lock_init(&irq_h->lock);
+ irq_h->count = 0;
+ irq_h->enable = true;
+ return 0;
+out_put_kobj:
+ kobject_put(&irq_h->kobj);
+out_free_irq:
+ kfree(irq_h);
+ return ret;
+}
+module_init(irq_helper_init);
+
+static void __exit irq_helper_exit(void)
+{
+ sysfs_remove_file(&irq_h->kobj, &irq_helper_irq_blacklist_on.attr);
+ kobject_del(&irq_h->kobj);
+ kobject_put(&irq_h->kobj);
+ kfree(irq_h);
+}
+module_exit(irq_helper_exit);
+MODULE_DESCRIPTION("IRQ Helper APIs");
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 4f29923e054c..b8cef11f4067 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -528,10 +528,11 @@ static struct msm_soc_info cpu_of_id[] = {
[270] = {MSM_CPU_8929, "MSM8229"},
[271] = {MSM_CPU_8929, "APQ8029"},
- /* Cobalt ID */
+ /* Cobalt IDs */
[292] = {MSM_CPU_COBALT, "MSMCOBALT"},
+ [319] = {MSM_CPU_COBALT, "APQCOBALT"},
- /* Cobalt ID */
+ /* Hamster ID */
[306] = {MSM_CPU_HAMSTER, "MSMHAMSTER"},
/* falcon ID */
@@ -1205,6 +1206,10 @@ static void * __init setup_dummy_socinfo(void)
dummy_socinfo.id = 317;
strlcpy(dummy_socinfo.build_id, "msmfalcon - ",
sizeof(dummy_socinfo.build_id));
+ } else if (early_machine_is_apqcobalt()) {
+ dummy_socinfo.id = 319;
+ strlcpy(dummy_socinfo.build_id, "apqcobalt - ",
+ sizeof(dummy_socinfo.build_id));
}
strlcat(dummy_socinfo.build_id, "Dummy socinfo",
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 5e1fd988b22c..b02e48185355 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -97,6 +97,17 @@ enum pmic_arb_cmd_op_code {
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
+#define HWIRQ(slave_id, periph_id, irq_id, apid) \
+ ((((slave_id) & 0xF) << 28) | \
+ (((periph_id) & 0xFF) << 20) | \
+ (((irq_id) & 0x7) << 16) | \
+ (((apid) & 0x1FF) << 0))
+
+#define HWIRQ_SID(hwirq) (((hwirq) >> 28) & 0xF)
+#define HWIRQ_PER(hwirq) (((hwirq) >> 20) & 0xFF)
+#define HWIRQ_IRQ(hwirq) (((hwirq) >> 16) & 0x7)
+#define HWIRQ_APID(hwirq) (((hwirq) >> 0) & 0x1FF)
+
struct pmic_arb_ver_ops;
struct apid_data {
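The HWIRQ packing added above widens the apid field to 9 bits, which is why the old "u8 apid = d->hwirq" truncation had to go. The encode/decode macros are easy to sanity-check in isolation; a small host-side round-trip test using the same bit layout (standalone C, not kernel code) is:

#include <assert.h>
#include <stdio.h>

#define HWIRQ(sid, per, irq, apid) \
	((((sid)  & 0xF)   << 28) | \
	 (((per)  & 0xFF)  << 20) | \
	 (((irq)  & 0x7)   << 16) | \
	 (((apid) & 0x1FF) << 0))

#define HWIRQ_SID(h)	(((h) >> 28) & 0xF)
#define HWIRQ_PER(h)	(((h) >> 20) & 0xFF)
#define HWIRQ_IRQ(h)	(((h) >> 16) & 0x7)
#define HWIRQ_APID(h)	(((h) >> 0) & 0x1FF)

int main(void)
{
	/* apid 0x142 needs 9 bits and would have been truncated by the old u8. */
	unsigned long h = HWIRQ(0x2, 0x28, 0x5, 0x142);

	assert(HWIRQ_SID(h)  == 0x2);
	assert(HWIRQ_PER(h)  == 0x28);
	assert(HWIRQ_IRQ(h)  == 0x5);
	assert(HWIRQ_APID(h) == 0x142);
	printf("hwirq = 0x%08lx\n", h);
	return 0;
}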
@@ -172,7 +183,7 @@ struct spmi_pmic_arb {
struct pmic_arb_ver_ops {
const char *ver_str;
int (*ppid_to_apid)(struct spmi_pmic_arb *pa, u8 sid, u16 addr,
- u8 *apid);
+ u16 *apid);
int (*mode)(struct spmi_pmic_arb *dev, u8 sid, u16 addr,
mode_t *mode);
/* spmi commands (read_cmd, write_cmd, cmd) functionality */
@@ -181,10 +192,10 @@ struct pmic_arb_ver_ops {
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
/* Interrupts controller functionality (offset of PIC registers) */
- u32 (*owner_acc_status)(u8 m, u8 n);
- u32 (*acc_enable)(u8 n);
- u32 (*irq_status)(u8 n);
- u32 (*irq_clear)(u8 n);
+ u32 (*owner_acc_status)(u8 m, u16 n);
+ u32 (*acc_enable)(u16 n);
+ u32 (*irq_status)(u16 n);
+ u32 (*irq_clear)(u16 n);
};
static inline void pmic_arb_base_write(struct spmi_pmic_arb *pa,
@@ -466,8 +477,8 @@ static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
size_t len)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 sid = d->hwirq >> 24;
- u8 per = d->hwirq >> 16;
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
(per << 8) + reg, buf, len))
@@ -479,8 +490,8 @@ static void qpnpint_spmi_write(struct irq_data *d, u8 reg, void *buf,
static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 sid = d->hwirq >> 24;
- u8 per = d->hwirq >> 16;
+ u8 sid = HWIRQ_SID(d->hwirq);
+ u8 per = HWIRQ_PER(d->hwirq);
if (pmic_arb_read_cmd(pa->spmic, SPMI_CMD_EXT_READL, sid,
(per << 8) + reg, buf, len))
@@ -489,7 +500,7 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
d->irq);
}
-static void cleanup_irq(struct spmi_pmic_arb *pa, u8 apid, int id)
+static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
{
u16 ppid = pa->apid_data[apid].ppid;
u8 sid = ppid >> 8;
@@ -514,20 +525,19 @@ static void cleanup_irq(struct spmi_pmic_arb *pa, u8 apid, int id)
irq_mask, ppid);
}
-static void periph_interrupt(struct spmi_pmic_arb *pa, u8 apid)
+static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
{
unsigned int irq;
u32 status;
int id;
+ u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pa->apid_data[apid].ppid & 0xFF;
status = readl_relaxed(pa->intr + pa->ver_ops->irq_status(apid));
while (status) {
id = ffs(status) - 1;
status &= ~BIT(id);
- irq = irq_find_mapping(pa->domain,
- pa->apid_data[apid].ppid << 16
- | id << 8
- | apid);
+ irq = irq_find_mapping(pa->domain, HWIRQ(sid, per, id, apid));
if (irq == 0) {
cleanup_irq(pa, apid, id);
continue;
@@ -568,8 +578,8 @@ static void pmic_arb_chained_irq(struct irq_desc *desc)
static void qpnpint_irq_ack(struct irq_data *d)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
u8 data;
writel_relaxed(BIT(irq), pa->intr + pa->ver_ops->irq_clear(apid));
@@ -580,7 +590,7 @@ static void qpnpint_irq_ack(struct irq_data *d)
static void qpnpint_irq_mask(struct irq_data *d)
{
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 data = BIT(irq);
qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &data, 1);
@@ -589,8 +599,8 @@ static void qpnpint_irq_mask(struct irq_data *d)
static void qpnpint_irq_unmask(struct irq_data *d)
{
struct spmi_pmic_arb *pa = irq_data_get_irq_chip_data(d);
- u8 irq = d->hwirq >> 8;
- u8 apid = d->hwirq;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u16 apid = HWIRQ_APID(d->hwirq);
u8 buf[2];
writel_relaxed(SPMI_PIC_ACC_ENABLE_BIT,
@@ -612,7 +622,7 @@ static void qpnpint_irq_unmask(struct irq_data *d)
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct spmi_pmic_arb_qpnpint_type type;
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 bit_mask_irq = BIT(irq);
qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
@@ -649,7 +659,7 @@ static int qpnpint_get_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which,
bool *state)
{
- u8 irq = d->hwirq >> 8;
+ u8 irq = HWIRQ_IRQ(d->hwirq);
u8 status = 0;
if (which != IRQCHIP_STATE_LINE_LEVEL)
@@ -681,7 +691,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
{
struct spmi_pmic_arb *pa = d->host_data;
int rc;
- u8 apid;
+ u16 apid;
dev_dbg(&pa->spmic->dev,
"intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n",
@@ -709,10 +719,7 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
if (apid < pa->min_apid)
pa->min_apid = apid;
- *out_hwirq = (intspec[0] & 0xF) << 24
- | (intspec[1] & 0xFF) << 16
- | (intspec[2] & 0x7) << 8
- | apid;
+ *out_hwirq = HWIRQ(intspec[0], intspec[1], intspec[2], apid);
*out_type = intspec[3] & IRQ_TYPE_SENSE_MASK;
dev_dbg(&pa->spmic->dev, "out_hwirq = %lu\n", *out_hwirq);
@@ -735,7 +742,7 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
}
static int
-pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u8 *apid)
+pmic_arb_ppid_to_apid_v1(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
u16 ppid = sid << 8 | ((addr >> 8) & 0xFF);
u32 *mapping_table = pa->mapping_table;
@@ -834,7 +841,7 @@ static u16 pmic_arb_find_apid(struct spmi_pmic_arb *pa, u16 ppid)
}
static int
-pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u8 *apid)
+pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u16 *apid)
{
u16 ppid = (sid << 8) | (addr >> 8);
u16 apid_valid;
@@ -852,7 +859,7 @@ pmic_arb_ppid_to_apid_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u8 *apid)
static int
pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
{
- u8 apid;
+ u16 apid;
int rc;
rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
@@ -871,7 +878,7 @@ pmic_arb_mode_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, mode_t *mode)
static int
pmic_arb_offset_v2(struct spmi_pmic_arb *pa, u8 sid, u16 addr, u32 *offset)
{
- u8 apid;
+ u16 apid;
int rc;
rc = pmic_arb_ppid_to_apid_v2(pa, sid, addr, &apid);
@@ -892,47 +899,47 @@ static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
return (opc << 27) | ((addr & 0xff) << 4) | (bc & 0x7);
}
-static u32 pmic_arb_owner_acc_status_v1(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v1(u8 m, u16 n)
{
return 0x20 * m + 0x4 * n;
}
-static u32 pmic_arb_owner_acc_status_v2(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v2(u8 m, u16 n)
{
return 0x100000 + 0x1000 * m + 0x4 * n;
}
-static u32 pmic_arb_owner_acc_status_v3(u8 m, u8 n)
+static u32 pmic_arb_owner_acc_status_v3(u8 m, u16 n)
{
return 0x200000 + 0x1000 * m + 0x4 * n;
}
-static u32 pmic_arb_acc_enable_v1(u8 n)
+static u32 pmic_arb_acc_enable_v1(u16 n)
{
return 0x200 + 0x4 * n;
}
-static u32 pmic_arb_acc_enable_v2(u8 n)
+static u32 pmic_arb_acc_enable_v2(u16 n)
{
return 0x1000 * n;
}
-static u32 pmic_arb_irq_status_v1(u8 n)
+static u32 pmic_arb_irq_status_v1(u16 n)
{
return 0x600 + 0x4 * n;
}
-static u32 pmic_arb_irq_status_v2(u8 n)
+static u32 pmic_arb_irq_status_v2(u16 n)
{
return 0x4 + 0x1000 * n;
}
-static u32 pmic_arb_irq_clear_v1(u8 n)
+static u32 pmic_arb_irq_clear_v1(u16 n)
{
return 0xA00 + 0x4 * n;
}
-static u32 pmic_arb_irq_clear_v2(u8 n)
+static u32 pmic_arb_irq_clear_v2(u16 n)
{
return 0x8 + 0x1000 * n;
}
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 73d7435d2eb8..97ab02dfc753 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -831,6 +831,7 @@ struct tsens_tm_device {
bool prev_reading_avail;
bool calibration_less_mode;
bool tsens_local_init;
+ bool gain_offset_programmed;
int tsens_factor;
uint32_t tsens_num_sensor;
int tsens_irq;
@@ -5341,17 +5342,25 @@ static int get_device_tree_data(struct platform_device *pdev,
return -ENODEV;
}
- tsens_slope_data = devm_kzalloc(&pdev->dev,
+ /* TSENS calibration region */
+ tmdev->res_calib_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tsens_eeprom_physical");
+ if (!tmdev->res_calib_mem) {
+ pr_debug("Using controller programmed gain and offset\n");
+ tmdev->gain_offset_programmed = true;
+ } else {
+ tsens_slope_data = devm_kzalloc(&pdev->dev,
tsens_num_sensors * sizeof(u32), GFP_KERNEL);
- if (!tsens_slope_data)
- return -ENOMEM;
+ if (!tsens_slope_data)
+ return -ENOMEM;
- rc = of_property_read_u32_array(of_node,
- "qcom,slope", tsens_slope_data, tsens_num_sensors);
- if (rc) {
- dev_err(&pdev->dev, "invalid or missing property: tsens-slope\n");
- return rc;
- };
+ rc = of_property_read_u32_array(of_node,
+ "qcom,slope", tsens_slope_data, tsens_num_sensors);
+ if (rc) {
+ dev_err(&pdev->dev, "missing property: tsens-slope\n");
+ return rc;
+ }
+ }
if (!of_match_node(tsens_match, of_node)) {
pr_err("Need to read SoC specific fuse map\n");
@@ -5364,9 +5373,13 @@ static int get_device_tree_data(struct platform_device *pdev,
return -ENODEV;
}
- for (i = 0; i < tsens_num_sensors; i++)
- tmdev->sensor[i].slope_mul_tsens_factor = tsens_slope_data[i];
- tmdev->tsens_factor = TSENS_SLOPE_FACTOR;
+ if (!tmdev->gain_offset_programmed) {
+ for (i = 0; i < tsens_num_sensors; i++)
+ tmdev->sensor[i].slope_mul_tsens_factor =
+ tsens_slope_data[i];
+ tmdev->tsens_factor = TSENS_SLOPE_FACTOR;
+ }
+
tmdev->tsens_num_sensor = tsens_num_sensors;
tmdev->calibration_less_mode = of_property_read_bool(of_node,
"qcom,calibration-less-mode");
@@ -5536,24 +5549,17 @@ static int get_device_tree_data(struct platform_device *pdev,
goto fail_unmap_tsens_region;
}
- /* TSENS calibration region */
- tmdev->res_calib_mem = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "tsens_eeprom_physical");
- if (!tmdev->res_calib_mem) {
- pr_err("Could not get qfprom physical address resource\n");
- rc = -EINVAL;
- goto fail_unmap_tsens;
- }
-
- tmdev->calib_len = tmdev->res_calib_mem->end -
+ if (!tmdev->gain_offset_programmed) {
+ tmdev->calib_len = tmdev->res_calib_mem->end -
tmdev->res_calib_mem->start + 1;
- tmdev->tsens_calib_addr = ioremap(tmdev->res_calib_mem->start,
+ tmdev->tsens_calib_addr = ioremap(tmdev->res_calib_mem->start,
tmdev->calib_len);
- if (!tmdev->tsens_calib_addr) {
- pr_err("Failed to IO map EEPROM registers.\n");
- rc = -EINVAL;
- goto fail_unmap_tsens;
+ if (!tmdev->tsens_calib_addr) {
+ pr_err("Failed to IO map EEPROM registers.\n");
+ rc = -EINVAL;
+ goto fail_unmap_tsens;
+ }
}
return 0;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index d7bedf8beee3..e03d3b41c25b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -39,7 +39,6 @@
#include <linux/kthread.h>
#include <net/netlink.h>
#include <net/genetlink.h>
-#include <linux/suspend.h>
#define CREATE_TRACE_POINTS
#include <trace/events/thermal.h>
@@ -64,8 +63,6 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
-static atomic_t in_suspend;
-
static struct thermal_governor *def_governor;
static struct thermal_governor *__find_governor(const char *name)
@@ -961,9 +958,6 @@ void thermal_zone_device_update(struct thermal_zone_device *tz)
{
int count;
- if (atomic_read(&in_suspend))
- return;
-
if (!tz->ops->get_temp)
return;
@@ -2641,36 +2635,6 @@ static void thermal_unregister_governors(void)
thermal_gov_power_allocator_unregister();
}
-static int thermal_pm_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
-{
- struct thermal_zone_device *tz;
-
- switch (mode) {
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- case PM_SUSPEND_PREPARE:
- atomic_set(&in_suspend, 1);
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- atomic_set(&in_suspend, 0);
- list_for_each_entry(tz, &thermal_tz_list, node) {
- thermal_zone_device_reset(tz);
- thermal_zone_device_update(tz);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-static struct notifier_block thermal_pm_nb = {
- .notifier_call = thermal_pm_notify,
-};
-
static int __init thermal_init(void)
{
int result;
@@ -2691,11 +2655,6 @@ static int __init thermal_init(void)
if (result)
goto exit_netlink;
- result = register_pm_notifier(&thermal_pm_nb);
- if (result)
- pr_warn("Thermal: Can not register suspend notifier, return %d\n",
- result);
-
return 0;
exit_netlink:
@@ -2715,7 +2674,6 @@ error:
static void __exit thermal_exit(void)
{
- unregister_pm_notifier(&thermal_pm_nb);
of_thermal_destroy_zones();
genetlink_exit();
class_unregister(&thermal_class);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 6843711774b2..d4ece0e56954 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -70,9 +70,11 @@
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1
-#define IPC_MSM_HS_LOG_PAGES 5
+#define IPC_MSM_HS_LOG_STATE_PAGES 2
+#define IPC_MSM_HS_LOG_USER_PAGES 2
+#define IPC_MSM_HS_LOG_DATA_PAGES 3
#define UART_DMA_DESC_NR 8
-#define BUF_DUMP_SIZE 20
+#define BUF_DUMP_SIZE 32
/* If the debug_mask gets set to FATAL_LEV,
* a fatal error has happened and further IPC logging
@@ -121,6 +123,11 @@ enum {
} \
} while (0)
+#define LOG_USR_MSG(ctx, x...) do { \
+ if (ctx) \
+ ipc_log_string(ctx, x); \
+} while (0)
+
/*
* There are 3 different kind of UART Core available on MSM.
* High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
@@ -164,6 +171,7 @@ struct msm_hs_tx {
struct task_struct *task;
struct msm_hs_sps_ep_conn_data cons;
struct timer_list tx_timeout_timer;
+ void *ipc_tx_ctxt;
};
struct msm_hs_rx {
@@ -181,6 +189,7 @@ struct msm_hs_rx {
unsigned long pending_flag;
int rx_inx;
struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
+ void *ipc_rx_ctxt;
};
enum buffer_states {
NONE_PENDING = 0x0,
@@ -214,7 +223,7 @@ struct msm_hs_port {
struct clk *pclk;
struct msm_hs_tx tx;
struct msm_hs_rx rx;
- atomic_t clk_count;
+ atomic_t resource_count;
struct msm_hs_wakeup wakeup;
struct dentry *loopback_dir;
@@ -248,6 +257,7 @@ struct msm_hs_port {
bool obs; /* out of band sleep flag */
atomic_t client_req_state;
void *ipc_msm_hs_log_ctxt;
+ void *ipc_msm_hs_pwr_ctxt;
int ipc_debug_mask;
};
@@ -315,7 +325,7 @@ static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
break;
}
default: {
- MSM_HS_DBG("%s():Unknown cmd specified: cmd=%d\n", __func__,
+ MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
cmd);
ret = -ENOIOCTLCMD;
break;
@@ -380,7 +390,7 @@ static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
{
struct uart_port *uport = &(msm_uport->uport);
- int rc = atomic_read(&msm_uport->clk_count);
+ int rc = atomic_read(&msm_uport->resource_count);
MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
if (rc <= 0) {
@@ -388,7 +398,7 @@ static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
WARN_ON(1);
return;
}
- atomic_dec(&msm_uport->clk_count);
+ atomic_dec(&msm_uport->resource_count);
pm_runtime_mark_last_busy(uport->dev);
pm_runtime_put_autosuspend(uport->dev);
}
@@ -400,12 +410,12 @@ static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
struct uart_port *uport = &(msm_uport->uport);
ret = pm_runtime_get_sync(uport->dev);
if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_WARN("%s(): %p runtime PM callback not invoked(%d)",
- __func__, uport->dev, ret);
+ MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
+ __func__, dev_name(uport->dev), ret,
+ msm_uport->pm_state);
msm_hs_pm_resume(uport->dev);
}
-
- atomic_inc(&msm_uport->clk_count);
+ atomic_inc(&msm_uport->resource_count);
}
/* Check if the uport line number matches with user id stored in pdata.
@@ -567,23 +577,21 @@ static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
return sps_disconnect(sps_pipe_handler);
}
-static void hex_dump_ipc(struct msm_hs_port *msm_uport,
- char *prefix, char *string, int size)
+static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
+ char *prefix, char *string, u64 addr, int size)
+
{
- unsigned char linebuf[512];
- unsigned char firstbuf[40], lastbuf[40];
+ char buf[(BUF_DUMP_SIZE * 3) + 2];
+ int len = 0;
- if ((msm_uport->ipc_debug_mask != DBG_LEV) && (size > BUF_DUMP_SIZE)) {
- hex_dump_to_buffer(string, 10, 16, 1,
- firstbuf, sizeof(firstbuf), 1);
- hex_dump_to_buffer(string + (size - 10), 10, 16, 1,
- lastbuf, sizeof(lastbuf), 1);
- MSM_HS_INFO("%s : %s...%s", prefix, firstbuf, lastbuf);
- } else {
- hex_dump_to_buffer(string, size, 16, 1,
- linebuf, sizeof(linebuf), 1);
- MSM_HS_INFO("%s : %s", prefix, linebuf);
- }
+ len = min(size, BUF_DUMP_SIZE);
+ /*
+ * Print up to 32 data bytes, 32 bytes per line, 1 byte at a time, and
+ * don't include the ASCII text at the end of the buffer.
+ */
+ hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
+ ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
+ (unsigned int)addr, size, buf);
}
/*
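The reworked hex_dump_ipc() above sizes its scratch buffer as (BUF_DUMP_SIZE * 3) + 2, i.e. three characters per dumped byte ("xx ") plus a terminator, and caps the dump at 32 bytes. A standalone illustration of that sizing and formatting follows; it uses a plain snprintf loop and a placeholder address instead of the kernel's hex_dump_to_buffer() and DMA address.

#include <stdio.h>
#include <string.h>

#define BUF_DUMP_SIZE 32

/* Format up to BUF_DUMP_SIZE bytes as "xx xx xx ...", one byte at a time. */
static void dump_hex(const unsigned char *data, int size)
{
	char buf[(BUF_DUMP_SIZE * 3) + 2];
	int len = size < BUF_DUMP_SIZE ? size : BUF_DUMP_SIZE;
	int i, pos = 0;

	for (i = 0; i < len; i++)
		pos += snprintf(buf + pos, sizeof(buf) - pos, "%02x ", data[i]);
	printf("Tx[0x%.10x:%d] : %s\n", 0u /* placeholder addr */, size, buf);
}

int main(void)
{
	unsigned char payload[40];

	memset(payload, 0xab, sizeof(payload));
	dump_hex(payload, sizeof(payload));	/* only the first 32 bytes are shown */
	return 0;
}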
@@ -594,8 +602,8 @@ static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
struct uart_port *uport = &(msm_uport->uport);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
- MSM_HS_INFO("%s:Failed clocks are off, clk_count %d",
- __func__, atomic_read(&msm_uport->clk_count));
+ MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
+ __func__, atomic_read(&msm_uport->resource_count));
return;
}
@@ -757,8 +765,10 @@ static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
unsigned long flags;
unsigned int data;
- if (tx->flush != FLUSH_SHUTDOWN)
+ if (tx->flush != FLUSH_SHUTDOWN) {
+ MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
return 0;
+ }
/* Establish connection between peripheral and memory endpoint */
ret = sps_connect(sps_pipe_handle, sps_config);
@@ -1100,7 +1110,6 @@ static void msm_hs_set_termios(struct uart_port *uport,
mutex_lock(&msm_uport->mtx);
msm_hs_write(uport, UART_DM_IMR, 0);
- MSM_HS_DBG("Entering %s\n", __func__);
msm_hs_disable_flow_control(uport, true);
/*
@@ -1214,10 +1223,10 @@ static void msm_hs_set_termios(struct uart_port *uport,
msm_uport->flow_control = true;
}
msm_hs_write(uport, UART_DM_MR1, data);
+ MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
mutex_unlock(&msm_uport->mtx);
- MSM_HS_DBG("Exit %s\n", __func__);
msm_hs_resource_unvote(msm_uport);
}
@@ -1400,9 +1409,6 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
if (tx_count > left)
tx_count = left;
- MSM_HS_INFO("%s(): [UART_TX]<%d>\n", __func__, tx_count);
- hex_dump_ipc(msm_uport, "HSUART write: ",
- &tx_buf->buf[tx_buf->tail], tx_count);
src_addr = tx->dma_base + tx_buf->tail;
/* Mask the src_addr to align on a cache
@@ -1415,6 +1421,8 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
tx->tx_count = tx_count;
+ hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
+ &tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
sps_pipe_handle = tx->cons.pipe_handle;
/* Queue transfer request to SPS */
ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
@@ -1717,12 +1725,11 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
goto out;
rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
-
- MSM_HS_INFO("%s():[UART_RX]<%d>\n", __func__, rx_count);
- hex_dump_ipc(msm_uport, "HSUART Read: ",
- (msm_uport->rx.buffer +
- (msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
- rx_count);
+ hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
+ (msm_uport->rx.buffer +
+ (msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
+ msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
+ rx_count);
/*
* We are in a spin locked context, spin lock taken at
@@ -1733,7 +1740,7 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
&msm_uport->rx.pending_flag) &&
!test_bit(msm_uport->rx.rx_inx,
&msm_uport->rx.queued_flag))
- MSM_HS_ERR("RX INX not set");
+ MSM_HS_ERR("%s: RX INX not set", __func__);
else if (test_bit(msm_uport->rx.rx_inx,
&msm_uport->rx.pending_flag) &&
!test_bit(msm_uport->rx.rx_inx,
@@ -1748,14 +1755,14 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
rx_count);
if (retval != rx_count) {
- MSM_HS_DBG("%s(): ret %d rx_count %d",
+ MSM_HS_INFO("%s(): ret %d rx_count %d",
__func__, retval, rx_count);
msm_uport->rx.buffer_pending |=
CHARS_NORMAL | retval << 5 |
(rx_count - retval) << 16;
}
} else
- MSM_HS_ERR("Error in inx %d",
+ MSM_HS_ERR("%s: Error in inx %d", __func__,
msm_uport->rx.rx_inx);
}
@@ -1778,7 +1785,7 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
}
out:
if (msm_uport->rx.buffer_pending) {
- MSM_HS_WARN("tty buffer exhausted. Stalling\n");
+ MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
schedule_delayed_work(&msm_uport->rx.flip_insert_work
, msecs_to_jiffies(RETRY_TIMEOUT));
}
@@ -1796,7 +1803,7 @@ static void msm_hs_start_tx_locked(struct uart_port *uport)
/* Bail if transfer in progress */
if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
- MSM_HS_DBG("%s(): retry, flush %d, dma_in_flight %d\n",
+ MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
__func__, tx->flush, tx->dma_in_flight);
return;
}
@@ -1826,11 +1833,9 @@ static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
notify->data.transfer.iovec.addr);
msm_uport->notify = *notify;
- MSM_HS_DBG("%s: ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x, line=%d\n",
- __func__, notify->event_id, &addr,
- notify->data.transfer.iovec.size,
- notify->data.transfer.iovec.flags,
- msm_uport->uport.line);
+ MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+ &addr, notify->data.transfer.iovec.size,
+ notify->data.transfer.iovec.flags);
del_timer(&msm_uport->tx.tx_timeout_timer);
MSM_HS_DBG("%s(): Queue kthread work", __func__);
@@ -1931,9 +1936,8 @@ static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
uport = &(msm_uport->uport);
msm_uport->notify = *notify;
- MSM_HS_DBG("\n%s: sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x\n",
- __func__, notify->event_id, &addr,
- notify->data.transfer.iovec.size,
+ MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+ &addr, notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
spin_lock_irqsave(&uport->lock, flags);
@@ -1985,13 +1989,13 @@ void msm_hs_set_mctrl_locked(struct uart_port *uport,
unsigned int set_rts;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- MSM_HS_DBG("%s()", __func__);
if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
MSM_HS_WARN("%s(): Clocks are off\n", __func__);
return;
}
/* RTS is active low */
set_rts = TIOCM_RTS & mctrl ? 0 : 1;
+ MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
if (set_rts)
msm_hs_disable_flow_control(uport, false);
@@ -2186,7 +2190,7 @@ static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
return NULL;
}
-void toggle_wakeup_interrupt(struct msm_hs_port *msm_uport)
+void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
{
unsigned long flags;
struct uart_port *uport = &(msm_uport->uport);
@@ -2197,7 +2201,6 @@ void toggle_wakeup_interrupt(struct msm_hs_port *msm_uport)
return;
if (!(msm_uport->wakeup.enabled)) {
- MSM_HS_DBG("%s(): Enable Wakeup IRQ", __func__);
enable_irq(msm_uport->wakeup.irq);
disable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
@@ -2205,12 +2208,28 @@ void toggle_wakeup_interrupt(struct msm_hs_port *msm_uport)
msm_uport->wakeup.enabled = true;
spin_unlock_irqrestore(&uport->lock, flags);
} else {
+ MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
+ }
+}
+
+void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
+{
+ unsigned long flags;
+ struct uart_port *uport = &(msm_uport->uport);
+
+ if (!is_use_low_power_wakeup(msm_uport))
+ return;
+ if (msm_uport->wakeup.freed)
+ return;
+
+ if (msm_uport->wakeup.enabled) {
disable_irq_nosync(msm_uport->wakeup.irq);
enable_irq(uport->irq);
spin_lock_irqsave(&uport->lock, flags);
msm_uport->wakeup.enabled = false;
spin_unlock_irqrestore(&uport->lock, flags);
- MSM_HS_DBG("%s(): Disable Wakeup IRQ", __func__);
+ } else {
+ MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
}
}
@@ -2267,6 +2286,7 @@ int msm_hs_request_clock_off(struct uart_port *uport)
{
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
int ret = 0;
+ int client_count = 0;
mutex_lock(&msm_uport->mtx);
/*
@@ -2293,8 +2313,10 @@ int msm_hs_request_clock_off(struct uart_port *uport)
atomic_set(&msm_uport->client_req_state, 1);
msm_hs_resource_unvote(msm_uport);
atomic_dec(&msm_uport->client_count);
- MSM_HS_INFO("%s():DISABLE UART CLOCK: ioc %d\n",
- __func__, atomic_read(&msm_uport->client_count));
+ client_count = atomic_read(&msm_uport->client_count);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count %d\n", __func__,
+ client_count);
exit_request_clock_off:
return ret;
}
@@ -2323,8 +2345,9 @@ int msm_hs_request_clock_on(struct uart_port *uport)
msm_hs_resource_vote(UARTDM_TO_MSM(uport));
atomic_inc(&msm_uport->client_count);
client_count = atomic_read(&msm_uport->client_count);
- MSM_HS_INFO("%s():ENABLE UART CLOCK: ioc %d\n",
- __func__, client_count);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count %d\n", __func__,
+ client_count);
/* Clear the flag */
if (msm_uport->obs)
@@ -2342,11 +2365,8 @@ static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
struct uart_port *uport = &msm_uport->uport;
struct tty_struct *tty = NULL;
- msm_hs_resource_vote(msm_uport);
spin_lock_irqsave(&uport->lock, flags);
- MSM_HS_DBG("%s(): ignore %d\n", __func__,
- msm_uport->wakeup.ignore);
if (msm_uport->wakeup.ignore)
msm_uport->wakeup.ignore = 0;
else
@@ -2362,13 +2382,15 @@ static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
tty_insert_flip_char(tty->port,
msm_uport->wakeup.rx_to_inject,
TTY_NORMAL);
- MSM_HS_DBG("%s(): Inject 0x%x", __func__,
- msm_uport->wakeup.rx_to_inject);
+ hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
+ "Rx Inject",
+ &msm_uport->wakeup.rx_to_inject, 0, 1);
+ MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
+ msm_uport->wakeup.ignore);
}
}
spin_unlock_irqrestore(&uport->lock, flags);
- msm_hs_resource_unvote(msm_uport);
if (wakeup && msm_uport->wakeup.inject_rx)
tty_flip_buffer_push(tty->port);
@@ -2396,7 +2418,7 @@ static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
ret = pinctrl_select_state(msm_uport->pinctrl,
msm_uport->gpio_state_suspend);
if (ret)
- MSM_HS_ERR("%s(): Failed to pinctrl set_state",
+ MSM_HS_ERR("%s():Failed to pinctrl set_state",
__func__);
} else if (pdata) {
if (gpio_is_valid(pdata->uart_tx_gpio))
@@ -2674,6 +2696,8 @@ static int msm_hs_startup(struct uart_port *uport)
spin_lock_irqsave(&uport->lock, flags);
atomic_set(&msm_uport->client_count, 0);
atomic_set(&msm_uport->client_req_state, 0);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count 0\n", __func__);
msm_hs_start_rx_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -3092,17 +3116,19 @@ static void msm_hs_pm_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
int ret;
+ int client_count = 0;
if (!msm_uport)
goto err_suspend;
mutex_lock(&msm_uport->mtx);
+ client_count = atomic_read(&msm_uport->client_count);
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
if (msm_uport->obs) {
ret = pinctrl_select_state(msm_uport->pinctrl,
msm_uport->gpio_state_suspend);
if (ret)
- MSM_HS_ERR("%s(): Error selecting suspend state",
+ MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
__func__);
}
@@ -3111,8 +3137,10 @@ static void msm_hs_pm_suspend(struct device *dev)
obs_manage_irq(msm_uport, false);
msm_hs_clk_bus_unvote(msm_uport);
if (!atomic_read(&msm_uport->client_req_state))
- toggle_wakeup_interrupt(msm_uport);
- MSM_HS_DBG("%s(): return suspend\n", __func__);
+ enable_wakeup_interrupt(msm_uport);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: PM State Suspended client_count %d\n", __func__,
+ client_count);
mutex_unlock(&msm_uport->mtx);
return;
err_suspend:
@@ -3124,17 +3152,26 @@ static int msm_hs_pm_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
- int ret;
+ int ret = 0;
+ int client_count = 0;
- if (!msm_uport)
- goto err_resume;
+ if (!msm_uport) {
+ dev_err(dev, "%s:Invalid uport\n", __func__);
+ return -ENODEV;
+ }
mutex_lock(&msm_uport->mtx);
+ client_count = atomic_read(&msm_uport->client_count);
if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
goto exit_pm_resume;
if (!atomic_read(&msm_uport->client_req_state))
- toggle_wakeup_interrupt(msm_uport);
- msm_hs_clk_bus_vote(msm_uport);
+ disable_wakeup_interrupt(msm_uport);
+ ret = msm_hs_clk_bus_vote(msm_uport);
+ if (ret) {
+ MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
+ dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
+ goto exit_pm_resume;
+ }
obs_manage_irq(msm_uport, true);
msm_uport->pm_state = MSM_HS_PM_ACTIVE;
msm_hs_resource_on(msm_uport);
@@ -3144,17 +3181,15 @@ static int msm_hs_pm_resume(struct device *dev)
ret = pinctrl_select_state(msm_uport->pinctrl,
msm_uport->gpio_state_active);
if (ret)
- MSM_HS_ERR("%s(): Error selecting active state",
+ MSM_HS_ERR("%s():Error selecting active state",
__func__);
}
- MSM_HS_DBG("%s(): return resume\n", __func__);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
mutex_unlock(&msm_uport->mtx);
- return 0;
-err_resume:
- pr_err("%s(): invalid uport", __func__);
- return 0;
+ return ret;
}
#ifdef CONFIG_PM
@@ -3174,20 +3209,20 @@ static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
* If there is an active clk request or an impending userspace request
* fail the suspend callback.
*/
- clk_cnt = atomic_read(&msm_uport->clk_count);
+ clk_cnt = atomic_read(&msm_uport->resource_count);
client_count = atomic_read(&msm_uport->client_count);
- if (clk_cnt || (pm_runtime_enabled(dev) &&
- !pm_runtime_suspended(dev))) {
- MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d,RPM:%d\n",
- __func__, clk_cnt, client_count,
- dev->power.runtime_status);
+ if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
+ MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
+ __func__, clk_cnt, client_count);
ret = -EBUSY;
goto exit_suspend_noirq;
}
prev_pwr_state = msm_uport->pm_state;
msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
- MSM_HS_DBG("%s(): suspending", __func__);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State:Sys-Suspended client_count %d\n", __func__,
+ client_count);
exit_suspend_noirq:
mutex_unlock(&msm_uport->mtx);
return ret;
@@ -3207,9 +3242,10 @@ static int msm_hs_pm_sys_resume_noirq(struct device *dev)
*/
mutex_lock(&msm_uport->mtx);
- MSM_HS_DBG("%s(): system resume", __func__);
if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s:PM State: Suspended\n", __func__);
mutex_unlock(&msm_uport->mtx);
return 0;
}
@@ -3257,6 +3293,7 @@ static int msm_hs_probe(struct platform_device *pdev)
int core_irqres, bam_irqres, wakeup_irqres;
struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
unsigned long data;
+ char name[30];
if (pdev->dev.of_node) {
dev_dbg(&pdev->dev, "device tree enabled\n");
@@ -3350,11 +3387,13 @@ static int msm_hs_probe(struct platform_device *pdev)
iounmap(uport->membase);
return -ENOMEM;
}
+
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_state");
msm_uport->ipc_msm_hs_log_ctxt =
- ipc_log_context_create(IPC_MSM_HS_LOG_PAGES,
- dev_name(msm_uport->uport.dev), 0);
- pr_debug("%s: Device name is %s\n", __func__,
- dev_name(msm_uport->uport.dev));
+ ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
+ name, 0);
if (!msm_uport->ipc_msm_hs_log_ctxt) {
dev_err(&pdev->dev, "%s: error creating logging context",
__func__);
@@ -3439,6 +3478,36 @@ static int msm_hs_probe(struct platform_device *pdev)
msm_uport->tx.flush = FLUSH_SHUTDOWN;
msm_uport->rx.flush = FLUSH_SHUTDOWN;
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_tx");
+ msm_uport->tx.ipc_tx_ctxt =
+ ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+ if (!msm_uport->tx.ipc_tx_ctxt)
+ dev_err(&pdev->dev, "%s: error creating tx logging context",
+ __func__);
+
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_rx");
+ msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
+ IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
+ if (!msm_uport->rx.ipc_rx_ctxt)
+ dev_err(&pdev->dev, "%s: error creating rx logging context",
+ __func__);
+
+ memset(name, 0, sizeof(name));
+ scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
+ "_pwr");
+ msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
+ IPC_MSM_HS_LOG_USER_PAGES, name, 0);
+ if (!msm_uport->ipc_msm_hs_pwr_ctxt)
+ dev_err(&pdev->dev, "%s: error creating usr logging context",
+ __func__);
+
+ uport->irq = core_irqres;
+ msm_uport->bam_irq = bam_irqres;
+
clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
msm_hs_clk_bus_vote(msm_uport);
ret = uartdm_init_port(uport);
@@ -3622,9 +3691,9 @@ static void msm_hs_shutdown(struct uart_port *uport)
UART_XMIT_SIZE, DMA_TO_DEVICE);
msm_hs_resource_unvote(msm_uport);
- rc = atomic_read(&msm_uport->clk_count);
+ rc = atomic_read(&msm_uport->resource_count);
if (rc) {
- atomic_set(&msm_uport->clk_count, 1);
+ atomic_set(&msm_uport->resource_count, 1);
MSM_HS_WARN("%s(): removing extra vote\n", __func__);
msm_hs_resource_unvote(msm_uport);
}
@@ -3635,6 +3704,8 @@ static void msm_hs_shutdown(struct uart_port *uport)
if (atomic_read(&msm_uport->client_count)) {
MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
atomic_set(&msm_uport->client_count, 0);
+ LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
+ "%s: Client_Count 0\n", __func__);
}
msm_hs_unconfig_uart_gpios(uport);
MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 0a9a3afd72dd..b22ea656367e 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -529,6 +529,14 @@ static int usb_cser_notify(struct f_cdev *port, u8 type, u16 value,
const unsigned len = sizeof(*notify) + length;
void *buf;
int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->is_connected) {
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ pr_debug("%s: port disconnected\n", __func__);
+ return -ENODEV;
+ }
req = port->port_usb.notify_req;
port->port_usb.notify_req = NULL;
@@ -544,7 +552,9 @@ static int usb_cser_notify(struct f_cdev *port, u8 type, u16 value,
notify->wValue = cpu_to_le16(value);
notify->wIndex = cpu_to_le16(port->port_usb.data_id);
notify->wLength = cpu_to_le16(length);
+ /* 2 byte data copy */
memcpy(buf, data, length);
+ spin_unlock_irqrestore(&port->port_lock, flags);
status = usb_ep_queue(ep, req, GFP_ATOMIC);
if (status < 0) {
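The f_cdev change above follows a common pattern: validate the connection state and populate the request while holding the port spinlock, then drop the lock before queueing to the endpoint, presumably so the completion path can take the same lock without deadlocking. A hedged userspace analogue of check-and-prepare-under-lock, submit-outside-lock is sketched below; the port structure and submit() helper are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Invented stand-in for the port: a connect flag plus a small notify buffer. */
struct port {
	pthread_mutex_t lock;
	int is_connected;
	char notify_buf[8];
};

static int submit(const char *buf, size_t len)
{
	printf("queued %zu bytes: %.*s\n", len, (int)len, buf);
	return 0;
}

static int port_notify(struct port *p, const char *data, size_t len)
{
	char local[sizeof(p->notify_buf)];
	size_t n = len < sizeof(local) ? len : sizeof(local);

	pthread_mutex_lock(&p->lock);
	if (!p->is_connected) {
		pthread_mutex_unlock(&p->lock);
		return -ENODEV;		/* mirrors the early-out in usb_cser_notify() */
	}
	memcpy(local, data, n);		/* prepare the payload under the lock */
	pthread_mutex_unlock(&p->lock);

	return submit(local, n);	/* submission happens outside the lock */
}

int main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, 1, { 0 } };

	return port_notify(&p, "hi", 2);
}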
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index fb59d0b03afe..9ce47ccb5e09 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -1041,6 +1041,7 @@ void *hdmi_hdcp2p2_init(struct hdmi_hdcp_init_data *init_data)
register_data.hdcp_ctx = &ctrl->lib_ctx;
register_data.client_ops = &client_ops;
register_data.txmtr_ops = &txmtr_ops;
+ register_data.device_type = HDCP_TXMTR_HDMI;
register_data.client_ctx = ctrl;
register_data.tethered = ctrl->tethered;