Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c                   |  36
-rw-r--r--  kernel/cpu.c                          |  52
-rw-r--r--  kernel/debug/debug_core.c             |   4
-rw-r--r--  kernel/events/core.c                  |  42
-rw-r--r--  kernel/fork.c                         |   9
-rw-r--r--  kernel/futex.c                        |   2
-rw-r--r--  kernel/jump_label.c                   |   7
-rw-r--r--  kernel/locking/rtmutex.c              |  68
-rw-r--r--  kernel/locking/rtmutex_common.h       |   5
-rw-r--r--  kernel/membarrier.c                   |   4
-rw-r--r--  kernel/memremap.c                     |   4
-rw-r--r--  kernel/power/hibernate.c              |  17
-rw-r--r--  kernel/printk/printk.c                |   2
-rw-r--r--  kernel/ptrace.c                       |  28
-rw-r--r--  kernel/rcu/tree.c                     |  12
-rw-r--r--  kernel/sched/core.c                   |  25
-rw-r--r--  kernel/sched/core_ctl.c               | 160
-rw-r--r--  kernel/sched/fair.c                   |   5
-rw-r--r--  kernel/sched/walt.c                   |  45
-rw-r--r--  kernel/sysctl.c                       |   1
-rw-r--r--  kernel/time/tick-broadcast.c          |   3
-rw-r--r--  kernel/time/timekeeping.c             |   4
-rw-r--r--  kernel/trace/ipc_logging.c            |   6
-rw-r--r--  kernel/trace/msm_rtb.c                |  10
-rw-r--r--  kernel/trace/trace.c                  |   3
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  17
-rw-r--r--  kernel/watchdog.c                     |  17
27 files changed, 398 insertions, 190 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 00411c82dac5..4984e1f552eb 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -457,6 +457,19 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
EXPORT_SYMBOL(file_ns_capable);
/**
+ * privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
+ * @ns: The user namespace in question
+ * @inode: The inode in question
+ *
+ * Return true if the inode uid and gid are within the namespace.
+ */
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode)
+{
+ return kuid_has_mapping(ns, inode->i_uid) &&
+ kgid_has_mapping(ns, inode->i_gid);
+}
+
+/**
* capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
* @inode: The inode in question
* @cap: The capability in question
@@ -469,7 +482,26 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
{
struct user_namespace *ns = current_user_ns();
- return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
- kgid_has_mapping(ns, inode->i_gid);
+ return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode);
}
EXPORT_SYMBOL(capable_wrt_inode_uidgid);
+
+/**
+ * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
+ * @tsk: The task that may be ptraced
+ * @ns: The user namespace to search for CAP_SYS_PTRACE in
+ *
+ * Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
+ * in the specified user namespace.
+ */
+bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
+{
+ int ret = 0; /* An absent tracer adds no restrictions */
+ const struct cred *cred;
+ rcu_read_lock();
+ cred = rcu_dereference(tsk->ptracer_cred);
+ if (cred)
+ ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
+ rcu_read_unlock();
+ return (ret == 0);
+}
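For context, ptracer_capable() above consults the credentials recorded for the ptracer at attach time. From user space the presence of a tracer is only visible as the TracerPid field of /proc/[pid]/status; a minimal, self-contained sketch (hypothetical helper, not part of this patch) that reads it:

/* Minimal user-space sketch (hypothetical helper): find the pid of the
 * task currently ptracing us by parsing the "TracerPid:" field of
 * /proc/self/status. 0 means no tracer is attached. */
#include <stdio.h>
#include <string.h>

static long current_tracer_pid(void)
{
	char line[256];
	long tracer = -1;
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "TracerPid:", 10)) {
			sscanf(line + 10, "%ld", &tracer);
			break;
		}
	}
	fclose(f);
	return tracer;
}

int main(void)
{
	printf("TracerPid: %ld\n", current_tracer_pid());
	return 0;
}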
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2432cc630ffb..e822cb0e18d5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -232,12 +232,6 @@ static int cpu_notify(unsigned long val, void *v)
return __cpu_notify(val, v, -1, NULL);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void cpu_notify_nofail(unsigned long val, void *v)
-{
- BUG_ON(cpu_notify(val, v));
-}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
@@ -255,6 +249,12 @@ void __unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
+#ifdef CONFIG_HOTPLUG_CPU
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+ BUG_ON(cpu_notify(val, v));
+}
+
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
@@ -530,9 +530,41 @@ out:
return ret;
}
+static int switch_to_rt_policy(void)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+ unsigned int policy = current->policy;
+ int err;
+
+ /* Nobody should be attempting hotplug from these policy contexts. */
+ if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+ policy == SCHED_DEADLINE)
+ return -EPERM;
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return 1;
+
+ /* Only SCHED_NORMAL left. */
+ err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ return err;
+
+}
+
+static int switch_to_fair_policy(void)
+{
+ struct sched_param param = { .sched_priority = 0 };
+
+ return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
int cpu_up(unsigned int cpu)
{
int err = 0;
+ int switch_err = 0;
+
+ switch_err = switch_to_rt_policy();
+ if (switch_err < 0)
+ return switch_err;
if (!cpu_possible(cpu)) {
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -558,6 +590,14 @@ int cpu_up(unsigned int cpu)
out:
cpu_maps_update_done();
+
+ if (!switch_err) {
+ switch_err = switch_to_fair_policy();
+ if (switch_err)
+ pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+ switch_err, current->comm, current->pid);
+ }
+
return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
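The switch_to_rt_policy()/switch_to_fair_policy() pair above brackets cpu_up() so the hotplug caller temporarily runs as SCHED_FIFO. A user-space sketch of the same bracketing pattern with sched_setscheduler(2) follows; it is illustrative only and needs CAP_SYS_NICE or root to succeed:

/* User-space sketch of the same bracketing pattern: temporarily raise the
 * calling thread to SCHED_FIFO around a latency-sensitive step, then drop
 * back to SCHED_OTHER. Needs CAP_SYS_NICE (or root) to succeed. */
#include <sched.h>
#include <stdio.h>

static int run_as_fifo(void (*fn)(void))
{
	struct sched_param rt = { .sched_priority = sched_get_priority_max(SCHED_FIFO) };
	struct sched_param fair = { .sched_priority = 0 };
	int already_rt = (sched_getscheduler(0) == SCHED_FIFO);

	if (!already_rt && sched_setscheduler(0, SCHED_FIFO, &rt))
		return -1;			/* could not switch */

	fn();					/* the work we want undisturbed */

	if (!already_rt && sched_setscheduler(0, SCHED_OTHER, &fair))
		perror("restore SCHED_OTHER");
	return 0;
}

static void work(void) { puts("running with FIFO priority"); }

int main(void) { return run_as_fifo(&work) ? 1 : 0; }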
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 0874e2edd275..79517e5549f1 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -598,11 +598,11 @@ return_normal:
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
- time_left = loops_per_jiffy * HZ;
+ time_left = MSEC_PER_SEC;
while (kgdb_do_roundup && --time_left &&
(atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
online_cpus)
- cpu_relax();
+ udelay(1000);
if (!time_left)
pr_crit("Timed out waiting for secondary CPUs.\n");
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8d2482d77c04..f9c6f554460e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6224,6 +6224,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
char *buf = NULL;
char *name;
+ if (vma->vm_flags & VM_READ)
+ prot |= PROT_READ;
+ if (vma->vm_flags & VM_WRITE)
+ prot |= PROT_WRITE;
+ if (vma->vm_flags & VM_EXEC)
+ prot |= PROT_EXEC;
+
+ if (vma->vm_flags & VM_MAYSHARE)
+ flags = MAP_SHARED;
+ else
+ flags = MAP_PRIVATE;
+
+ if (vma->vm_flags & VM_DENYWRITE)
+ flags |= MAP_DENYWRITE;
+ if (vma->vm_flags & VM_MAYEXEC)
+ flags |= MAP_EXECUTABLE;
+ if (vma->vm_flags & VM_LOCKED)
+ flags |= MAP_LOCKED;
+ if (vma->vm_flags & VM_HUGETLB)
+ flags |= MAP_HUGETLB;
+
if (file) {
struct inode *inode;
dev_t dev;
@@ -6250,27 +6271,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
maj = MAJOR(dev);
min = MINOR(dev);
- if (vma->vm_flags & VM_READ)
- prot |= PROT_READ;
- if (vma->vm_flags & VM_WRITE)
- prot |= PROT_WRITE;
- if (vma->vm_flags & VM_EXEC)
- prot |= PROT_EXEC;
-
- if (vma->vm_flags & VM_MAYSHARE)
- flags = MAP_SHARED;
- else
- flags = MAP_PRIVATE;
-
- if (vma->vm_flags & VM_DENYWRITE)
- flags |= MAP_DENYWRITE;
- if (vma->vm_flags & VM_MAYEXEC)
- flags |= MAP_EXECUTABLE;
- if (vma->vm_flags & VM_LOCKED)
- flags |= MAP_LOCKED;
- if (vma->vm_flags & VM_HUGETLB)
- flags |= MAP_HUGETLB;
-
goto got_name;
} else {
if (vma->vm_ops && vma->vm_ops->name) {
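The hunk above only moves the vm_flags-to-prot/flags translation ahead of the if (file) branch so anonymous mappings are covered by the same code. As a rough user-space analogue (not the kernel path), the permission column of /proc/self/maps can be decoded into the corresponding PROT_*/MAP_* bits:

/* Minimal sketch: translate the "rwxs/p" permission column of
 * /proc/self/maps into PROT_* / MAP_* bits, mirroring the direction of
 * the vm_flags translation in perf_event_mmap_event(). */
#include <stdio.h>
#include <sys/mman.h>

static void decode(const char perm[4], int *prot, int *flags)
{
	*prot = 0;
	if (perm[0] == 'r') *prot |= PROT_READ;
	if (perm[1] == 'w') *prot |= PROT_WRITE;
	if (perm[2] == 'x') *prot |= PROT_EXEC;
	*flags = (perm[3] == 's') ? MAP_SHARED : MAP_PRIVATE;
}

int main(void)
{
	char line[512], perm[5];
	unsigned long start, end;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		int prot, flags;

		if (sscanf(line, "%lx-%lx %4s", &start, &end, perm) != 3)
			continue;
		decode(perm, &prot, &flags);
		printf("%lx-%lx prot=%#x flags=%#x\n", start, end, prot, flags);
	}
	fclose(f);
	return 0;
}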
diff --git a/kernel/fork.c b/kernel/fork.c
index 7de03658692b..75573eeb49b2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -587,7 +587,8 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
#endif
}
-static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
+static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ struct user_namespace *user_ns)
{
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;
@@ -627,6 +628,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
if (init_new_context(p, mm))
goto fail_nocontext;
+ mm->user_ns = get_user_ns(user_ns);
return mm;
fail_nocontext:
@@ -672,7 +674,7 @@ struct mm_struct *mm_alloc(void)
return NULL;
memset(mm, 0, sizeof(*mm));
- return mm_init(mm, current);
+ return mm_init(mm, current, current_user_ns());
}
/*
@@ -687,6 +689,7 @@ void __mmdrop(struct mm_struct *mm)
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
check_mm(mm);
+ put_user_ns(mm->user_ns);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -948,7 +951,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
memcpy(mm, oldmm, sizeof(*mm));
- if (!mm_init(mm, tsk))
+ if (!mm_init(mm, tsk, mm->user_ns))
goto fail_nomem;
err = dup_mmap(mm, oldmm);
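mm_init() now pins the user namespace with get_user_ns() and __mmdrop() releases it with put_user_ns(). A minimal C11 sketch of that get/put ownership pattern, with made-up type and helper names:

/* Hypothetical sketch of the get/put pairing mm_init()/__mmdrop() rely on:
 * every object that stores a pointer takes a reference, and drops it when
 * the pointer goes away. C11 atomics stand in for the kernel's refcounts. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct user_ns_demo {
	atomic_int count;
};

static struct user_ns_demo *get_ns(struct user_ns_demo *ns)
{
	atomic_fetch_add(&ns->count, 1);
	return ns;
}

static void put_ns(struct user_ns_demo *ns)
{
	if (atomic_fetch_sub(&ns->count, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(ns);
	}
}

struct mm_demo {
	struct user_ns_demo *user_ns;		/* owns one reference */
};

int main(void)
{
	struct user_ns_demo *ns = malloc(sizeof(*ns));
	struct mm_demo mm;

	if (!ns)
		return 1;
	atomic_init(&ns->count, 1);		/* creator's reference */
	mm.user_ns = get_ns(ns);		/* mm_init(): pin the namespace */
	put_ns(ns);				/* creator drops its own reference */
	put_ns(mm.user_ns);			/* __mmdrop(): release the pinned one */
	return 0;
}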
diff --git a/kernel/futex.c b/kernel/futex.c
index e8af73cc51a7..beb042dcc332 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -3199,4 +3199,4 @@ static int __init futex_init(void)
return 0;
}
-__initcall(futex_init);
+core_initcall(futex_init);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 4b353e0be121..453ec4232852 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+ flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8251e75dd9c0..b066724d7a5b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
- if (!rt_mutex_has_waiters(lock))
- clear_rt_mutex_waiters(lock);
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+ if (rt_mutex_has_waiters(lock))
+ return;
+
+ /*
+ * The rbtree has no waiters enqueued, now make sure that the
+ * lock->owner still has the waiters bit set, otherwise the
+ * following can happen:
+ *
+ * CPU 0              CPU 1                CPU2
+ * l->owner=T1
+ *                    rt_mutex_lock(l)
+ *                    lock(l->lock)
+ *                    l->owner = T1 | HAS_WAITERS;
+ *                    enqueue(T2)
+ *                    boost()
+ *                      unlock(l->lock)
+ *                    block()
+ *
+ *                                         rt_mutex_lock(l)
+ *                                         lock(l->lock)
+ *                                         l->owner = T1 | HAS_WAITERS;
+ *                                         enqueue(T3)
+ *                                         boost()
+ *                                           unlock(l->lock)
+ *                                         block()
+ *                    signal(->T2)         signal(->T3)
+ *                    lock(l->lock)
+ *                    dequeue(T2)
+ *                    deboost()
+ *                      unlock(l->lock)
+ *                                         lock(l->lock)
+ *                                         dequeue(T3)
+ *                                          ==> wait list is empty
+ *                                         deboost()
+ *                                          unlock(l->lock)
+ *                    lock(l->lock)
+ *                    fixup_rt_mutex_waiters()
+ *                      if (wait_list_empty(l) {
+ *                        l->owner = owner
+ *                        owner = l->owner & ~HAS_WAITERS;
+ *                          ==> l->owner = T1
+ *                      }
+ *                                         lock(l->lock)
+ * rt_mutex_unlock(l)                      fixup_rt_mutex_waiters()
+ *                                           if (wait_list_empty(l) {
+ *                                             owner = l->owner & ~HAS_WAITERS;
+ *                                             cmpxchg(l->owner, T1, NULL)
+ *                                              ===> Success (l->owner = NULL)
+ *
+ *                                             l->owner = owner
+ *                                              ==> l->owner = T1
+ *                                           }
+ *
+ * With the check for the waiter bit in place T3 on CPU2 will not
+ * overwrite. All tasks fiddling with the waiters bit are
+ * serialized by l->lock, so nothing else can modify the waiters
+ * bit. If the bit is set then nothing can change l->owner either
+ * so the simple RMW is safe. The cmpxchg() will simply fail if it
+ * happens in the middle of the RMW because the waiters bit is
+ * still set.
+ */
+ owner = READ_ONCE(*p);
+ if (owner & RT_MUTEX_HAS_WAITERS)
+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
/*
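The comment's point is that a plain read-modify-write of lock->owner can silently overwrite a concurrent cmpxchg()-based unlock. A stripped-down C11 illustration of that hazard (names and layout invented for the example; this is not the kernel's fix, which instead only performs the RMW while the waiters bit is known to be set under the lock):

/* Minimal C11 sketch of the race the comment describes: a plain
 * read-modify-write of the owner word can overwrite a concurrent
 * cmpxchg-based unlock, while a compare_exchange of the full old value
 * cannot. Names and layout are illustrative only. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

static _Atomic uintptr_t owner;

static void clear_waiters_unsafe(void)
{
	uintptr_t val = atomic_load(&owner);		/* read ... */
	/* ... a concurrent unlock may set owner to NULL here ... */
	atomic_store(&owner, val & ~HAS_WAITERS);	/* ... blind write-back */
}

static void clear_waiters_safe(void)
{
	uintptr_t old = atomic_load(&owner);

	/* Only clears the bit if nobody changed the word in between. */
	atomic_compare_exchange_strong(&owner, &old, old & ~HAS_WAITERS);
}

int main(void)
{
	atomic_store(&owner, 0x1000 | HAS_WAITERS);	/* "T1 | HAS_WAITERS" */
	clear_waiters_safe();
	printf("owner now %#lx\n", (unsigned long)atomic_load(&owner));
	clear_waiters_unsafe();
	return 0;
}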
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 4f5f83c7d2d3..e317e1cbb3eb 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
- return (struct task_struct *)
- ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+ return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
}
/*
diff --git a/kernel/membarrier.c b/kernel/membarrier.c
index 536c727a56e9..9f9284f37f8d 100644
--- a/kernel/membarrier.c
+++ b/kernel/membarrier.c
@@ -16,6 +16,7 @@
#include <linux/syscalls.h>
#include <linux/membarrier.h>
+#include <linux/tick.h>
/*
* Bitmask made from a "or" of all commands within enum membarrier_cmd,
@@ -51,6 +52,9 @@
*/
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
+ /* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
+ if (tick_nohz_full_enabled())
+ return -ENOSYS;
if (unlikely(flags))
return -EINVAL;
switch (cmd) {
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 25ced161ebeb..f719c925cb54 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -159,7 +159,9 @@ static void devm_memremap_pages_release(struct device *dev, void *res)
struct page_map *page_map = res;
/* pages are dead and unused, undo the arch mapping */
+ mem_hotplug_begin();
arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+ mem_hotplug_done();
}
void *devm_memremap_pages(struct device *dev, struct resource *res)
@@ -189,7 +191,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
if (nid < 0)
nid = numa_mem_id();
+ mem_hotplug_begin();
error = arch_add_memory(nid, res->start, resource_size(res), true);
+ mem_hotplug_done();
if (error) {
devres_free(page_map);
return ERR_PTR(error);
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 3124cebaec31..2fc1177383a0 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -1159,6 +1159,22 @@ static int __init kaslr_nohibernate_setup(char *str)
return nohibernate_setup(str);
}
+static int __init page_poison_nohibernate_setup(char *str)
+{
+#ifdef CONFIG_PAGE_POISONING_ZERO
+ /*
+ * The zeroing option for page poison skips the checks on alloc.
+ * since hibernation doesn't save free pages there's no way to
+ * guarantee the pages will still be zeroed.
+ */
+ if (!strcmp(str, "on")) {
+ pr_info("Disabling hibernation due to page poisoning\n");
+ return nohibernate_setup(str);
+ }
+#endif
+ return 1;
+}
+
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);
@@ -1167,3 +1183,4 @@ __setup("resumewait", resumewait_setup);
__setup("resumedelay=", resumedelay_setup);
__setup("nohibernate", nohibernate_setup);
__setup("kaslr", kaslr_nohibernate_setup);
+__setup("page_poison=", page_poison_nohibernate_setup);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 7b884dc55bd0..9fcb521fab0e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1440,7 +1440,7 @@ static void call_console_drivers(int level,
{
struct console *con;
- trace_console(text, len);
+ trace_console_rcuidle(text, len);
if (level >= console_loglevel && !ignore_loglevel)
return;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 3189e51db7e8..a46c40bfb5f6 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -39,6 +39,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
BUG_ON(!list_empty(&child->ptrace_entry));
list_add(&child->ptrace_entry, &new_parent->ptraced);
child->parent = new_parent;
+ rcu_read_lock();
+ child->ptracer_cred = get_cred(__task_cred(new_parent));
+ rcu_read_unlock();
}
/**
@@ -71,11 +74,15 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
*/
void __ptrace_unlink(struct task_struct *child)
{
+ const struct cred *old_cred;
BUG_ON(!child->ptrace);
child->ptrace = 0;
child->parent = child->real_parent;
list_del_init(&child->ptrace_entry);
+ old_cred = child->ptracer_cred;
+ child->ptracer_cred = NULL;
+ put_cred(old_cred);
spin_lock(&child->sighand->siglock);
@@ -219,7 +226,7 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
- int dumpable = 0;
+ struct mm_struct *mm;
kuid_t caller_uid;
kgid_t caller_gid;
@@ -270,16 +277,11 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
- smp_rmb();
- if (task->mm)
- dumpable = get_dumpable(task->mm);
- rcu_read_lock();
- if (dumpable != SUID_DUMP_USER &&
- !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
- rcu_read_unlock();
- return -EPERM;
- }
- rcu_read_unlock();
+ mm = task->mm;
+ if (mm &&
+ ((get_dumpable(mm) != SUID_DUMP_USER) &&
+ !ptrace_has_cap(mm->user_ns, mode)))
+ return -EPERM;
return security_ptrace_access_check(task, mode);
}
@@ -343,10 +345,6 @@ static int ptrace_attach(struct task_struct *task, long request,
if (seize)
flags |= PT_SEIZED;
- rcu_read_lock();
- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
- flags |= PT_PTRACE_CAP;
- rcu_read_unlock();
task->ptrace = flags;
__ptrace_link(task, current);
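__ptrace_may_access() now checks get_dumpable(mm) against SUID_DUMP_USER. The same per-process dumpable state is visible from user space through prctl(); a minimal sketch (not part of the patch):

/* Minimal sketch: the SUID_DUMP_* state that __ptrace_may_access()
 * consults is the per-process "dumpable" flag, readable from user space
 * via prctl(PR_GET_DUMPABLE) and settable via prctl(PR_SET_DUMPABLE). */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int dumpable = prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);

	printf("dumpable = %d (1 == SUID_DUMP_USER)\n", dumpable);

	/* Clearing it blocks ptrace by unprivileged, same-uid tracers. */
	if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0))
		perror("PR_SET_DUMPABLE");
	printf("dumpable now = %d\n", prctl(PR_GET_DUMPABLE, 0, 0, 0, 0));
	return 0;
}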
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f07343b54fe5..2cb46d51d715 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -57,6 +57,8 @@
#include <linux/trace_events.h>
#include <linux/suspend.h>
+#include <soc/qcom/watchdog.h>
+
#include "tree.h"
#include "rcu.h"
@@ -1298,6 +1300,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
rcu_check_gp_kthread_starvation(rsp);
+#ifdef CONFIG_RCU_STALL_WATCHDOG_BITE
+ /* Induce watchdog bite */
+ msm_trigger_wdog_bite();
+#endif
+
force_quiescent_state(rsp); /* Kick them all. */
}
@@ -1333,6 +1340,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+#ifdef CONFIG_RCU_STALL_WATCHDOG_BITE
+ /* Induce non secure watchdog bite to collect context */
+ msm_trigger_wdog_bite();
+#endif
+
/*
* Attempt to revive the RCU machinery by forcing a context switch.
*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 312ffdad034a..e107c4d6b385 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -88,6 +88,7 @@
#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
+#include "../time/tick-internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -3260,7 +3261,8 @@ void scheduler_tick(void)
if (curr->sched_class == &fair_sched_class)
check_for_migration(rq, curr);
- core_ctl_check(wallclock);
+ if (cpu == tick_do_timer_cpu)
+ core_ctl_check(wallclock);
}
#ifdef CONFIG_NO_HZ_FULL
@@ -3362,6 +3364,9 @@ NOKPROBE_SYMBOL(preempt_count_sub);
*/
static noinline void __schedule_bug(struct task_struct *prev)
{
+ /* Save this before calling printk(), since that will clobber it */
+ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
+
if (oops_in_progress)
return;
@@ -3372,13 +3377,12 @@ static noinline void __schedule_bug(struct task_struct *prev)
print_modules();
if (irqs_disabled())
print_irqtrace_events(prev);
-#ifdef CONFIG_DEBUG_PREEMPT
- if (in_atomic_preempt_off()) {
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+ && in_atomic_preempt_off()) {
pr_err("Preemption disabled at:");
- print_ip_sym(current->preempt_disable_ip);
+ print_ip_sym(preempt_disable_ip);
pr_cont("\n");
}
-#endif
#ifdef CONFIG_PANIC_ON_SCHED_BUG
BUG();
#endif
@@ -8511,6 +8515,7 @@ EXPORT_SYMBOL(__might_sleep);
void ___might_sleep(const char *file, int line, int preempt_offset)
{
static unsigned long prev_jiffy; /* ratelimiting */
+ unsigned long preempt_disable_ip;
rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
@@ -8523,6 +8528,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
return;
prev_jiffy = jiffies;
+ /* Save this before calling printk(), since that will clobber it */
+ preempt_disable_ip = get_preempt_disable_ip(current);
+
printk(KERN_ERR
"BUG: sleeping function called from invalid context at %s:%d\n",
file, line);
@@ -8537,13 +8545,12 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
-#ifdef CONFIG_DEBUG_PREEMPT
- if (!preempt_count_equals(preempt_offset)) {
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+ && !preempt_count_equals(preempt_offset)) {
pr_err("Preemption disabled at:");
- print_ip_sym(current->preempt_disable_ip);
+ print_ip_sym(preempt_disable_ip);
pr_cont("\n");
}
-#endif
#ifdef CONFIG_PANIC_ON_SCHED_BUG
BUG();
#endif
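Both hunks replace #ifdef CONFIG_DEBUG_PREEMPT blocks with IS_ENABLED(CONFIG_DEBUG_PREEMPT) && ..., which keeps the code compiled and type-checked in every configuration while the optimizer drops it when the option is off. A stripped-down illustration of the same idiom in plain C, with a made-up option name:

/* Stripped-down illustration of the IS_ENABLED() idiom used in the hunks
 * above: turn a config option into a 0/1 constant so the check stays in
 * normal C (always parsed and type-checked) and the compiler eliminates
 * the dead branch, instead of hiding the code behind #ifdef. Macro names
 * here are hypothetical. */
#include <stdio.h>

#ifdef CONFIG_DEBUG_DEMO
#define DEBUG_DEMO_ENABLED 1
#else
#define DEBUG_DEMO_ENABLED 0
#endif

static void report(int preempt_disabled, unsigned long ip)
{
	if (DEBUG_DEMO_ENABLED && preempt_disabled) {
		/* Compiled either way, but dead code when the option is off. */
		printf("Preemption disabled at: %#lx\n", ip);
	}
}

int main(void)
{
	report(1, 0xdeadbeefUL);
	return 0;
}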
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 1e3accddd103..0b5f2dea18a1 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) "core_ctl: " fmt
+
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
@@ -33,6 +35,7 @@ struct cluster_data {
unsigned int busy_down_thres[MAX_CPUS_PER_CLUSTER];
unsigned int active_cpus;
unsigned int num_cpus;
+ unsigned int nr_isolated_cpus;
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
@@ -50,7 +53,6 @@ struct cluster_data {
};
struct cpu_data {
- bool online;
bool is_busy;
unsigned int busy;
unsigned int cpu;
@@ -242,22 +244,6 @@ static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}
-static ssize_t show_cpus(const struct cluster_data *state, char *buf)
-{
- struct cpu_data *c;
- ssize_t count = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&state_lock, flags);
- list_for_each_entry(c, &state->lru, sib) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "CPU%u (%s)\n", c->cpu,
- c->online ? "Online" : "Offline");
- }
- spin_unlock_irqrestore(&state_lock, flags);
- return count;
-}
-
static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -275,6 +261,7 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
ssize_t count = 0;
unsigned int cpu;
+ spin_lock_irq(&state_lock);
for_each_possible_cpu(cpu) {
c = &per_cpu(cpu_state, cpu);
cluster = c->cluster;
@@ -286,10 +273,11 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tCPU: %u\n", c->cpu);
count += snprintf(buf + count, PAGE_SIZE - count,
- "\tOnline: %u\n", c->online);
+ "\tOnline: %u\n",
+ cpu_online(c->cpu));
count += snprintf(buf + count, PAGE_SIZE - count,
- "\tActive: %u\n",
- !cpu_isolated(c->cpu));
+ "\tIsolated: %u\n",
+ cpu_isolated(c->cpu));
count += snprintf(buf + count, PAGE_SIZE - count,
"\tFirst CPU: %u\n",
cluster->first_cpu);
@@ -307,8 +295,12 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tNeed CPUs: %u\n", cluster->need_cpus);
count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNr isolated CPUs: %u\n",
+ cluster->nr_isolated_cpus);
+ count += snprintf(buf + count, PAGE_SIZE - count,
"\tBoost: %u\n", (unsigned int) cluster->boost);
}
+ spin_unlock_irq(&state_lock);
return count;
}
@@ -376,7 +368,6 @@ core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
-core_ctl_attr_ro(cpus);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
@@ -390,7 +381,6 @@ static struct attribute *default_attrs[] = {
&busy_down_thres.attr,
&task_thres.attr,
&is_big_cluster.attr,
- &cpus.attr,
&need_cpus.attr,
&active_cpus.attr,
&global_state.attr,
@@ -534,14 +524,14 @@ static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
static bool is_active(const struct cpu_data *state)
{
- return state->online && !cpu_isolated(state->cpu);
+ return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}
static bool adjustment_possible(const struct cluster_data *cluster,
unsigned int need)
{
return (need < cluster->active_cpus || (need > cluster->active_cpus &&
- sched_isolate_count(&cluster->cpu_mask, false)));
+ cluster->nr_isolated_cpus));
}
static bool eval_need(struct cluster_data *cluster)
@@ -551,9 +541,8 @@ static bool eval_need(struct cluster_data *cluster)
unsigned int need_cpus = 0, last_need, thres_idx;
int ret = 0;
bool need_flag = false;
- unsigned int active_cpus;
unsigned int new_need;
- s64 now;
+ s64 now, elapsed;
if (unlikely(!cluster->inited))
return 0;
@@ -563,8 +552,8 @@ static bool eval_need(struct cluster_data *cluster)
if (cluster->boost) {
need_cpus = cluster->max_cpus;
} else {
- active_cpus = get_active_cpu_count(cluster);
- thres_idx = active_cpus ? active_cpus - 1 : 0;
+ cluster->active_cpus = get_active_cpu_count(cluster);
+ thres_idx = cluster->active_cpus ? cluster->active_cpus - 1 : 0;
list_for_each_entry(c, &cluster->lru, sib) {
if (c->busy >= cluster->busy_up_thres[thres_idx])
c->is_busy = true;
@@ -580,17 +569,16 @@ static bool eval_need(struct cluster_data *cluster)
last_need = cluster->need_cpus;
now = ktime_to_ms(ktime_get());
- if (new_need == last_need) {
- cluster->need_ts = now;
- spin_unlock_irqrestore(&state_lock, flags);
- return 0;
- }
-
- if (need_cpus > cluster->active_cpus) {
+ if (new_need > cluster->active_cpus) {
ret = 1;
- } else if (need_cpus < cluster->active_cpus) {
- s64 elapsed = now - cluster->need_ts;
+ } else {
+ if (new_need == last_need) {
+ cluster->need_ts = now;
+ spin_unlock_irqrestore(&state_lock, flags);
+ return 0;
+ }
+ elapsed = now - cluster->need_ts;
ret = elapsed >= cluster->offline_delay_ms;
}
@@ -598,7 +586,7 @@ static bool eval_need(struct cluster_data *cluster)
cluster->need_ts = now;
cluster->need_cpus = new_need;
}
- trace_core_ctl_eval_need(cluster->first_cpu, last_need, need_cpus,
+ trace_core_ctl_eval_need(cluster->first_cpu, last_need, new_need,
ret && need_flag);
spin_unlock_irqrestore(&state_lock, flags);
@@ -669,6 +657,9 @@ int core_ctl_set_boost(bool boost)
int ret = 0;
bool boost_state_changed = false;
+ if (unlikely(!initialized))
+ return 0;
+
spin_lock_irqsave(&state_lock, flags);
for_each_cluster(cluster, index) {
if (cluster->is_big_cluster) {
@@ -731,6 +722,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_isolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -755,12 +747,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
/*
@@ -770,6 +764,7 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (cluster->active_cpus <= cluster->max_cpus)
return;
+ nr_isolated = 0;
num_cpus = cluster->num_cpus;
spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
@@ -787,12 +782,14 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
move_cpu_lru(c);
+ nr_isolated++;
} else {
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus += nr_isolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -803,6 +800,7 @@ static void __try_to_unisolate(struct cluster_data *cluster,
struct cpu_data *c, *tmp;
unsigned long flags;
unsigned int num_cpus = cluster->num_cpus;
+ unsigned int nr_unisolated = 0;
/*
* Protect against entry being removed (and added at tail) by other
@@ -815,7 +813,7 @@ static void __try_to_unisolate(struct cluster_data *cluster,
if (!c->isolated_by_us)
continue;
- if ((c->online && !cpu_isolated(c->cpu)) ||
+ if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
(!force && c->not_preferred))
continue;
if (cluster->active_cpus == need)
@@ -827,12 +825,14 @@ static void __try_to_unisolate(struct cluster_data *cluster,
if (!sched_unisolate_cpu(c->cpu)) {
c->isolated_by_us = false;
move_cpu_lru(c);
+ nr_unisolated++;
} else {
pr_debug("Unable to unisolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
spin_lock_irqsave(&state_lock, flags);
}
+ cluster->nr_isolated_cpus -= nr_unisolated;
spin_unlock_irqrestore(&state_lock, flags);
}
@@ -898,25 +898,14 @@ static int __ref cpu_callback(struct notifier_block *nfb,
struct cpu_data *state = &per_cpu(cpu_state, cpu);
struct cluster_data *cluster = state->cluster;
unsigned int need;
- int ret = NOTIFY_OK;
+ bool do_wakeup, unisolated = false;
+ unsigned long flags;
if (unlikely(!cluster || !cluster->inited))
- return NOTIFY_OK;
+ return NOTIFY_DONE;
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
-
- /* If online state of CPU somehow got out of sync, fix it. */
- if (state->online) {
- state->online = false;
- cluster->active_cpus = get_active_cpu_count(cluster);
- pr_warn("CPU%d offline when state is online\n", cpu);
- }
- break;
-
case CPU_ONLINE:
-
- state->online = true;
cluster->active_cpus = get_active_cpu_count(cluster);
/*
@@ -936,30 +925,29 @@ static int __ref cpu_callback(struct notifier_block *nfb,
if (state->isolated_by_us) {
sched_unisolate_cpu_unlocked(cpu);
state->isolated_by_us = false;
+ unisolated = true;
}
/* Move a CPU to the end of the LRU when it goes offline. */
move_cpu_lru(state);
- /* Fall through */
-
- case CPU_UP_CANCELED:
-
- /* If online state of CPU somehow got out of sync, fix it. */
- if (!state->online)
- pr_warn("CPU%d online when state is offline\n", cpu);
-
- state->online = false;
state->busy = 0;
cluster->active_cpus = get_active_cpu_count(cluster);
break;
+ default:
+ return NOTIFY_DONE;
}
need = apply_limits(cluster, cluster->need_cpus);
- if (adjustment_possible(cluster, need))
+ spin_lock_irqsave(&state_lock, flags);
+ if (unisolated)
+ cluster->nr_isolated_cpus--;
+ do_wakeup = adjustment_possible(cluster, need);
+ spin_unlock_irqrestore(&state_lock, flags);
+ if (do_wakeup)
wake_up_core_ctl_thread(cluster);
- return ret;
+ return NOTIFY_OK;
}
static struct notifier_block __refdata cpu_notifier = {
@@ -968,6 +956,42 @@ static struct notifier_block __refdata cpu_notifier = {
/* ============================ init code ============================== */
+static cpumask_var_t core_ctl_disable_cpumask;
+static bool core_ctl_disable_cpumask_present;
+
+static int __init core_ctl_disable_setup(char *str)
+{
+ if (!*str)
+ return -EINVAL;
+
+ alloc_bootmem_cpumask_var(&core_ctl_disable_cpumask);
+
+ if (cpulist_parse(str, core_ctl_disable_cpumask) < 0) {
+ free_bootmem_cpumask_var(core_ctl_disable_cpumask);
+ return -EINVAL;
+ }
+
+ core_ctl_disable_cpumask_present = true;
+ pr_info("disable_cpumask=%*pbl\n",
+ cpumask_pr_args(core_ctl_disable_cpumask));
+
+ return 0;
+}
+early_param("core_ctl_disable_cpumask", core_ctl_disable_setup);
+
+static bool should_skip(const struct cpumask *mask)
+{
+ if (!core_ctl_disable_cpumask_present)
+ return false;
+
+ /*
+ * We operate on a cluster basis. Disable the core_ctl for
+ * a cluster, if all of it's cpus are specified in
+ * core_ctl_disable_cpumask
+ */
+ return cpumask_subset(mask, core_ctl_disable_cpumask);
+}
+
static struct cluster_data *find_cluster_by_first_cpu(unsigned int first_cpu)
{
unsigned int i;
@@ -989,6 +1013,9 @@ static int cluster_init(const struct cpumask *mask)
unsigned int cpu;
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+ if (should_skip(mask))
+ return 0;
+
if (find_cluster_by_first_cpu(first_cpu))
return 0;
@@ -1028,8 +1055,6 @@ static int cluster_init(const struct cpumask *mask)
state = &per_cpu(cpu_state, cpu);
state->cluster = cluster;
state->cpu = cpu;
- if (cpu_online(cpu))
- state->online = true;
list_add_tail(&state->sib, &cluster->lru);
}
cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1091,6 +1116,9 @@ static int __init core_ctl_init(void)
{
unsigned int cpu;
+ if (should_skip(cpu_possible_mask))
+ return 0;
+
core_ctl_check_interval = (rq_avg_period_ms - RQ_AVG_TOLERANCE)
* NSEC_PER_MSEC;
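core_ctl_disable_setup() runs the core_ctl_disable_cpumask= boot argument through cpulist_parse(), which accepts lists such as 0-3,6. A small user-space sketch of parsing that format into a plain bitmask (helper name and the 64-CPU limit are illustrative, not the kernel implementation):

/* Small sketch of the cpulist format that cpulist_parse() accepts
 * ("0-3,6" style) turned into a plain bitmask. Helper name and the
 * 64-CPU limit are illustrative only. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static int parse_cpulist(const char *str, uint64_t *mask)
{
	const char *p = str;

	*mask = 0;
	while (*p) {
		char *end;
		long first = strtol(p, &end, 10);
		long last = first;

		if (end == p || first < 0 || first > 63)
			return -1;
		if (*end == '-') {
			p = end + 1;
			last = strtol(p, &end, 10);
			if (end == p || last < first || last > 63)
				return -1;
		}
		for (long cpu = first; cpu <= last; cpu++)
			*mask |= 1ULL << cpu;
		if (*end == ',')
			end++;
		p = end;
	}
	return 0;
}

int main(void)
{
	uint64_t mask;

	if (parse_cpulist("0-3,6", &mask))
		return 1;
	printf("mask = %#llx\n", (unsigned long long)mask);	/* prints 0x4f */
	return 0;
}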
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cf55fc2663fb..8d5353906c8d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3844,6 +3844,10 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
+ /* Trace CPU load, unless cfs_rq belongs to a non-root task_group */
+ if (cfs_rq == &rq_of(cfs_rq)->cfs)
+ trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
return decayed || removed;
}
@@ -3867,7 +3871,6 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
if (entity_is_task(se))
trace_sched_load_avg_task(task_of(se), &se->avg);
- trace_sched_load_avg_cpu(cpu, cfs_rq);
}
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 2ffb1680b380..6e053bd9830c 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -62,8 +62,6 @@ static unsigned int max_possible_freq = 1;
*/
static unsigned int min_max_freq = 1;
-static unsigned int max_capacity = 1024;
-static unsigned int min_capacity = 1024;
static unsigned int max_load_scale_factor = 1024;
static unsigned int max_possible_capacity = 1024;
@@ -869,39 +867,6 @@ void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
double_rq_unlock(src_rq, dest_rq);
}
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static void __update_min_max_capacity(void)
-{
- int i;
- int max = 0, min = INT_MAX;
-
- for_each_online_cpu(i) {
- if (cpu_rq(i)->capacity > max)
- max = cpu_rq(i)->capacity;
- if (cpu_rq(i)->capacity < min)
- min = cpu_rq(i)->capacity;
- }
-
- max_capacity = max;
- min_capacity = min;
-}
-
-static void update_min_max_capacity(void)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for_each_possible_cpu(i)
- raw_spin_lock(&cpu_rq(i)->lock);
-
- __update_min_max_capacity();
-
- for_each_possible_cpu(i)
- raw_spin_unlock(&cpu_rq(i)->lock);
- local_irq_restore(flags);
-}
-
/*
* Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
* least efficient cpu gets capacity of 1024
@@ -984,15 +949,9 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
/* Initialized to policy->max in case policy->related_cpus is empty! */
unsigned int orig_max_freq = policy->max;
- if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
- val != CPUFREQ_CREATE_POLICY)
+ if (val != CPUFREQ_NOTIFY)
return 0;
- if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
- update_min_max_capacity();
- return 0;
- }
-
for_each_cpu(i, policy->related_cpus) {
cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
policy->related_cpus);
@@ -1082,8 +1041,6 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
max_load_scale_factor = highest_mplsf;
}
- __update_min_max_capacity();
-
return 0;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 51eef8e7df39..8cc5167e4b04 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2734,6 +2734,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
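The one-line fix applies the convmul/convdiv unit conversion before the min/max comparison, so the bounds are checked in the same units as the stored value. A tiny sketch of the corrected ordering, with made-up units and names:

/* Tiny sketch of the corrected order in __do_proc_doulongvec_minmax():
 * convert the user-supplied value into the table's internal units first,
 * then range-check it. Units and names are made up for illustration. */
#include <stdio.h>

static int store_scaled(unsigned long user_val,
			unsigned long convmul, unsigned long convdiv,
			unsigned long min, unsigned long max,
			unsigned long *out)
{
	unsigned long val = convmul * user_val / convdiv;	/* e.g. ms -> jiffies */

	if (val < min || val > max)
		return -1;		/* reject out-of-range, like the 'continue' above */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long stored;

	/* 250 ms with HZ=100: convmul=HZ=100, convdiv=1000 -> 25 jiffies */
	if (!store_scaled(250, 100, 1000, 1, 1000, &stored))
		printf("stored %lu\n", stored);
	return 0;
}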
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f6aae7977824..d2a20e83ebae 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -871,6 +871,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
int cpu = smp_processor_id();
+ if (!bc)
+ return;
+
/* Set it up only once ! */
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = clockevent_state_periodic(bc);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index ede4bf13d3e9..5fa544f3f560 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -298,10 +298,10 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
-static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
cycle_t delta)
{
- s64 nsec;
+ u64 nsec;
nsec = delta * tkr->mult + tkr->xtime_nsec;
nsec >>= tkr->shift;
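timekeeping_delta_to_ns() computes nsec = (delta * mult + xtime_nsec) >> shift; making the result u64 keeps a large intermediate value from being interpreted as negative. A small stand-alone sketch of that fixed-point conversion, with illustrative mult/shift values:

/* Small sketch of the fixed-point conversion the hunk touches:
 * ns = (cycles * mult + frac) >> shift. Keeping the result unsigned (u64)
 * avoids a large intermediate value being treated as a negative s64.
 * The mult/shift values below are illustrative only. */
#include <stdint.h>
#include <stdio.h>

static uint64_t delta_to_ns(uint64_t delta, uint32_t mult, uint32_t shift,
			    uint64_t xtime_nsec)
{
	uint64_t nsec = delta * mult + xtime_nsec;

	return nsec >> shift;
}

int main(void)
{
	/* A 19.2 MHz counter: mult / 2^shift ~= 52.08 ns per cycle. */
	uint32_t mult = 54613333, shift = 20;
	uint64_t one_second_of_cycles = 19200000;

	printf("%llu ns\n",
	       (unsigned long long)delta_to_ns(one_second_of_cycles, mult, shift, 0));
	return 0;
}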
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 2c3e0998d400..ed29c38cd7fb 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -507,8 +507,8 @@ int ipc_log_string(void *ilctxt, const char *fmt, ...)
tsv_qtimer_write(&ectxt);
avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
va_start(arg_list, fmt);
- data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size),
- avail_size, fmt, arg_list);
+ data_size = vscnprintf((ectxt.buff + ectxt.offset + hdr_size),
+ avail_size, fmt, arg_list);
va_end(arg_list);
tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
ectxt.offset += data_size;
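The vsnprintf() to vscnprintf() switch matters because vsnprintf() returns the length the output would have taken, which can exceed avail_size and corrupt the offset bookkeeping above, while vscnprintf() returns what was actually stored. A user-space sketch of the difference, clamping the snprintf() return the same way:

/* User-space sketch of why the vsnprintf() -> vscnprintf() switch matters:
 * snprintf()/vsnprintf() return the length the output WOULD have needed,
 * which can exceed the buffer, while the kernel's vscnprintf() returns the
 * number of characters actually stored. The clamp below mimics that. */
#include <stdio.h>

static int scnprintf_demo(char *buf, size_t size, const char *fmt, const char *arg)
{
	int would_be = snprintf(buf, size, fmt, arg);

	if (would_be < 0)
		return 0;
	/* Clamp to what actually fits, like vscnprintf() does. */
	return (size_t)would_be >= size ? (int)(size - 1) : would_be;
}

int main(void)
{
	char buf[8];
	int raw = snprintf(buf, sizeof(buf), "%s", "a long message");
	int clamped = scnprintf_demo(buf, sizeof(buf), "%s", "a long message");

	printf("vsnprintf-style return: %d, vscnprintf-style return: %d, stored: \"%s\"\n",
	       raw, clamped, buf);
	return 0;
}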
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
index 80058b544cb5..587082117842 100644
--- a/kernel/trace/msm_rtb.c
+++ b/kernel/trace/msm_rtb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
#include <asm-generic/sizes.h>
#include <linux/msm_rtb.h>
#include <asm/timex.h>
+#include <soc/qcom/minidump.h>
#define SENTINEL_BYTE_1 0xFF
#define SENTINEL_BYTE_2 0xAA
@@ -243,6 +244,7 @@ EXPORT_SYMBOL(uncached_logk);
static int msm_rtb_probe(struct platform_device *pdev)
{
struct msm_rtb_platform_data *d = pdev->dev.platform_data;
+ struct md_region md_entry;
#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
unsigned int cpu;
#endif
@@ -294,6 +296,12 @@ static int msm_rtb_probe(struct platform_device *pdev)
memset(msm_rtb.rtb, 0, msm_rtb.size);
+ strlcpy(md_entry.name, "KRTB_BUF", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)msm_rtb.rtb;
+ md_entry.phys_addr = msm_rtb.phys;
+ md_entry.size = msm_rtb.size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add RTB in Minidump\n");
#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
for_each_possible_cpu(cpu) {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9b15d6eb1622..c0c10a335b3b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1634,7 +1634,7 @@ static void __trace_find_cmdline(int pid, char comm[])
map = savedcmd->map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
- strcpy(comm, get_saved_cmdlines(map));
+ strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN - 1);
else
strcpy(comm, "<...>");
}
@@ -3991,6 +3991,7 @@ static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
kfree(s->saved_cmdlines);
kfree(s->map_cmdline_to_pid);
+ kfree(s->saved_tgids);
kfree(s);
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4641bdb40f8f..96c75b0e9831 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -785,6 +785,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ /* If a graph tracer ignored set_graph_notrace */
+ if (call->depth < -1)
+ call->depth += FTRACE_NOTRACE_DEPTH;
+
/*
* Comments display at + 1 to depth. Since
* this is a leaf function, keep the comments
@@ -793,7 +797,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data->depth = call->depth - 1;
/* No need to keep this function around for this depth */
- if (call->depth < FTRACE_RETFUNC_DEPTH)
+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = 0;
}
@@ -823,11 +828,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
+ /* If a graph tracer ignored set_graph_notrace */
+ if (call->depth < -1)
+ call->depth += FTRACE_NOTRACE_DEPTH;
+
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu_data->depth = call->depth;
/* Save this function pointer to see if the exit matches */
- if (call->depth < FTRACE_RETFUNC_DEPTH)
+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = call->func;
}
@@ -1057,7 +1067,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
*/
cpu_data->depth = trace->depth - 1;
- if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+ if (trace->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(trace->depth < 0)) {
if (cpu_data->enter_funcs[trace->depth] != trace->func)
func_match = 0;
cpu_data->enter_funcs[trace->depth] = 0;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index f2813e137b23..f527d8e65d69 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,6 +27,7 @@
#include <linux/kvm_para.h>
#include <linux/perf_event.h>
#include <linux/kthread.h>
+#include <soc/qcom/watchdog.h>
/*
* The run state of the lockup detectors is controlled by the content of the
@@ -122,9 +123,7 @@ static unsigned long soft_lockup_nmi_warn;
#ifdef CONFIG_HARDLOCKUP_DETECTOR
unsigned int __read_mostly hardlockup_panic =
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
static unsigned long __maybe_unused hardlockup_allcpu_dumped;
-#endif
/*
* We may not want to enable hard lockup detection by default in all cases,
* for example when running the kernel as a guest on a hypervisor. In these
@@ -366,8 +365,11 @@ static void watchdog_check_hardlockup_other_cpu(void)
if (per_cpu(hard_watchdog_warn, next_cpu) == true)
return;
- if (hardlockup_panic)
- panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+ if (hardlockup_panic) {
+ pr_err("Watchdog detected hard LOCKUP on cpu %u",
+ next_cpu);
+ msm_trigger_wdog_bite();
+ }
else
WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
@@ -423,13 +425,15 @@ static void watchdog_overflow_callback(struct perf_event *event,
*/
if (is_hardlockup()) {
int this_cpu = smp_processor_id();
- struct pt_regs *regs = get_irq_regs();
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ if (hardlockup_panic)
+ msm_trigger_wdog_bite();
+
print_modules();
print_irqtrace_events(current);
if (regs)
@@ -552,6 +556,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
smp_processor_id(), duration,
current->comm, task_pid_nr(current));
+
+ if (softlockup_panic)
+ msm_trigger_wdog_bite();
__this_cpu_write(softlockup_task_ptr_saved, current);
print_modules();
print_irqtrace_events(current);