Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c                   36
-rw-r--r--  kernel/cpu.c                          12
-rw-r--r--  kernel/debug/debug_core.c              4
-rw-r--r--  kernel/events/core.c                  42
-rw-r--r--  kernel/fork.c                          9
-rw-r--r--  kernel/jump_label.c                    7
-rw-r--r--  kernel/locking/rtmutex.c              68
-rw-r--r--  kernel/locking/rtmutex_common.h        5
-rw-r--r--  kernel/memremap.c                      4
-rw-r--r--  kernel/ptrace.c                       28
-rw-r--r--  kernel/sched/core_ctl.c               55
-rw-r--r--  kernel/sched/fair.c                    5
-rw-r--r--  kernel/sched/walt.c                   45
-rw-r--r--  kernel/sysctl.c                        1
-rw-r--r--  kernel/time/tick-broadcast.c           3
-rw-r--r--  kernel/time/timekeeping.c              4
-rw-r--r--  kernel/trace/trace_functions_graph.c  17
-rw-r--r--  kernel/watchdog.c                      1
18 files changed, 195 insertions(+), 151 deletions(-)
diff --git a/kernel/capability.c b/kernel/capability.c
index 00411c82dac5..4984e1f552eb 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -457,6 +457,19 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
EXPORT_SYMBOL(file_ns_capable);
/**
+ * privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
+ * @ns: The user namespace in question
+ * @inode: The inode in question
+ *
+ * Return true if the inode uid and gid are within the namespace.
+ */
+bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode)
+{
+ return kuid_has_mapping(ns, inode->i_uid) &&
+ kgid_has_mapping(ns, inode->i_gid);
+}
+
+/**
* capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
* @inode: The inode in question
* @cap: The capability in question
@@ -469,7 +482,26 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
{
struct user_namespace *ns = current_user_ns();
- return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
- kgid_has_mapping(ns, inode->i_gid);
+ return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode);
}
EXPORT_SYMBOL(capable_wrt_inode_uidgid);
+
+/**
+ * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
+ * @tsk: The task that may be ptraced
+ * @ns: The user namespace to search for CAP_SYS_PTRACE in
+ *
+ * Return true if the task that is ptracing @tsk has CAP_SYS_PTRACE in
+ * the specified user namespace (an absent tracer adds no restrictions).
+ */
+bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
+{
+ int ret = 0; /* An absent tracer adds no restrictions */
+ const struct cred *cred;
+ rcu_read_lock();
+ cred = rcu_dereference(tsk->ptracer_cred);
+ if (cred)
+ ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
+ rcu_read_unlock();
+ return (ret == 0);
+}
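
A minimal userspace model of the refactor above, with stand-in types rather than the kernel's kuid_t/kgid_t: splitting the mapping test out of capable_wrt_inode_uidgid() lets the ptrace changes later in this series reuse it without also requiring ns_capable().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in mapping state; purely illustrative, not the kernel API. */
struct user_namespace { unsigned int first, count; };
struct inode { unsigned int i_uid, i_gid; };

static bool id_has_mapping(const struct user_namespace *ns, unsigned int id)
{
	return id >= ns->first && id - ns->first < ns->count;
}

/* Shape of privileged_wrt_inode_uidgid(): the mapping test alone. */
static bool privileged_wrt_inode_uidgid(const struct user_namespace *ns,
					const struct inode *inode)
{
	return id_has_mapping(ns, inode->i_uid) &&
	       id_has_mapping(ns, inode->i_gid);
}

int main(void)
{
	struct user_namespace ns = { .first = 100000, .count = 65536 };
	struct inode mapped  = { .i_uid = 100001, .i_gid = 100001 };
	struct inode outside = { .i_uid = 0, .i_gid = 0 };

	printf("mapped=%d outside=%d\n",
	       privileged_wrt_inode_uidgid(&ns, &mapped),
	       privileged_wrt_inode_uidgid(&ns, &outside));
	return 0;
}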
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2432cc630ffb..8b6940755e4a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -232,12 +232,6 @@ static int cpu_notify(unsigned long val, void *v)
return __cpu_notify(val, v, -1, NULL);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void cpu_notify_nofail(unsigned long val, void *v)
-{
- BUG_ON(cpu_notify(val, v));
-}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
@@ -255,6 +249,12 @@ void __unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
+#ifdef CONFIG_HOTPLUG_CPU
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+ BUG_ON(cpu_notify(val, v));
+}
+
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 0874e2edd275..79517e5549f1 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -598,11 +598,11 @@ return_normal:
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
- time_left = loops_per_jiffy * HZ;
+ time_left = MSEC_PER_SEC;
while (kgdb_do_roundup && --time_left &&
(atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
online_cpus)
- cpu_relax();
+ udelay(1000);
if (!time_left)
pr_crit("Timed out waiting for secondary CPUs.\n");
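
The new wait above is bounded by wall time, roughly one second in 1 ms steps, instead of a loops_per_jiffy-scaled spin count that varies with CPU speed. A minimal userspace sketch of the same shape, with usleep() standing in for udelay():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the masters_in_kgdb + slaves_in_kgdb == online test. */
static bool all_cpus_rounded_up(void) { return false; }

static bool wait_for_roundup(void)
{
	int time_left = 1000;	/* MSEC_PER_SEC: ~1 second budget */

	while (--time_left && !all_cpus_rounded_up())
		usleep(1000);	/* 1 ms per step, like udelay(1000) */

	return time_left != 0;	/* false: timed out */
}

int main(void)
{
	printf("rounded up: %d\n", wait_for_roundup());
	return 0;
}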
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8d2482d77c04..f9c6f554460e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6224,6 +6224,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
char *buf = NULL;
char *name;
+ if (vma->vm_flags & VM_READ)
+ prot |= PROT_READ;
+ if (vma->vm_flags & VM_WRITE)
+ prot |= PROT_WRITE;
+ if (vma->vm_flags & VM_EXEC)
+ prot |= PROT_EXEC;
+
+ if (vma->vm_flags & VM_MAYSHARE)
+ flags = MAP_SHARED;
+ else
+ flags = MAP_PRIVATE;
+
+ if (vma->vm_flags & VM_DENYWRITE)
+ flags |= MAP_DENYWRITE;
+ if (vma->vm_flags & VM_MAYEXEC)
+ flags |= MAP_EXECUTABLE;
+ if (vma->vm_flags & VM_LOCKED)
+ flags |= MAP_LOCKED;
+ if (vma->vm_flags & VM_HUGETLB)
+ flags |= MAP_HUGETLB;
+
if (file) {
struct inode *inode;
dev_t dev;
@@ -6250,27 +6271,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
maj = MAJOR(dev);
min = MINOR(dev);
- if (vma->vm_flags & VM_READ)
- prot |= PROT_READ;
- if (vma->vm_flags & VM_WRITE)
- prot |= PROT_WRITE;
- if (vma->vm_flags & VM_EXEC)
- prot |= PROT_EXEC;
-
- if (vma->vm_flags & VM_MAYSHARE)
- flags = MAP_SHARED;
- else
- flags = MAP_PRIVATE;
-
- if (vma->vm_flags & VM_DENYWRITE)
- flags |= MAP_DENYWRITE;
- if (vma->vm_flags & VM_MAYEXEC)
- flags |= MAP_EXECUTABLE;
- if (vma->vm_flags & VM_LOCKED)
- flags |= MAP_LOCKED;
- if (vma->vm_flags & VM_HUGETLB)
- flags |= MAP_HUGETLB;
-
goto got_name;
} else {
if (vma->vm_ops && vma->vm_ops->name) {
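
Hoisting the prot/flags translation in front of the if (file) branch means anonymous mappings also get PROT_*/MAP_* bits in the mmap event, rather than always reporting zero. A stand-alone sketch of the prot half, with illustrative VM_* values:

#include <stdio.h>
#include <sys/mman.h>

/* Illustrative stand-ins for the kernel's VM_* bits. */
#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

static unsigned int vm_flags_to_prot(unsigned long vm_flags)
{
	unsigned int prot = 0;

	if (vm_flags & VM_READ)
		prot |= PROT_READ;
	if (vm_flags & VM_WRITE)
		prot |= PROT_WRITE;
	if (vm_flags & VM_EXEC)
		prot |= PROT_EXEC;
	return prot;
}

int main(void)
{
	/* An anonymous rw- mapping now reports non-zero prot as well. */
	printf("prot=%#x\n", vm_flags_to_prot(VM_READ | VM_WRITE));
	return 0;
}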
diff --git a/kernel/fork.c b/kernel/fork.c
index 7de03658692b..75573eeb49b2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -587,7 +587,8 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
#endif
}
-static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
+static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
+ struct user_namespace *user_ns)
{
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;
@@ -627,6 +628,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
if (init_new_context(p, mm))
goto fail_nocontext;
+ mm->user_ns = get_user_ns(user_ns);
return mm;
fail_nocontext:
@@ -672,7 +674,7 @@ struct mm_struct *mm_alloc(void)
return NULL;
memset(mm, 0, sizeof(*mm));
- return mm_init(mm, current);
+ return mm_init(mm, current, current_user_ns());
}
/*
@@ -687,6 +689,7 @@ void __mmdrop(struct mm_struct *mm)
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
check_mm(mm);
+ put_user_ns(mm->user_ns);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -948,7 +951,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
memcpy(mm, oldmm, sizeof(*mm));
- if (!mm_init(mm, tsk))
+ if (!mm_init(mm, tsk, mm->user_ns))
goto fail_nomem;
err = dup_mmap(mm, oldmm);
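
These fork.c hunks pin the user namespace for the lifetime of the mm: mm_init() takes a reference via get_user_ns() and __mmdrop() releases it with put_user_ns(). A toy refcount model of that pairing, not the kernel implementation:

#include <assert.h>
#include <stdio.h>

/* Toy refcount standing in for get_user_ns()/put_user_ns(). */
struct user_namespace { int count; };

static struct user_namespace *get_user_ns(struct user_namespace *ns)
{
	ns->count++;
	return ns;
}

static void put_user_ns(struct user_namespace *ns)
{
	assert(ns->count > 0);
	ns->count--;
}

struct mm_struct { struct user_namespace *user_ns; };

/* Like mm_init(): the mm takes its own reference... */
static void mm_init(struct mm_struct *mm, struct user_namespace *ns)
{
	mm->user_ns = get_user_ns(ns);
}

/* ...and like __mmdrop(): drops it when the mm is freed, so the
 * namespace outlives every mm that still points at it. */
static void mmdrop(struct mm_struct *mm)
{
	put_user_ns(mm->user_ns);
}

int main(void)
{
	struct user_namespace init_ns = { .count = 1 };
	struct mm_struct mm;

	mm_init(&mm, &init_ns);
	printf("after mm_init: %d\n", init_ns.count);	/* 2 */
	mmdrop(&mm);
	printf("after mmdrop:  %d\n", init_ns.count);	/* 1 */
	return 0;
}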
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 4b353e0be121..453ec4232852 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -138,6 +138,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+ flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
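
static_key_deferred_flush() lets a caller force a pending deferred decrement to run before freeing the key, e.g. on module unload. A toy model of that flush-before-free ordering; the real code flushes a delayed workqueue item, modeled here by a pending flag:

#include <stdbool.h>
#include <stdio.h>

struct static_key_deferred {
	int enabled;
	bool pending;	/* stands in for queued delayed work */
};

static void do_dec(struct static_key_deferred *key)
{
	key->enabled--;
	key->pending = false;
}

static void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	key->pending = true;	/* queued, would run later */
}

static void static_key_deferred_flush(struct static_key_deferred *key)
{
	if (key->pending)
		do_dec(key);	/* run the queued work now, synchronously */
}

int main(void)
{
	struct static_key_deferred key = { .enabled = 1 };

	static_key_slow_dec_deferred(&key);
	static_key_deferred_flush(&key);	/* now safe to free key */
	printf("enabled=%d pending=%d\n", key.enabled, key.pending);
	return 0;
}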
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8251e75dd9c0..b066724d7a5b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
- if (!rt_mutex_has_waiters(lock))
- clear_rt_mutex_waiters(lock);
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+ if (rt_mutex_has_waiters(lock))
+ return;
+
+ /*
+ * The rbtree has no waiters enqueued, now make sure that the
+ * lock->owner still has the waiters bit set, otherwise the
+ * following can happen:
+ *
+ * CPU 0	CPU 1			CPU2
+ * l->owner=T1
+ *		rt_mutex_lock(l)
+ *		lock(l->lock)
+ *		l->owner = T1 | HAS_WAITERS;
+ *		enqueue(T2)
+ *		boost()
+ *		  unlock(l->lock)
+ *		block()
+ *
+ *				rt_mutex_lock(l)
+ *				lock(l->lock)
+ *				l->owner = T1 | HAS_WAITERS;
+ *				enqueue(T3)
+ *				boost()
+ *				  unlock(l->lock)
+ *				block()
+ *		signal(->T2)	signal(->T3)
+ *		lock(l->lock)
+ *		dequeue(T2)
+ *		deboost()
+ *		  unlock(l->lock)
+ *				lock(l->lock)
+ *				dequeue(T3)
+ *				 ==> wait list is empty
+ *				deboost()
+ *				 unlock(l->lock)
+ *		lock(l->lock)
+ *		fixup_rt_mutex_waiters()
+ *		  if (wait_list_empty(l)) {
+ *		    l->owner = owner
+ *		    owner = l->owner & ~HAS_WAITERS;
+ *		      ==> l->owner = T1
+ *		  }
+ *				lock(l->lock)
+ * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
+ *				  if (wait_list_empty(l)) {
+ *				    owner = l->owner & ~HAS_WAITERS;
+ * cmpxchg(l->owner, T1, NULL)
+ *  ===> Success (l->owner = NULL)
+ *
+ *				    l->owner = owner
+ *				      ==> l->owner = T1
+ *				  }
+ *
+ * With the check for the waiter bit in place T3 on CPU2 will not
+ * overwrite. All tasks fiddling with the waiters bit are
+ * serialized by l->lock, so nothing else can modify the waiters
+ * bit. If the bit is set then nothing can change l->owner either
+ * so the simple RMW is safe. The cmpxchg() will simply fail if it
+ * happens in the middle of the RMW because the waiters bit is
+ * still set.
+ */
+ owner = READ_ONCE(*p);
+ if (owner & RT_MUTEX_HAS_WAITERS)
+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
/*
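
The fixup above leans on the interplay between its plain read-modify-write and the cmpxchg()-based unlock fast path: while the waiters bit is set the cmpxchg cannot succeed, so the RMW needs no atomicity of its own. A runnable stdatomic model of that invariant, with illustrative values:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RT_MUTEX_HAS_WAITERS 1UL

/* Toy owner word: the low bit is the waiters flag, as in rt_mutex. */
static _Atomic uintptr_t owner;

/* The fixup RMW: safe because a set bit blocks the unlock fast path. */
static void fixup_clear_waiters(void)
{
	uintptr_t val = atomic_load(&owner);

	if (val & RT_MUTEX_HAS_WAITERS)
		atomic_store(&owner, val & ~RT_MUTEX_HAS_WAITERS);
}

/* The unlock fast path: cmpxchg fails while the waiters bit is set. */
static int fast_unlock(uintptr_t me)
{
	uintptr_t expected = me;

	return atomic_compare_exchange_strong(&owner, &expected, 0);
}

int main(void)
{
	uintptr_t task = 0x1000;

	atomic_store(&owner, task | RT_MUTEX_HAS_WAITERS);
	printf("unlock, bit set:    %d\n", fast_unlock(task));	/* 0 */
	fixup_clear_waiters();
	printf("unlock after fixup: %d\n", fast_unlock(task));	/* 1 */
	return 0;
}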
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 4f5f83c7d2d3..e317e1cbb3eb 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
- return (struct task_struct *)
- ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+ unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+ return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
}
/*
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 25ced161ebeb..f719c925cb54 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -159,7 +159,9 @@ static void devm_memremap_pages_release(struct device *dev, void *res)
struct page_map *page_map = res;
/* pages are dead and unused, undo the arch mapping */
+ mem_hotplug_begin();
arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
+ mem_hotplug_done();
}
void *devm_memremap_pages(struct device *dev, struct resource *res)
@@ -189,7 +191,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
if (nid < 0)
nid = numa_mem_id();
+ mem_hotplug_begin();
error = arch_add_memory(nid, res->start, resource_size(res), true);
+ mem_hotplug_done();
if (error) {
devres_free(page_map);
return ERR_PTR(error);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 3189e51db7e8..a46c40bfb5f6 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -39,6 +39,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
BUG_ON(!list_empty(&child->ptrace_entry));
list_add(&child->ptrace_entry, &new_parent->ptraced);
child->parent = new_parent;
+ rcu_read_lock();
+ child->ptracer_cred = get_cred(__task_cred(new_parent));
+ rcu_read_unlock();
}
/**
@@ -71,11 +74,15 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
*/
void __ptrace_unlink(struct task_struct *child)
{
+ const struct cred *old_cred;
BUG_ON(!child->ptrace);
child->ptrace = 0;
child->parent = child->real_parent;
list_del_init(&child->ptrace_entry);
+ old_cred = child->ptracer_cred;
+ child->ptracer_cred = NULL;
+ put_cred(old_cred);
spin_lock(&child->sighand->siglock);
@@ -219,7 +226,7 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
- int dumpable = 0;
+ struct mm_struct *mm;
kuid_t caller_uid;
kgid_t caller_gid;
@@ -270,16 +277,11 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
- smp_rmb();
- if (task->mm)
- dumpable = get_dumpable(task->mm);
- rcu_read_lock();
- if (dumpable != SUID_DUMP_USER &&
- !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
- rcu_read_unlock();
- return -EPERM;
- }
- rcu_read_unlock();
+ mm = task->mm;
+ if (mm &&
+ ((get_dumpable(mm) != SUID_DUMP_USER) &&
+ !ptrace_has_cap(mm->user_ns, mode)))
+ return -EPERM;
return security_ptrace_access_check(task, mode);
}
@@ -343,10 +345,6 @@ static int ptrace_attach(struct task_struct *task, long request,
if (seize)
flags |= PT_SEIZED;
- rcu_read_lock();
- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
- flags |= PT_PTRACE_CAP;
- rcu_read_unlock();
task->ptrace = flags;
__ptrace_link(task, current);
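
Paired with the capability.c hunk, these ptrace changes replace the cached PT_PTRACE_CAP bit with a credential snapshot taken at attach time and consulted by later checks. A toy model of the snapshot-then-check pattern, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins: the kernel snapshots the tracer's full cred with
 * get_cred() in __ptrace_link() instead of caching one capability. */
struct cred { bool cap_sys_ptrace; };
struct task { const struct cred *ptracer_cred; };

static void ptrace_link(struct task *child, const struct cred *tracer)
{
	child->ptracer_cred = tracer;	/* get_cred() in the kernel */
}

static bool ptracer_capable(const struct task *tsk)
{
	/* An absent tracer adds no restrictions. */
	if (!tsk->ptracer_cred)
		return true;
	return tsk->ptracer_cred->cap_sys_ptrace;
}

int main(void)
{
	struct cred tracer = { .cap_sys_ptrace = false };
	struct task child = { 0 };

	printf("untraced: %d\n", ptracer_capable(&child));	/* 1 */
	ptrace_link(&child, &tracer);
	printf("traced:   %d\n", ptracer_capable(&child));	/* 0 */
	return 0;
}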
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 1e3accddd103..983159cc0646 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -10,6 +10,8 @@
* GNU General Public License for more details.
*/
+#define pr_fmt(fmt) "core_ctl: " fmt
+
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
@@ -50,7 +52,6 @@ struct cluster_data {
};
struct cpu_data {
- bool online;
bool is_busy;
unsigned int busy;
unsigned int cpu;
@@ -242,22 +243,6 @@ static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}
-static ssize_t show_cpus(const struct cluster_data *state, char *buf)
-{
- struct cpu_data *c;
- ssize_t count = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&state_lock, flags);
- list_for_each_entry(c, &state->lru, sib) {
- count += snprintf(buf + count, PAGE_SIZE - count,
- "CPU%u (%s)\n", c->cpu,
- c->online ? "Online" : "Offline");
- }
- spin_unlock_irqrestore(&state_lock, flags);
- return count;
-}
-
static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -286,10 +271,11 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tCPU: %u\n", c->cpu);
count += snprintf(buf + count, PAGE_SIZE - count,
- "\tOnline: %u\n", c->online);
+ "\tOnline: %u\n",
+ cpu_online(c->cpu));
count += snprintf(buf + count, PAGE_SIZE - count,
- "\tActive: %u\n",
- !cpu_isolated(c->cpu));
+ "\tIsolated: %u\n",
+ cpu_isolated(c->cpu));
count += snprintf(buf + count, PAGE_SIZE - count,
"\tFirst CPU: %u\n",
cluster->first_cpu);
@@ -376,7 +362,6 @@ core_ctl_attr_rw(busy_up_thres);
core_ctl_attr_rw(busy_down_thres);
core_ctl_attr_rw(task_thres);
core_ctl_attr_rw(is_big_cluster);
-core_ctl_attr_ro(cpus);
core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
@@ -390,7 +375,6 @@ static struct attribute *default_attrs[] = {
&busy_down_thres.attr,
&task_thres.attr,
&is_big_cluster.attr,
- &cpus.attr,
&need_cpus.attr,
&active_cpus.attr,
&global_state.attr,
@@ -534,7 +518,7 @@ static unsigned int get_active_cpu_count(const struct cluster_data *cluster)
static bool is_active(const struct cpu_data *state)
{
- return state->online && !cpu_isolated(state->cpu);
+ return cpu_online(state->cpu) && !cpu_isolated(state->cpu);
}
static bool adjustment_possible(const struct cluster_data *cluster,
@@ -815,7 +799,7 @@ static void __try_to_unisolate(struct cluster_data *cluster,
if (!c->isolated_by_us)
continue;
- if ((c->online && !cpu_isolated(c->cpu)) ||
+ if ((cpu_online(c->cpu) && !cpu_isolated(c->cpu)) ||
(!force && c->not_preferred))
continue;
if (cluster->active_cpus == need)
@@ -904,19 +888,7 @@ static int __ref cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_UP_PREPARE:
-
- /* If online state of CPU somehow got out of sync, fix it. */
- if (state->online) {
- state->online = false;
- cluster->active_cpus = get_active_cpu_count(cluster);
- pr_warn("CPU%d offline when state is online\n", cpu);
- }
- break;
-
case CPU_ONLINE:
-
- state->online = true;
cluster->active_cpus = get_active_cpu_count(cluster);
/*
@@ -941,15 +913,6 @@ static int __ref cpu_callback(struct notifier_block *nfb,
/* Move a CPU to the end of the LRU when it goes offline. */
move_cpu_lru(state);
- /* Fall through */
-
- case CPU_UP_CANCELED:
-
- /* If online state of CPU somehow got out of sync, fix it. */
- if (!state->online)
- pr_warn("CPU%d online when state is offline\n", cpu);
-
- state->online = false;
state->busy = 0;
cluster->active_cpus = get_active_cpu_count(cluster);
break;
@@ -1028,8 +991,6 @@ static int cluster_init(const struct cpumask *mask)
state = &per_cpu(cpu_state, cpu);
state->cluster = cluster;
state->cpu = cpu;
- if (cpu_online(cpu))
- state->online = true;
list_add_tail(&state->sib, &cluster->lru);
}
cluster->active_cpus = get_active_cpu_count(cluster);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cf55fc2663fb..8d5353906c8d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3844,6 +3844,10 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
cfs_rq->load_last_update_time_copy = sa->last_update_time;
#endif
+ /* Trace CPU load, unless cfs_rq belongs to a non-root task_group */
+ if (cfs_rq == &rq_of(cfs_rq)->cfs)
+ trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
return decayed || removed;
}
@@ -3867,7 +3871,6 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
if (entity_is_task(se))
trace_sched_load_avg_task(task_of(se), &se->avg);
- trace_sched_load_avg_cpu(cpu, cfs_rq);
}
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 2ffb1680b380..6e053bd9830c 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -62,8 +62,6 @@ static unsigned int max_possible_freq = 1;
*/
static unsigned int min_max_freq = 1;
-static unsigned int max_capacity = 1024;
-static unsigned int min_capacity = 1024;
static unsigned int max_load_scale_factor = 1024;
static unsigned int max_possible_capacity = 1024;
@@ -869,39 +867,6 @@ void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
double_rq_unlock(src_rq, dest_rq);
}
-/* Keep track of max/min capacity possible across CPUs "currently" */
-static void __update_min_max_capacity(void)
-{
- int i;
- int max = 0, min = INT_MAX;
-
- for_each_online_cpu(i) {
- if (cpu_rq(i)->capacity > max)
- max = cpu_rq(i)->capacity;
- if (cpu_rq(i)->capacity < min)
- min = cpu_rq(i)->capacity;
- }
-
- max_capacity = max;
- min_capacity = min;
-}
-
-static void update_min_max_capacity(void)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for_each_possible_cpu(i)
- raw_spin_lock(&cpu_rq(i)->lock);
-
- __update_min_max_capacity();
-
- for_each_possible_cpu(i)
- raw_spin_unlock(&cpu_rq(i)->lock);
- local_irq_restore(flags);
-}
-
/*
* Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
* least efficient cpu gets capacity of 1024
@@ -984,15 +949,9 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
/* Initialized to policy->max in case policy->related_cpus is empty! */
unsigned int orig_max_freq = policy->max;
- if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
- val != CPUFREQ_CREATE_POLICY)
+ if (val != CPUFREQ_NOTIFY)
return 0;
- if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
- update_min_max_capacity();
- return 0;
- }
-
for_each_cpu(i, policy->related_cpus) {
cpumask_copy(&cpu_rq(i)->freq_domain_cpumask,
policy->related_cpus);
@@ -1082,8 +1041,6 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
max_load_scale_factor = highest_mplsf;
}
- __update_min_max_capacity();
-
return 0;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 51eef8e7df39..8cc5167e4b04 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2734,6 +2734,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
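
The one-line sysctl fix applies the convmul/convdiv scaling before the min/max comparison, so bounds expressed in the stored unit are checked against the converted value. A small model, with made-up unit values (milliseconds written into a table stored in jiffies at HZ=100):

#include <stdio.h>

static int store_scaled(unsigned long val, unsigned long convmul,
			unsigned long convdiv, unsigned long min,
			unsigned long max, unsigned long *out)
{
	val = convmul * val / convdiv;	/* convert first: the added line */
	if (val < min || val > max)
		return -1;		/* out of range in the stored unit */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long stored = 0;
	/* 250 ms at HZ=100 -> 25 jiffies, bounds 1..100 jiffies */
	int ret = store_scaled(250, 100, 1000, 1, 100, &stored);

	printf("ret=%d stored=%lu\n", ret, stored);
	return 0;
}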
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f6aae7977824..d2a20e83ebae 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -871,6 +871,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
int cpu = smp_processor_id();
+ if (!bc)
+ return;
+
/* Set it up only once ! */
if (bc->event_handler != tick_handle_oneshot_broadcast) {
int was_periodic = clockevent_state_periodic(bc);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index ede4bf13d3e9..5fa544f3f560 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -298,10 +298,10 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
-static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
cycle_t delta)
{
- s64 nsec;
+ u64 nsec;
nsec = delta * tkr->mult + tkr->xtime_nsec;
nsec >>= tkr->shift;
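
The switch to u64 matters because delta * tkr->mult can exceed INT64_MAX for large deltas; signed overflow is undefined behaviour, while the unsigned arithmetic is well defined. A stand-alone sketch of the mult/shift conversion, with illustrative values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t delta_to_ns(uint64_t delta, uint32_t mult,
			    uint64_t xtime_nsec, uint32_t shift)
{
	/* Unsigned multiply: no UB even when the product tops 2^63. */
	uint64_t nsec = delta * mult + xtime_nsec;

	return nsec >> shift;
}

int main(void)
{
	/* 2^39 * 2^24 = 2^63: above INT64_MAX, fine as u64. */
	printf("%" PRIu64 " ns\n",
	       delta_to_ns(1ULL << 39, 1U << 24, 0, 24));
	return 0;
}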
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4641bdb40f8f..96c75b0e9831 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -785,6 +785,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ /* If a graph tracer ignored set_graph_notrace */
+ if (call->depth < -1)
+ call->depth += FTRACE_NOTRACE_DEPTH;
+
/*
* Comments display at + 1 to depth. Since
* this is a leaf function, keep the comments
@@ -793,7 +797,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
cpu_data->depth = call->depth - 1;
/* No need to keep this function around for this depth */
- if (call->depth < FTRACE_RETFUNC_DEPTH)
+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = 0;
}
@@ -823,11 +828,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
struct fgraph_cpu_data *cpu_data;
int cpu = iter->cpu;
+ /* If a graph tracer ignored set_graph_notrace */
+ if (call->depth < -1)
+ call->depth += FTRACE_NOTRACE_DEPTH;
+
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
cpu_data->depth = call->depth;
/* Save this function pointer to see if the exit matches */
- if (call->depth < FTRACE_RETFUNC_DEPTH)
+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(call->depth < 0))
cpu_data->enter_funcs[call->depth] = call->func;
}
@@ -1057,7 +1067,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
*/
cpu_data->depth = trace->depth - 1;
- if (trace->depth < FTRACE_RETFUNC_DEPTH) {
+ if (trace->depth < FTRACE_RETFUNC_DEPTH &&
+ !WARN_ON_ONCE(trace->depth < 0)) {
if (cpu_data->enter_funcs[trace->depth] != trace->func)
func_match = 0;
cpu_data->enter_funcs[trace->depth] = 0;
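
These hunks undo the FTRACE_NOTRACE_DEPTH bias on depths that slipped past set_graph_notrace and refuse to index enter_funcs[] with a negative depth. A toy model of the guard; the FTRACE_* values are stand-ins (FTRACE_NOTRACE_DEPTH assumed 65536, as in contemporary kernels):

#include <stdio.h>

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50

static unsigned long enter_funcs[FTRACE_RETFUNC_DEPTH];

static void record_entry(int depth, unsigned long func)
{
	/* If a graph tracer ignored set_graph_notrace: undo the bias. */
	if (depth < -1)
		depth += FTRACE_NOTRACE_DEPTH;

	/* Never index with a negative depth, even if one slips through. */
	if (depth >= 0 && depth < FTRACE_RETFUNC_DEPTH)
		enter_funcs[depth] = func;
}

int main(void)
{
	record_entry(3, 0xdead);			/* normal depth */
	record_entry(2 - FTRACE_NOTRACE_DEPTH, 0xbeef);	/* biased depth */
	printf("%#lx %#lx\n", enter_funcs[3], enter_funcs[2]);
	return 0;
}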
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 91fa701b4a24..1de2ef8ec926 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -427,7 +427,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
*/
if (is_hardlockup()) {
int this_cpu = smp_processor_id();
- struct pt_regs *regs = get_irq_regs();
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)