Diffstat (limited to 'kernel')
-rw-r--r-- kernel/elfcore.c         |  1 +
-rw-r--r-- kernel/fork.c            |  4 ++--
-rw-r--r-- kernel/kprobes.c         |  3 ++-
-rw-r--r-- kernel/locking/lockdep.c |  3 +++
-rw-r--r-- kernel/panic.c           |  1 +
-rw-r--r-- kernel/printk/printk.c   |  2 +-
-rw-r--r-- kernel/sched/core.c      |  4 ----
-rw-r--r-- kernel/sched/fair.c      |  5 +++--
-rw-r--r-- kernel/time/alarmtimer.c |  8 ++++----
-rw-r--r-- kernel/trace/trace.c     | 18 ++++++++++++++++--
10 files changed, 33 insertions(+), 16 deletions(-)
diff --git a/kernel/elfcore.c b/kernel/elfcore.c
index e556751d15d9..a2b29b9bdfcb 100644
--- a/kernel/elfcore.c
+++ b/kernel/elfcore.c
@@ -2,6 +2,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/binfmts.h>
+#include <linux/elfcore.h>
Elf_Half __weak elf_core_extra_phdrs(void)
{
diff --git a/kernel/fork.c b/kernel/fork.c
index b3899603f88e..58514ce46152 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2190,7 +2190,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
struct ctl_table t;
int ret;
int threads = max_threads;
- int min = MIN_THREADS;
+ int min = 1;
int max = MAX_THREADS;
t = *table;
@@ -2202,7 +2202,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
if (ret || !write)
return ret;
- set_max_threads(threads);
+ max_threads = threads;
return 0;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a53998cba804..fdde50d39a46 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1454,7 +1454,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
/* Ensure it is not in reserved area nor out of text */
if (!kernel_text_address((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
- jump_label_text_reserved(p->addr, p->addr)) {
+ jump_label_text_reserved(p->addr, p->addr) ||
+ find_bug((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;
}
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f2df5f86af28..a419696709a1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3314,6 +3314,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned int depth;
int i;
+ if (unlikely(!debug_locks))
+ return 0;
+
depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
diff --git a/kernel/panic.c b/kernel/panic.c
index 75f564a94a82..aceb10bcfc2f 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -90,6 +90,7 @@ void panic(const char *fmt, ...)
* after the panic_lock is acquired) from invoking panic again.
*/
local_irq_disable();
+ preempt_disable_notrace();
/*
* It's possible to come here directly from a panic-assertion and
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index eb6a190c7a3a..2deedfc6db54 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3066,7 +3066,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
seq = dumper->cur_seq;
idx = dumper->cur_idx;
prev = 0;
- while (l > size && seq < dumper->next_seq) {
+ while (l >= size && seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
l -= msg_print_text(msg, prev, true, NULL, 0);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 543f7113b1d2..f6f8bb2f0d95 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9339,10 +9339,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
-#else
- /* We don't support RT-tasks being in separate groups */
- if (task->sched_class != &fair_sched_class)
- return -EINVAL;
#endif
/*
* Serialize against wake_up_new_task() such that if its
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e497ef064ab0..f01eb276835d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10729,9 +10729,10 @@ no_move:
out_balanced:
/*
* We reach balance although we may have faced some affinity
- * constraints. Clear the imbalance flag if it was set.
+ * constraints. Clear the imbalance flag only if other tasks got
+ * a chance to move and fix the imbalance.
*/
- if (sd_parent) {
+ if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
int *group_imbalance = &sd_parent->groups->sgc->imbalance;
if (*group_imbalance)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 4ac0a040e4ef..4171fee2d4ec 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -640,7 +640,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
struct alarm_base *base;
if (!alarmtimer_get_rtcdev())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!capable(CAP_WAKE_ALARM))
return -EPERM;
@@ -683,7 +683,7 @@ static void alarm_timer_get(struct k_itimer *timr,
static int alarm_timer_del(struct k_itimer *timr)
{
if (!rtcdev)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (alarm_try_to_cancel(&timr->it.alarm.alarmtimer) < 0)
return TIMER_RETRY;
@@ -707,7 +707,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
ktime_t exp;
if (!rtcdev)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (flags & ~TIMER_ABSTIME)
return -EINVAL;
@@ -869,7 +869,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
struct restart_block *restart;
if (!alarmtimer_get_rtcdev())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (flags & ~TIMER_ABSTIME)
return -EINVAL;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 12f8bf9bf85d..4509cf51fbfd 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3448,9 +3448,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
if (tracing_disabled)
return -ENODEV;
+ if (trace_array_get(tr) < 0)
+ return -ENODEV;
+
ret = seq_open(file, &show_traces_seq_ops);
- if (ret)
+ if (ret) {
+ trace_array_put(tr);
return ret;
+ }
m = file->private_data;
m->private = tr;
@@ -3458,6 +3463,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
return 0;
}
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+ struct trace_array *tr = inode->i_private;
+
+ trace_array_put(tr);
+ return seq_release(inode, file);
+}
+
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
@@ -3488,8 +3501,8 @@ static const struct file_operations tracing_fops = {
static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
- .release = seq_release,
.llseek = seq_lseek,
+ .release = show_traces_release,
};
static ssize_t
@@ -4948,6 +4961,7 @@ waitagain:
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
cpumask_clear(iter->started);
+ trace_seq_init(&iter->seq);
iter->pos = -1;
trace_event_read_lock();