Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu_pm.c                       4
-rw-r--r--  kernel/debug/debug_core.c             5
-rw-r--r--  kernel/events/uprobes.c              14
-rw-r--r--  kernel/kprobes.c                     27
-rw-r--r--  kernel/sched/core.c                   8
-rw-r--r--  kernel/sched/fair.c                  12
-rw-r--r--  kernel/sched/rt.c                     1
-rw-r--r--  kernel/sysctl.c                       2
-rw-r--r--  kernel/trace/blktrace.c              13
-rw-r--r--  kernel/trace/trace_events_trigger.c  21
10 files changed, 92 insertions, 15 deletions
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 774bfe7a2893..38dcb7c1c02c 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
  */
 int cpu_pm_enter(void)
 {
-	int nr_calls;
+	int nr_calls = 0;
 	int ret = 0;
 
 	read_lock(&cpu_pm_notifier_lock);
@@ -159,7 +159,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
  */
 int cpu_cluster_pm_enter(unsigned long aff_level)
 {
-	int nr_calls;
+	int nr_calls = 0;
 	int ret = 0;
 
 	read_lock(&cpu_pm_notifier_lock);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 79517e5549f1..321ccdbb7364 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -443,6 +443,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 
 	if (exception_level > 1) {
 		dump_stack();
+		kgdb_io_module_registered = false;
 		panic("Recursive entry to debugger");
 	}
 
@@ -487,6 +488,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 		arch_kgdb_ops.disable_hw_break(regs);
 
 acquirelock:
+	rcu_read_lock();
 	/*
 	 * Interrupts will be restored by the 'trap return' code, except when
 	 * single stepping.
@@ -541,6 +543,7 @@ return_normal:
 			atomic_dec(&slaves_in_kgdb);
 			dbg_touch_watchdogs();
 			local_irq_restore(flags);
+			rcu_read_unlock();
 			return 0;
 		}
 		cpu_relax();
@@ -559,6 +562,7 @@ return_normal:
 		raw_spin_unlock(&dbg_master_lock);
 		dbg_touch_watchdogs();
 		local_irq_restore(flags);
+		rcu_read_unlock();
 
 		goto acquirelock;
 	}
@@ -676,6 +680,7 @@ kgdb_restore:
 	raw_spin_unlock(&dbg_master_lock);
 	dbg_touch_watchdogs();
 	local_irq_restore(flags);
+	rcu_read_unlock();
 
 	return kgdb_info[cpu].ret_state;
 }
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2a9154b727da..f206a3bc431e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -602,10 +602,6 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
 	if (ret)
 		goto out;
 
-	/* uprobe_write_opcode() assumes we don't cross page boundary */
-	BUG_ON((uprobe->offset & ~PAGE_MASK) +
-			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
-
 	smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
 	set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 
@@ -884,6 +880,13 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *
 	if (offset > i_size_read(inode))
 		return -EINVAL;
 
+	/*
+	 * This ensures that copy_from_page() and copy_to_page()
+	 * can't cross page boundary.
+	 */
+	if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
+		return -EINVAL;
+
  retry:
 	uprobe = alloc_uprobe(inode, offset);
 	if (!uprobe)
@@ -1692,6 +1695,9 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	uprobe_opcode_t opcode;
 	int result;
 
+	if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
+		return -EINVAL;
+
 	pagefault_disable();
 	result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
 	pagefault_enable();
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index f59f49bc2a5d..5bda113a3116 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -561,11 +561,12 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
-	mutex_unlock(&kprobe_mutex);
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
+
+	mutex_unlock(&kprobe_mutex);
 }
 
 /* Wait for completing optimization and unoptimization */
@@ -1149,6 +1150,26 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
+struct kprobe kprobe_busy = {
+	.addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+	struct kprobe_ctlblk *kcb;
+
+	preempt_disable();
+	__this_cpu_write(current_kprobe, &kprobe_busy);
+	kcb = get_kprobe_ctlblk();
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+	__this_cpu_write(current_kprobe, NULL);
+	preempt_enable();
+}
+
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
  * so that we can recycle any function-return probe instances associated
@@ -1166,6 +1187,8 @@ void kprobe_flush_task(struct task_struct *tk)
 		/* Early boot. kretprobe_table_locks not yet initialized. */
 		return;
 
+	kprobe_busy_begin();
+
 	INIT_HLIST_HEAD(&empty_rp);
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
@@ -1179,6 +1202,8 @@ void kprobe_flush_task(struct task_struct *tk)
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
+
+	kprobe_busy_end();
 }
 NOKPROBE_SYMBOL(kprobe_flush_task);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 72e1ffe809f0..473293dd40a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3825,7 +3825,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (dl_prio(prio)) {
 		struct task_struct *pi_task = rt_mutex_get_top_task(p);
 		if (!dl_prio(p->normal_prio) ||
-		    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+		    (pi_task && dl_prio(pi_task->prio) &&
+		     dl_entity_preempt(&pi_task->dl, &p->dl))) {
 			p->dl.dl_boosted = 1;
 			queue_flag |= ENQUEUE_REPLENISH;
 		} else
@@ -9258,8 +9259,9 @@ int sched_rr_handler(struct ctl_table *table, int write,
 	/* make sure that internally we keep jiffies */
 	/* also, writing zero resets timeslice to default */
 	if (!ret && write) {
-		sched_rr_timeslice = sched_rr_timeslice <= 0 ?
-			RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice);
+		sched_rr_timeslice =
+			sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
+			msecs_to_jiffies(sysctl_sched_rr_timeslice);
 	}
 	mutex_unlock(&mutex);
 	return ret;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3f2b5c04623c..29d80146eb8a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2504,7 +2504,7 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
 	/*
 	 * We don't care about NUMA placement if we don't have memory.
 	 */
-	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
+	if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work)
 		return;
 
 	/*
@@ -8910,7 +8910,15 @@ redo:
 		if (!can_migrate_task(p, env))
 			goto next;
 
-		load = task_h_load(p);
+		/*
+		 * Depending of the number of CPUs and tasks and the
+		 * cgroup hierarchy, task_h_load() can return a null
+		 * value. Make sure that env->imbalance decreases
+		 * otherwise detach_tasks() will stop only after
+		 * detaching up to loop_max tasks.
+		 */
+		load = max_t(unsigned long, task_h_load(p), 1);
+
 		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index af21389466b8..391ec29c71c0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -14,6 +14,7 @@
 #include "tune.h"
 
 int sched_rr_timeslice = RR_TIMESLICE;
+int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ac0569f7d56a..fa543fd3a6db 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -663,7 +663,7 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname	= "sched_rr_timeslice_ms",
-		.data		= &sched_rr_timeslice,
+		.data		= &sysctl_sched_rr_timeslice,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= sched_rr_handler,
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 54be8790941e..267def89a2db 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -15,6 +15,9 @@
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
 #include <linux/blktrace_api.h>
@@ -481,6 +484,16 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	 */
 	strreplace(buts->name, '/', '_');
 
+	/*
+	 * bdev can be NULL, as with scsi-generic, this is a helpful as
+	 * we can be.
+	 */
+	if (q->blk_trace) {
+		pr_warn("Concurrent blktraces are not allowed on %s\n",
+			buts->name);
+		return -EBUSY;
+	}
+
 	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
 	if (!bt)
 		return -ENOMEM;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 78346aba6980..94fca4d687ad 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -204,11 +204,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 
 static int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
-	char *command, *next = buff;
+	char *command, *next;
 	struct event_command *p;
 	int ret = -EINVAL;
 
+	next = buff = skip_spaces(buff);
 	command = strsep(&next, ": \t");
+	if (next) {
+		next = skip_spaces(next);
+		if (!*next)
+			next = NULL;
+	}
 	command = (command[0] != '!') ? command : command + 1;
 
 	mutex_lock(&trigger_cmd_mutex);
@@ -615,8 +621,14 @@ event_trigger_callback(struct event_command *cmd_ops,
 	int ret;
 
 	/* separate the trigger from the filter (t:n [if filter]) */
-	if (param && isdigit(param[0]))
+	if (param && isdigit(param[0])) {
 		trigger = strsep(&param, " \t");
+		if (param) {
+			param = skip_spaces(param);
+			if (!*param)
+				param = NULL;
+		}
+	}
 
 	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 
@@ -1185,6 +1197,11 @@ event_enable_trigger_func(struct event_command *cmd_ops,
 	trigger = strsep(&param, " \t");
 	if (!trigger)
 		return -EINVAL;
+	if (param) {
+		param = skip_spaces(param);
+		if (!*param)
+			param = NULL;
+	}
 
 	system = strsep(&trigger, ":");
 	if (!trigger)
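
The sched_rr_timeslice hunks (kernel/sched/core.c, rt.c, sysctl.c) split one value into two: the sysctl file sched_rr_timeslice_ms now reads and writes sysctl_sched_rr_timeslice in milliseconds, while the scheduler keeps sched_rr_timeslice in jiffies, with sched_rr_handler() converting on write. A minimal userspace sketch of that conversion, assuming HZ=250 and an exact (non-rounding) msecs_to_jiffies(); the names mirror the kernel's, but everything below is a stand-alone illustration, not kernel code:

#include <stdio.h>

#define HZ 250				/* assumed; kernel configs vary */
#define MSEC_PER_SEC 1000L
#define RR_TIMESLICE (100 * HZ / 1000)	/* default timeslice: 100 ms, in jiffies */

/* simplified stand-in; the kernel's msecs_to_jiffies() also rounds up */
static long msecs_to_jiffies(long ms) { return ms * HZ / MSEC_PER_SEC; }

int main(void)
{
	int sched_rr_timeslice = RR_TIMESLICE;			/* jiffies, scheduler side */
	long sysctl_ms = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;	/* same value, in ms */

	/* what the patched sched_rr_handler() does after a sysctl write: */
	sched_rr_timeslice = sysctl_ms <= 0 ? RR_TIMESLICE :
			     (int)msecs_to_jiffies(sysctl_ms);
	printf("sysctl=%ldms internal=%d jiffies\n", sysctl_ms, sched_rr_timeslice);
	return 0;
}

With these assumptions it prints "sysctl=100ms internal=25 jiffies": the sysctl side stays in human-readable milliseconds regardless of HZ.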
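
The comment added in the kernel/sched/fair.c detach loop states the reasoning: task_h_load() can legitimately return 0 for tasks deep in a cgroup hierarchy, and since the loop subtracts each detached task's load from env->imbalance, a zero load makes no progress until loop_max is hit. A toy model of that loop under assumed values; none of these names are the kernel's actual detach_tasks() internals:

#include <stdio.h>

int main(void)
{
	long imbalance = 8;	/* load we still want to migrate */
	long h_load = 0;	/* task_h_load() legitimately returning 0 */
	int detached = 0, loops = 0, loop_max = 32;

	while (imbalance > 0 && loops < loop_max) {
		loops++;
		/* the patch's max_t(unsigned long, task_h_load(p), 1) clamp;
		 * without it, imbalance -= 0 would spin until loop_max */
		long load = h_load > 1 ? h_load : 1;
		imbalance -= load;
		detached++;
	}
	printf("detached=%d after %d loops\n", detached, loops);
	return 0;
}

With the clamp the loop terminates after 8 iterations; with load left at 0 it would only stop at loop_max.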
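
All three kernel/trace/trace_events_trigger.c hunks add the same normalization after strsep(): skip leading whitespace in the remainder, and treat a whitespace-only remainder as no remainder at all, so a trigger written with redundant spaces parses the same as the single-spaced form. A self-contained sketch of the pattern; skip_spaces() is rewritten here for userspace (in the kernel it comes from <linux/string.h>), and strsep() is the usual glibc/POSIX function:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static char *skip_spaces(char *s)	/* userspace stand-in */
{
	while (isspace((unsigned char)*s))
		s++;
	return s;
}

int main(void)
{
	char buf[] = "traceon   if   pid == 1";	/* redundant spaces */
	char *param = buf;
	char *trigger = strsep(&param, " \t");	/* split off the command word */

	/* the normalization each hunk adds after strsep(): */
	if (param) {
		param = skip_spaces(param);	/* drop the extra separators */
		if (!*param)
			param = NULL;		/* whitespace-only => absent */
	}
	printf("trigger=%s param=%s\n", trigger, param ? param : "(none)");
	return 0;
}

This prints trigger=traceon with param=if   pid == 1; before the patch the leftover leading spaces made the parameter parsing reject such input.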
