Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditfilter.c                 |  2
-rw-r--r--  kernel/auditsc.c                     |  2
-rw-r--r--  kernel/bpf/verifier.c                |  4
-rw-r--r--  kernel/cgroup.c                      |  6
-rw-r--r--  kernel/exit.c                        |  4
-rw-r--r--  kernel/fork.c                        |  6
-rw-r--r--  kernel/power/user.c                  |  5
-rw-r--r--  kernel/power/wakeup_reason.c         | 62
-rw-r--r--  kernel/sched/core.c                  |  4
-rw-r--r--  kernel/sched/cputime.c               |  5
-rw-r--r--  kernel/sched/hmp.c                   | 11
-rw-r--r--  kernel/seccomp.c                     | 21
-rw-r--r--  kernel/sys.c                         | 21
-rw-r--r--  kernel/trace/trace_events_trigger.c  | 18
-rw-r--r--  kernel/trace/trace_kprobe.c          | 15
15 files changed, 149 insertions, 37 deletions
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index b8ff9e193753..b57f929f1b46 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -406,7 +406,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
 			return -EINVAL;
 		break;
 	case AUDIT_EXE:
-		if (f->op != Audit_equal)
+		if (f->op != Audit_not_equal && f->op != Audit_equal)
 			return -EINVAL;
 		if (entry->rule.listnr != AUDIT_FILTER_EXIT)
 			return -EINVAL;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 34a57d57bcb1..b7aac4c5c9a1 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -470,6 +470,8 @@ static int audit_filter_rules(struct task_struct *tsk,
 			break;
 		case AUDIT_EXE:
 			result = audit_exe_compare(tsk, rule->exe);
+			if (f->op == Audit_not_equal)
+				result = !result;
 			break;
 		case AUDIT_UID:
 			result = audit_uid_comparator(cred->uid, f->op, f->uid);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 79e3c21a35d0..35dfa9e9d69e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2101,7 +2101,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
 			/* hold the map. If the program is rejected by verifier,
 			 * the map will be released by release_maps() or it
 			 * will be used by the valid program until it's unloaded
-			 * and all maps are released in free_bpf_prog_info()
+			 * and all maps are released in free_used_maps()
 			 */
 			map = bpf_map_inc(map, false);
 			if (IS_ERR(map)) {
@@ -2487,7 +2487,7 @@ free_log_buf:
 		vfree(log_buf);
 	if (!env->prog->aux->used_maps)
 		/* if we didn't copy map pointers into bpf_prog_info, release
-		 * them now. Otherwise free_bpf_prog_info() will release them.
+		 * them now. Otherwise free_used_maps() will release them.
 		 */
 		release_maps(env);
 	*prog = env->prog;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3fdb7545852e..cd3d81961cc2 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4092,7 +4092,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 	 */
 	do {
 		css_task_iter_start(&from->self, &it);
-		task = css_task_iter_next(&it);
+
+		do {
+			task = css_task_iter_next(&it);
+		} while (task && (task->flags & PF_EXITING));
+
 		if (task)
 			get_task_struct(task);
 		css_task_iter_end(&it);
diff --git a/kernel/exit.c b/kernel/exit.c
index f75f7cef0760..fc82e495b729 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,7 +54,6 @@
 #include <linux/writeback.h>
 #include <linux/shm.h>
 #include <linux/kcov.h>
-#include <linux/cpufreq_times.h>
 
 #include "sched/tune.h"
 
@@ -174,9 +173,6 @@ void release_task(struct task_struct *p)
 {
 	struct task_struct *leader;
 	int zap_leader;
-#ifdef CONFIG_CPU_FREQ_TIMES
-	cpufreq_task_times_exit(p);
-#endif
 repeat:
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
diff --git a/kernel/fork.c b/kernel/fork.c
index 4251e3806640..caa23ca489bb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -78,6 +78,7 @@
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
 #include <linux/kcov.h>
+#include <linux/cpufreq_times.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -228,6 +229,7 @@ static void account_kernel_stack(unsigned long *stack, int account)
 
 void free_task(struct task_struct *tsk)
 {
+	cpufreq_task_times_exit(tsk);
 	account_kernel_stack(tsk->stack, -1);
 	arch_release_thread_stack(tsk->stack);
 	free_thread_stack(tsk->stack);
@@ -1366,6 +1368,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	cpufreq_task_times_init(p);
+
 	ftrace_graph_init_task(p);
 
 	rt_mutex_init_task(p);
@@ -1798,6 +1802,8 @@ long _do_fork(unsigned long clone_flags,
 		struct completion vfork;
 		struct pid *pid;
 
+		cpufreq_task_times_alloc(p);
+
 		trace_sched_process_fork(current, p);
 
 		pid = get_task_pid(p, PIDTYPE_PID);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 35310b627388..bc6dde1f1567 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -186,6 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 		res = PAGE_SIZE - pg_offp;
 	}
 
+	if (!data_of(data->handle)) {
+		res = -EINVAL;
+		goto unlock;
+	}
+
 	res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
 			buf, count);
 	if (res > 0)
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
index 252611fad2fe..54697e28ba38 100644
--- a/kernel/power/wakeup_reason.c
+++ b/kernel/power/wakeup_reason.c
@@ -3,8 +3,14 @@
  *
  * Logs the reasons which caused the kernel to resume from
  * the suspend mode.
+ * Sends uevent to user space when enter or out of suspend,
+ * the modules of user space can use it to do some necessary
+ * operation. for example, sending a special signal to modem
+ * or controling the brightness of a lamp before or after suspend.
  *
  * Copyright (C) 2014 Google, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  * may be copied, distributed, and modified under those terms.
@@ -26,7 +32,8 @@
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <linux/suspend.h>
-
+#include <linux/kobject.h>
+#include <linux/suspend.h>
 
 #define MAX_WAKEUP_REASON_IRQS 32
 static int irq_list[MAX_WAKEUP_REASON_IRQS];
@@ -41,6 +48,9 @@ static ktime_t curr_monotime; /* monotonic time after last suspend */
 static ktime_t last_stime; /* monotonic boottime offset before last suspend */
 static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
 
+static struct class *wake_uevent_class;
+static struct device *wake_uevent_device;
+
 static ssize_t last_resume_reason_show(struct kobject *kobj,
 	struct kobj_attribute *attr, char *buf)
 {
@@ -168,12 +178,22 @@ void log_suspend_abort_reason(const char *fmt, ...)
 static int wakeup_reason_pm_event(struct notifier_block *notifier,
 		unsigned long pm_event, void *unused)
 {
+	int ret = 0;
+	static char envp[32] = {0};
+	static const char * const evp[] = {envp, NULL};
+
 	switch (pm_event) {
 	case PM_SUSPEND_PREPARE:
 		spin_lock(&resume_reason_lock);
 		irqcount = 0;
 		suspend_abort = false;
 		spin_unlock(&resume_reason_lock);
+		/* send the uevent to userspace */
+		snprintf(envp, 32, "STATE=%s", "suspend start");
+		ret = kobject_uevent_env(&wake_uevent_device->kobj,
+			KOBJ_CHANGE, (char **)evp);
+		if (ret)
+			pr_warn("Send uevent failed");
 		/* monotonic time since boot */
 		last_monotime = ktime_get();
 		/* monotonic time since boot including the time spent in suspend */
@@ -184,6 +204,12 @@ static int wakeup_reason_pm_event(struct notifier_block *notifier,
 		curr_monotime = ktime_get();
 		/* monotonic time since boot including the time spent in suspend */
 		curr_stime = ktime_get_boottime();
+		/* send the uevent to userspace */
+		snprintf(envp, 32, "STATE=%s", "resume complete");
+		ret = kobject_uevent_env(&wake_uevent_device->kobj,
+			KOBJ_CHANGE, (char **)evp);
+		if (ret)
+			pr_warn("Send uevent failed");
 		break;
 	default:
 		break;
@@ -195,12 +221,18 @@ static struct notifier_block wakeup_reason_pm_notifier_block = {
 	.notifier_call = wakeup_reason_pm_event,
 };
 
+static const struct file_operations wakeup_uevent = {
+	.owner = THIS_MODULE,
+};
+
 /* Initializes the sysfs parameter
  * registers the pm_event notifier
+ * register the wake_uevent device
 */
 int __init wakeup_reason_init(void)
 {
 	int retval;
+	int major;
 
 	retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
 	if (retval)
@@ -218,8 +250,36 @@ int __init wakeup_reason_init(void)
 		kobject_put(wakeup_reason);
 		printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
 				__func__, retval);
+		return retval;
+	}
+	major = register_chrdev(0, "wake_uevent", &wakeup_uevent);
+	if (major < 0) {
+		sysfs_remove_group(wakeup_reason, &attr_group);
+		kobject_put(wakeup_reason);
+		return major;
+	}
+	wake_uevent_class = class_create(THIS_MODULE, "wake_uevent");
+	if (IS_ERR(wake_uevent_class)) {
+		retval = PTR_ERR(wake_uevent_class);
+		goto fail_class;
 	}
+	wake_uevent_device = device_create(wake_uevent_class, NULL,
+					MKDEV(major, 0),
+					NULL, "wake_uevent");
+	if (IS_ERR(wake_uevent_device)) {
+		retval = PTR_ERR(wake_uevent_device);
+		goto fail_device;
+	}
+
 	return 0;
+
+fail_device:
+	class_destroy(wake_uevent_class);
+fail_class:
+	unregister_chrdev(major, "wake_uevent");
+	sysfs_remove_group(wakeup_reason, &attr_group);
+	kobject_put(wakeup_reason);
+	return retval;
 }
 
 late_initcall(wakeup_reason_init);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1ecb07de762..6a5671bdb792 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2366,10 +2366,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
-#ifdef CONFIG_CPU_FREQ_TIMES
-	cpufreq_task_times_init(p);
-#endif
-
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index bd4ef2bb551e..e6ec68c15aa3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -162,10 +162,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 
 	/* Account for user time used */
 	acct_account_cputime(p);
-#ifdef CONFIG_CPU_FREQ_TIMES
 	/* Account power usage for user time */
 	cpufreq_acct_update_power(p, cputime);
-#endif
 }
 
 /*
@@ -216,10 +214,9 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
 
 	/* Account for system time used */
 	acct_account_cputime(p);
-#ifdef CONFIG_CPU_FREQ_TIMES
+
 	/* Account power usage for system time */
 	cpufreq_acct_update_power(p, cputime);
-#endif
 }
 
 /*
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index d9f0669ff683..ddcf7cfb7248 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -2081,14 +2081,11 @@ static u32 top_task_load(struct rq *rq)
 	}
 }
 
-static int load_to_index(u32 load)
+static u32 load_to_index(u32 load)
 {
-	if (load < sched_load_granule)
-		return 0;
-	else if (load >= sched_ravg_window)
-		return NUM_LOAD_INDICES - 1;
-	else
-		return load / sched_load_granule;
+	u32 index = load / sched_load_granule;
+
+	return min(index, (u32)(NUM_LOAD_INDICES - 1));
 }
 
 static void update_top_tasks(struct task_struct *p, struct rq *rq,
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index efd384f3f852..9a9203b15cde 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -16,6 +16,8 @@
 #include <linux/atomic.h>
 #include <linux/audit.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
 #include <linux/sched.h>
 #include <linux/seccomp.h>
 #include <linux/slab.h>
@@ -214,8 +216,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
 	return true;
 }
 
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
 static inline void seccomp_assign_mode(struct task_struct *task,
-				       unsigned long seccomp_mode)
+				       unsigned long seccomp_mode,
+				       unsigned long flags)
 {
 	assert_spin_locked(&task->sighand->siglock);
 
@@ -225,6 +230,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
 	 * filter) is set.
 	 */
 	smp_mb__before_atomic();
+	/* Assume default seccomp processes want spec flaw mitigation. */
+	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+		arch_seccomp_spec_mitigate(task);
 	set_tsk_thread_flag(task, TIF_SECCOMP);
 }
 
@@ -292,7 +300,7 @@ static inline pid_t seccomp_can_sync_threads(void)
 	 * without dropping the locks.
 	 *
 	 */
-static inline void seccomp_sync_threads(void)
+static inline void seccomp_sync_threads(unsigned long flags)
 {
 	struct task_struct *thread, *caller;
 
@@ -333,7 +341,8 @@ static inline void seccomp_sync_threads(void)
 		 * allow one thread to transition the other.
 		 */
 		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
-			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
+					    flags);
 	}
 }
 
@@ -452,7 +461,7 @@ static long seccomp_attach_filter(unsigned int flags,
 
 	/* Now that the new filter is in place, synchronize to all threads. */
 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
-		seccomp_sync_threads();
+		seccomp_sync_threads(flags);
 
 	return 0;
 }
@@ -747,7 +756,7 @@ static long seccomp_set_mode_strict(void)
 #ifdef TIF_NOTSC
 	disable_TSC();
 #endif
-	seccomp_assign_mode(current, seccomp_mode);
+	seccomp_assign_mode(current, seccomp_mode, 0);
 	ret = 0;
 
 out:
@@ -805,7 +814,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
 	/* Do not free the successfully attached filter. */
 	prepared = NULL;
 
-	seccomp_assign_mode(current, seccomp_mode);
+	seccomp_assign_mode(current, seccomp_mode, flags);
 out:
 	spin_unlock_irq(&current->sighand->siglock);
 	if (flags & SECCOMP_FILTER_FLAG_TSYNC)
diff --git a/kernel/sys.c b/kernel/sys.c
index cf40663a54c2..0df4753d4969 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2224,6 +2224,17 @@ static int prctl_set_vma(unsigned long opt, unsigned long start,
 }
 #endif
 
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+{
+	return -EINVAL;
+}
+
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
+				    unsigned long ctrl)
+{
+	return -EINVAL;
+}
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
@@ -2445,6 +2456,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 	case PR_SET_VMA:
 		error = prctl_set_vma(arg2, arg3, arg4, arg5);
 		break;
+	case PR_GET_SPECULATION_CTRL:
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_get(me, arg2);
+		break;
+	case PR_SET_SPECULATION_CTRL:
+		if (arg4 || arg5)
+			return -EINVAL;
+		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+		break;
 	default:
 		error = -EINVAL;
 		break;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 2172dd61577e..b8a894adab2c 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -663,6 +663,8 @@ event_trigger_callback(struct event_command *cmd_ops,
 		goto out_free;
 
  out_reg:
+	/* Up the trigger_data count to make sure reg doesn't free it on failure */
+	event_trigger_init(trigger_ops, trigger_data);
 	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 	/*
 	 * The above returns on success the # of functions enabled,
@@ -670,11 +672,13 @@ event_trigger_callback(struct event_command *cmd_ops,
 	 * but if it didn't find any functions it returns zero.
 	 * Consider no functions a failure too.
 	 */
 	if (!ret) {
+		cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
 		ret = -ENOENT;
-		goto out_free;
-	} else if (ret < 0)
-		goto out_free;
-	ret = 0;
+	} else if (ret > 0)
+		ret = 0;
+
+	/* Down the counter of trigger_data or free it if not used anymore */
+	event_trigger_free(trigger_ops, trigger_data);
  out:
 	return ret;
@@ -1227,6 +1231,9 @@ event_enable_trigger_func(struct event_command *cmd_ops,
 		goto out;
 	}
 
+	/* Up the trigger_data count to make sure nothing frees it on failure */
+	event_trigger_init(trigger_ops, trigger_data);
+
 	if (trigger) {
 		number = strsep(&trigger, ":");
 
@@ -1277,6 +1284,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
 		goto out_disable;
 	/* Just return zero, not the number of enabled functions */
 	ret = 0;
+	event_trigger_free(trigger_ops, trigger_data);
 
  out:
 	return ret;
@@ -1287,7 +1295,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
  out_free:
 	if (cmd_ops->set_filter)
 		cmd_ops->set_filter(NULL, trigger_data, NULL);
-	kfree(trigger_data);
+	event_trigger_free(trigger_ops, trigger_data);
 	kfree(enable_data);
 	goto out;
 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f2682799c215..f0ee722be520 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -349,11 +349,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
 static int
 enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 {
+	struct event_file_link *link = NULL;
 	int ret = 0;
 
 	if (file) {
-		struct event_file_link *link;
-
 		link = kmalloc(sizeof(*link), GFP_KERNEL);
 		if (!link) {
 			ret = -ENOMEM;
@@ -373,6 +372,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
 		else
 			ret = enable_kprobe(&tk->rp.kp);
 	}
+
+	if (ret) {
+		if (file) {
+			/* Notice the if is true on not WARN() */
+			if (!WARN_ON_ONCE(!link))
+				list_del_rcu(&link->list);
+			kfree(link);
+			tk->tp.flags &= ~TP_FLAG_TRACE;
+		} else {
+			tk->tp.flags &= ~TP_FLAG_PROFILE;
+		}
+	}
 out:
 	return ret;
 }
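
Note on the speculation-control pieces above (kernel/sys.c and kernel/seccomp.c): they wire the PR_GET_SPECULATION_CTRL/PR_SET_SPECULATION_CTRL prctl options and the SECCOMP_FILTER_FLAG_SPEC_ALLOW seccomp flag into arch-provided __weak handlers. The sketch below is a minimal, illustrative user-space consumer, not part of this patch set; it assumes uapi headers recent enough to define the PR_SPEC_* constants (on older headers they would have to be defined by hand).

/*
 * Illustrative only (not part of this patch set): probe and set the
 * Speculative Store Bypass control exposed by the new prctl options.
 * Assumes <linux/prctl.h> defines PR_{GET,SET}_SPECULATION_CTRL and
 * the PR_SPEC_* constants.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* arg3..arg5 must be zero for PR_GET_SPECULATION_CTRL (see the kernel/sys.c hunk) */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		/* -EINVAL from the __weak stubs: this arch exposes no control */
		fprintf(stderr, "PR_GET_SPECULATION_CTRL: %s\n", strerror(errno));
		return 1;
	}

	if (state & PR_SPEC_PRCTL) {
		/* Per-task control is available: disable store bypass for this task */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");
	}

	printf("speculation control state: 0x%x\n", state);
	return 0;
}

As the seccomp.c hunks show, filters installed via SECCOMP_SET_MODE_FILTER get the mitigation applied implicitly by seccomp_assign_mode() unless the caller passes SECCOMP_FILTER_FLAG_SPEC_ALLOW.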
