Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/cpuset.c                  |  1 |
| -rw-r--r-- | kernel/fork.c                    | 53 |
| -rw-r--r-- | kernel/gcov/base.c               |  6 |
| -rw-r--r-- | kernel/gcov/gcc_4_7.c            |  4 |
| -rw-r--r-- | kernel/locking/locktorture.c     |  4 |
| -rw-r--r-- | kernel/sched/core.c              |  8 |
| -rw-r--r-- | kernel/sched/cpufreq_sched.c     | 14 |
| -rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 62 |
| -rw-r--r-- | kernel/sched/deadline.c          |  2 |
| -rw-r--r-- | kernel/sched/fair.c              | 44 |
| -rw-r--r-- | kernel/sched/sched.h             | 19 |
| -rw-r--r-- | kernel/sched/walt.c              |  8 |
| -rw-r--r-- | kernel/trace/ftrace.c            | 10 |
| -rw-r--r-- | kernel/trace/trace.c             |  2 |
| -rw-r--r-- | kernel/trace/trace_selftest.c    |  2 |
15 files changed, 175 insertions, 64 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1656a48d5bee..f92ec9a71af3 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1916,6 +1916,7 @@ static struct cftype files[] = {
 	{
 		.name = "memory_pressure",
 		.read_u64 = cpuset_read_u64,
+		.private = FILE_MEMORY_PRESSURE,
 	},
 
 	{
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cd0d68ee02..1d168ba55118 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -178,13 +178,13 @@ static inline void free_thread_stack(unsigned long *stack)
 # else
 static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
 	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_stack(unsigned long *stack)
+static void free_thread_stack(unsigned long *stack)
 {
 	kmem_cache_free(thread_stack_cache, stack);
 }
@@ -697,6 +697,26 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
+static inline void __mmput(struct mm_struct *mm)
+{
+	VM_BUG_ON(atomic_read(&mm->mm_users));
+
+	uprobe_clear_state(mm);
+	exit_aio(mm);
+	ksm_exit(mm);
+	khugepaged_exit(mm); /* must run before exit_mmap */
+	exit_mmap(mm);
+	set_mm_exe_file(mm, NULL);
+	if (!list_empty(&mm->mmlist)) {
+		spin_lock(&mmlist_lock);
+		list_del(&mm->mmlist);
+		spin_unlock(&mmlist_lock);
+	}
+	if (mm->binfmt)
+		module_put(mm->binfmt->module);
+	mmdrop(mm);
+}
+
 /*
  * Decrement the use count and release all resources for an mm.
  */
@@ -706,26 +726,27 @@ int mmput(struct mm_struct *mm)
 	might_sleep();
 
 	if (atomic_dec_and_test(&mm->mm_users)) {
-		uprobe_clear_state(mm);
-		exit_aio(mm);
-		ksm_exit(mm);
-		khugepaged_exit(mm); /* must run before exit_mmap */
-		exit_mmap(mm);
-		set_mm_exe_file(mm, NULL);
-		if (!list_empty(&mm->mmlist)) {
-			spin_lock(&mmlist_lock);
-			list_del(&mm->mmlist);
-			spin_unlock(&mmlist_lock);
-		}
-		if (mm->binfmt)
-			module_put(mm->binfmt->module);
-		mmdrop(mm);
+		__mmput(mm);
 		mm_freed = 1;
 	}
 	return mm_freed;
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+static void mmput_async_fn(struct work_struct *work)
+{
+	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+	__mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		INIT_WORK(&mm->async_put_work, mmput_async_fn);
+		schedule_work(&mm->async_put_work);
+	}
+}
+
 /**
  * set_mm_exe_file - change a reference to the mm's executable file
  *
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 7080ae1eb6c1..f850e906564b 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -98,6 +98,12 @@ void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
 }
 EXPORT_SYMBOL(__gcov_merge_icall_topn);
 
+void __gcov_exit(void)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
  *
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index e25e92fb44fa..46a18e72bce6 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS			9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS			10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS			9
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8ef1919d63b2..d580b7d6ee6d 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -776,6 +776,8 @@ static void lock_torture_cleanup(void)
 	else
 		lock_torture_print_module_parms(cxt.cur_ops,
 						"End of test: SUCCESS");
+	kfree(cxt.lwsa);
+	kfree(cxt.lrsa);
 	torture_cleanup_end();
 }
 
@@ -917,6 +919,8 @@ static int __init lock_torture_init(void)
 				       GFP_KERNEL);
 		if (reader_tasks == NULL) {
 			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+			kfree(writer_tasks);
+			writer_tasks = NULL;
 			firsterr = -ENOMEM;
 			goto unwind;
 		}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2dbe599d34d5..0271e8e2c020 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3176,20 +3176,20 @@ static void sched_freq_tick_pelt(int cpu)
 #ifdef CONFIG_SCHED_WALT
 static void sched_freq_tick_walt(int cpu)
 {
-	unsigned long cpu_utilization = cpu_util(cpu);
+	unsigned long cpu_utilization = cpu_util_freq(cpu);
 	unsigned long capacity_curr = capacity_curr_of(cpu);
 
 	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
 		return sched_freq_tick_pelt(cpu);
 
 	/*
-	 * Add a margin to the WALT utilization.
+	 * Add a margin to the WALT utilization to check if we will need to
+	 * increase frequency.
 	 * NOTE: WALT tracks a single CPU signal for all the scheduling
 	 * classes, thus this margin is going to be added to the DL class as
 	 * well, which is something we do not do in sched_freq_tick_pelt case.
 	 */
-	cpu_utilization = add_capacity_margin(cpu_utilization);
-	if (cpu_utilization <= capacity_curr)
+	if (add_capacity_margin(cpu_utilization) <= capacity_curr)
 		return;
 
 	/*
diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c
index f10d9f7d6d07..6ffb23adbcef 100644
--- a/kernel/sched/cpufreq_sched.c
+++ b/kernel/sched/cpufreq_sched.c
@@ -235,6 +235,18 @@ out:
 	cpufreq_cpu_put(policy);
 }
 
+#ifdef CONFIG_SCHED_WALT
+static inline unsigned long
+requested_capacity(struct sched_capacity_reqs *scr)
+{
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+		return scr->cfs;
+	return scr->cfs + scr->rt;
+}
+#else
+#define requested_capacity(scr) (scr->cfs + scr->rt)
+#endif
+
 void update_cpu_capacity_request(int cpu, bool request)
 {
 	unsigned long new_capacity;
@@ -245,7 +257,7 @@ void update_cpu_capacity_request(int cpu, bool request)
 
 	scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
 
-	new_capacity = scr->cfs + scr->rt;
+	new_capacity = requested_capacity(scr);
 	new_capacity = new_capacity * capacity_margin
 		/ SCHED_CAPACITY_SCALE;
 	new_capacity += scr->dl;
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index e12309c1b07b..b90f7434e13b 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -64,8 +64,9 @@ struct sugov_cpu {
 	struct update_util_data update_util;
 	struct sugov_policy *sg_policy;
 
-	unsigned long iowait_boost;
-	unsigned long iowait_boost_max;
+	bool iowait_boost_pending;
+	unsigned int iowait_boost;
+	unsigned int iowait_boost_max;
 	u64 last_update;
 
 	/* The fields below are only needed when sharing a policy. */
@@ -224,30 +225,54 @@ static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 				   unsigned int flags)
 {
 	if (flags & SCHED_CPUFREQ_IOWAIT) {
-		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		if (sg_cpu->iowait_boost_pending)
+			return;
+
+		sg_cpu->iowait_boost_pending = true;
+
+		if (sg_cpu->iowait_boost) {
+			sg_cpu->iowait_boost <<= 1;
+			if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
+				sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
+		} else {
+			sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+		}
 	} else if (sg_cpu->iowait_boost) {
 		s64 delta_ns = time - sg_cpu->last_update;
 
 		/* Clear iowait_boost if the CPU apprears to have been idle. */
-		if (delta_ns > TICK_NSEC)
+		if (delta_ns > TICK_NSEC) {
 			sg_cpu->iowait_boost = 0;
+			sg_cpu->iowait_boost_pending = false;
+		}
 	}
 }
 
 static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
 			       unsigned long *max)
 {
-	unsigned long boost_util = sg_cpu->iowait_boost;
-	unsigned long boost_max = sg_cpu->iowait_boost_max;
+	unsigned int boost_util, boost_max;
 
-	if (!boost_util)
+	if (!sg_cpu->iowait_boost)
 		return;
 
+	if (sg_cpu->iowait_boost_pending) {
+		sg_cpu->iowait_boost_pending = false;
+	} else {
+		sg_cpu->iowait_boost >>= 1;
+		if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
+			sg_cpu->iowait_boost = 0;
+			return;
+		}
+	}
+
+	boost_util = sg_cpu->iowait_boost;
+	boost_max = sg_cpu->iowait_boost_max;
+
 	if (*util * boost_max < *max * boost_util) {
 		*util = boost_util;
 		*max = boost_max;
 	}
-	sg_cpu->iowait_boost >>= 1;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
@@ -320,6 +345,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
 		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
 		if (delta_ns > TICK_NSEC) {
 			j_sg_cpu->iowait_boost = 0;
+			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
 		if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
@@ -589,7 +615,6 @@ static int sugov_init(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy;
 	struct sugov_tunables *tunables;
-	unsigned int lat;
 	int ret = 0;
 
 	/* State should be equivalent to EXIT */
@@ -628,12 +653,19 @@ static int sugov_init(struct cpufreq_policy *policy)
 		goto stop_kthread;
 	}
 
-	tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
-	tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
-	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
-	if (lat) {
-		tunables->up_rate_limit_us *= lat;
-		tunables->down_rate_limit_us *= lat;
+	if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
+		tunables->up_rate_limit_us = policy->up_transition_delay_us;
+		tunables->down_rate_limit_us = policy->down_transition_delay_us;
+	} else {
+		unsigned int lat;
+
+		tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
+		tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
+		lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+		if (lat) {
+			tunables->up_rate_limit_us *= lat;
+			tunables->down_rate_limit_us *= lat;
+		}
 	}
 
 	policy->governor_data = sg_policy;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a105e97ab6bf..905d8cb5a6eb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -18,6 +18,8 @@
 
 #include <linux/slab.h>
 
+#include "walt.h"
+
 struct dl_bandwidth def_dl_bandwidth;
 
 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 853064319b0d..aa016919eab8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5809,10 +5809,11 @@ static inline void hrtick_update(struct rq *rq)
 #endif
 
 #ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
 static bool cpu_overutilized(int cpu);
 unsigned long boosted_cpu_util(int cpu);
 #else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
@@ -6626,10 +6627,8 @@ end:
  */
 static int sched_group_energy(struct energy_env *eenv)
 {
-	struct sched_domain *sd;
-	int cpu, total_energy = 0;
 	struct cpumask visit_cpus;
-	struct sched_group *sg;
+	u64 total_energy = 0;
 
 	WARN_ON(!eenv->sg_top->sge);
 
@@ -6637,8 +6636,8 @@ static int sched_group_energy(struct energy_env *eenv)
 
 	while (!cpumask_empty(&visit_cpus)) {
 		struct sched_group *sg_shared_cap = NULL;
-
-		cpu = cpumask_first(&visit_cpus);
+		int cpu = cpumask_first(&visit_cpus);
+		struct sched_domain *sd;
 
 		/*
 		 * Is the group utilization affected by cpus outside this
@@ -6650,7 +6649,7 @@ static int sched_group_energy(struct energy_env *eenv)
 			sg_shared_cap = sd->parent->groups;
 
 		for_each_domain(cpu, sd) {
-			sg = sd->groups;
+			struct sched_group *sg = sd->groups;
 
 			/* Has this sched_domain already been visited? */
 			if (sd->child && group_first_cpu(sg) != cpu)
@@ -6686,11 +6685,9 @@ static int sched_group_energy(struct energy_env *eenv)
 
 			idle_idx = group_idle_state(eenv, sg);
 			group_util = group_norm_util(eenv, sg);
-			sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power)
-						>> SCHED_CAPACITY_SHIFT;
+			sg_busy_energy = (group_util * sg->sge->cap_states[cap_idx].power);
 			sg_idle_energy = ((SCHED_LOAD_SCALE-group_util)
-						* sg->sge->idle_states[idle_idx].power)
-						>> SCHED_CAPACITY_SHIFT;
+						* sg->sge->idle_states[idle_idx].power);
 
 			total_energy += sg_busy_energy + sg_idle_energy;
 
@@ -6715,7 +6712,7 @@ next_cpu:
 		continue;
 	}
 
-	eenv->energy = total_energy;
+	eenv->energy = total_energy >> SCHED_CAPACITY_SHIFT;
 
 	return 0;
 }
@@ -7004,9 +7001,14 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
 	return __task_fits(p, cpu, 0);
 }
 
+static bool __cpu_overutilized(int cpu, int delta)
+{
+	return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
 static bool cpu_overutilized(int cpu)
 {
-	return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+	return __cpu_overutilized(cpu, 0);
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -7085,7 +7087,7 @@ schedtune_task_margin(struct task_struct *task)
 unsigned long
 boosted_cpu_util(int cpu)
 {
-	unsigned long util = cpu_util(cpu);
+	unsigned long util = cpu_util_freq(cpu);
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);
@@ -7729,6 +7731,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 	}
 
 	if (target_cpu != prev_cpu) {
+		int delta = 0;
 		struct energy_env eenv = {
 			.util_delta	= task_util(p),
 			.src_cpu	= prev_cpu,
@@ -7736,8 +7739,13 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
 			.task		= p,
 		};
 
+
+#ifdef CONFIG_SCHED_WALT
+		if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+			delta = task_util(p);
+#endif
 		/* Not enough spare capacity on previous cpu */
-		if (cpu_overutilized(prev_cpu)) {
+		if (__cpu_overutilized(prev_cpu, delta)) {
 			schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
 			schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
 			goto unlock;
@@ -11348,8 +11356,8 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
 		return true;
 
 	/* Do idle load balance if there have misfit task */
-	if (energy_aware() && rq->misfit_task)
-		return 1;
+	if (energy_aware())
+		return rq->misfit_task;
 
 	return (rq->nr_running >= 2);
 }
@@ -11391,7 +11399,7 @@ static inline bool nohz_kick_needed(struct rq *rq, int *type)
 #ifndef CONFIG_SCHED_HMP
 	rcu_read_lock();
 	sd = rcu_dereference(per_cpu(sd_busy, cpu));
-	if (sd && !energy_aware()) {
+	if (sd) {
 		sgc = sd->groups->sgc;
 		nr_busy = atomic_read(&sgc->nr_busy_cpus);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 33bf0c07e757..c53970b5a8f0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2363,6 +2363,12 @@ static inline unsigned long __cpu_util(int cpu, int delta)
 	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+		util = div64_u64(cpu_rq(cpu)->cumulative_runnable_avg,
+				 walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+
 	delta += util;
 	if (delta < 0)
 		return 0;
@@ -2375,6 +2381,19 @@ static inline unsigned long cpu_util(int cpu)
 	return __cpu_util(cpu, 0);
 }
 
+static inline unsigned long cpu_util_freq(int cpu)
+{
+	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+		util = div64_u64(cpu_rq(cpu)->prev_runnable_sum,
+				 walt_ravg_window >> SCHED_LOAD_SHIFT);
+#endif
+	return (util >= capacity) ? capacity : util;
+}
+
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHED
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 92c3aae8e056..28e999554463 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -111,8 +111,10 @@ walt_dec_cumulative_runnable_avg(struct rq *rq,
 
 static void
 fixup_cumulative_runnable_avg(struct rq *rq,
-			      struct task_struct *p, s64 task_load_delta)
+			      struct task_struct *p, u64 new_task_load)
 {
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
 	rq->cumulative_runnable_avg += task_load_delta;
 	if ((s64)rq->cumulative_runnable_avg < 0)
 		panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
@@ -802,11 +804,11 @@ void walt_set_window_start(struct rq *rq)
 	int cpu = cpu_of(rq);
 	struct rq *sync_rq = cpu_rq(sync_cpu);
 
-	if (rq->window_start)
+	if (likely(rq->window_start))
 		return;
 
 	if (cpu == sync_cpu) {
-		rq->window_start = walt_ktime_clock();
+		rq->window_start = 1;
 	} else {
 		raw_spin_unlock(&rq->lock);
 		double_rq_lock(rq, sync_rq);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eba904bae48c..38d73a6e2857 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2667,13 +2667,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are control ops, they still need their
-		 * per_cpu field freed. Since, function tracing is
+		 * If these are dynamic or control ops, they still
+		 * need their data freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & FTRACE_OPS_FL_CONTROL)
-			control_ops_free(ops);
+		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
+			goto free_ops;
+
 		return 0;
 	}
 
@@ -2728,6 +2729,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
 		schedule_on_each_cpu(ftrace_sync);
 
+ free_ops:
 		arch_ftrace_trampoline_free(ops);
 
 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a579a874045b..35613a2a5164 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5397,7 +5397,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
 	tracing_reset_online_cpus(&tr->trace_buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+	if (tr->max_buffer.buffer)
 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
 	tracing_reset_online_cpus(&tr->max_buffer);
 #endif
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index b0f86ea77881..ca70d11b8aa7 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
 		goto out_free;
 	if (cnt > 1) {
 		if (trace_selftest_test_global_cnt == 0)
-			goto out_free;
+			goto out_free;
 	}
 	if (trace_selftest_test_dyn_cnt == 0)
 		goto out_free;
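
A note on the cpufreq_schedutil.c change above: the governor no longer jumps straight to iowait_boost_max on an iowait wakeup. The boost now starts at the policy minimum, doubles on each back-to-back SCHED_CPUFREQ_IOWAIT hint (capped at iowait_boost_max), and is halved on every evaluation that arrives without a fresh hint until it falls below the minimum and is cleared. The standalone C sketch below models only that ramp/decay logic; it is not kernel code, and the struct/function names (boost_state, boost_hint, boost_consume) and the example frequencies are illustrative assumptions.

```c
/*
 * Userspace sketch of the iowait-boost ramp introduced in
 * cpufreq_schedutil.c: start at the policy minimum, double on each
 * consecutive iowait hint up to a maximum, halve when no hint arrives,
 * and clear the boost once it drops below the minimum.
 */
#include <stdbool.h>
#include <stdio.h>

struct boost_state {
	bool pending;		/* fresh IOWAIT hint not yet consumed */
	unsigned int boost;	/* current boost value, 0 = inactive */
	unsigned int min;	/* stand-in for policy->min */
	unsigned int max;	/* stand-in for iowait_boost_max */
};

/* Called when an IOWAIT hint arrives: start or double the boost. */
static void boost_hint(struct boost_state *s)
{
	if (s->pending)
		return;
	s->pending = true;
	if (s->boost) {
		s->boost <<= 1;
		if (s->boost > s->max)
			s->boost = s->max;
	} else {
		s->boost = s->min;
	}
}

/* Called on each frequency evaluation: consume the hint or decay. */
static unsigned int boost_consume(struct boost_state *s)
{
	if (!s->boost)
		return 0;
	if (s->pending) {
		s->pending = false;
	} else {
		s->boost >>= 1;
		if (s->boost < s->min)
			s->boost = 0;
	}
	return s->boost;
}

int main(void)
{
	/* Example frequencies in kHz; purely illustrative. */
	struct boost_state s = { .min = 400000, .max = 1800000 };
	int i;

	/* Three consecutive iowait wakeups ramp the boost up... */
	for (i = 0; i < 3; i++) {
		boost_hint(&s);
		printf("ramp:  %u\n", boost_consume(&s));
	}
	/* ...then it decays back to zero once the hints stop. */
	while (s.boost)
		printf("decay: %u\n", boost_consume(&s));
	return 0;
}
```

Running the sketch prints three doubling steps followed by the decay back to zero, mirroring how sugov_set_iowait_boost() and sugov_iowait_boost() cooperate after this patch.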
