Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	72
1 file changed, 57 insertions, 15 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 24ca2963754b..e822cb0e18d5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -372,21 +372,6 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 		goto out_release;
 	}
 
-	/*
-	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
-	 * and RCU users of this state to go away such that all new such users
-	 * will observe it.
-	 *
-	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so wait for both.
-	 *
-	 * Do sync before park smpboot threads to take care the rcu boost case.
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
-		synchronize_rcu_mult(call_rcu, call_rcu_sched);
-	else
-		synchronize_rcu();
-
 	smpboot_park_threads(cpu);
 
 	/*
@@ -545,9 +530,41 @@ out:
 	return ret;
 }
 
+static int switch_to_rt_policy(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int policy = current->policy;
+	int err;
+
+	/* Nobody should be attempting hotplug from these policy contexts. */
+	if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+	    policy == SCHED_DEADLINE)
+		return -EPERM;
+
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		return 1;
+
+	/* Only SCHED_NORMAL left. */
+	err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	return err;
+
+}
+
+static int switch_to_fair_policy(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
 int cpu_up(unsigned int cpu)
 {
 	int err = 0;
+	int switch_err = 0;
+
+	switch_err = switch_to_rt_policy();
+	if (switch_err < 0)
+		return switch_err;
 
 	if (!cpu_possible(cpu)) {
 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -573,6 +590,14 @@ int cpu_up(unsigned int cpu)
 
 out:
 	cpu_maps_update_done();
+
+	if (!switch_err) {
+		switch_err = switch_to_fair_policy();
+		if (switch_err)
+			pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+				switch_err, current->comm, current->pid);
+	}
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(cpu_up);
@@ -797,6 +822,10 @@ static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
 EXPORT_SYMBOL(cpu_active_mask);
 
+static DECLARE_BITMAP(cpu_isolated_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_isolated_mask = to_cpumask(cpu_isolated_bits);
+EXPORT_SYMBOL(cpu_isolated_mask);
+
 void set_cpu_possible(unsigned int cpu, bool possible)
 {
 	if (possible)
@@ -831,6 +860,14 @@ void set_cpu_active(unsigned int cpu, bool active)
 		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
 }
 
+void set_cpu_isolated(unsigned int cpu, bool isolated)
+{
+	if (isolated)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_isolated_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_isolated_bits));
+}
+
 void init_cpu_present(const struct cpumask *src)
 {
 	cpumask_copy(to_cpumask(cpu_present_bits), src);
@@ -846,6 +883,11 @@ void init_cpu_online(const struct cpumask *src)
 	cpumask_copy(to_cpumask(cpu_online_bits), src);
 }
 
+void init_cpu_isolated(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_isolated_bits), src);
+}
+
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
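
Note on the cpu_up() change: it follows a boost/restore pattern. The calling task is lifted to SCHED_FIFO at MAX_RT_PRIO - 1 before the hotplug work starts, so a CPU-bound SCHED_NORMAL task cannot starve the hotplug path, and is dropped back to SCHED_NORMAL once cpu_maps_update_done() has run (switch_to_rt_policy() returning 1 means the caller was already RT and is left alone). A minimal userspace sketch of the same pattern follows; do_hotplug_write() is a hypothetical stand-in for the real latency-sensitive work (e.g. a sysfs online write) and is not part of this patch. Elevating to SCHED_FIFO requires CAP_SYS_NICE.

	#include <sched.h>
	#include <stdio.h>

	static int do_hotplug_write(void)
	{
		/* Placeholder for the latency-sensitive operation. */
		return 0;
	}

	int main(void)
	{
		struct sched_param rt = {
			.sched_priority = sched_get_priority_max(SCHED_FIFO)
		};
		struct sched_param fair = { .sched_priority = 0 };
		int err;

		/* Boost: run the critical step under a real-time policy. */
		if (sched_setscheduler(0, SCHED_FIFO, &rt)) {
			perror("sched_setscheduler(SCHED_FIFO)");
			return 1;
		}

		err = do_hotplug_write();

		/* Restore: mirror switch_to_fair_policy(), regardless of err. */
		if (sched_setscheduler(0, SCHED_OTHER, &fair))
			perror("sched_setscheduler(SCHED_OTHER)");

		return err;
	}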
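
Note on cpu_isolated_mask: the new bitmap mirrors the other global cpumasks (possible/present/online/active) but has no consumers in this file; callers elsewhere are expected to test it when placing work. A hedged sketch of such a consumer, assuming only the standard cpumask helpers; pick_non_isolated_cpu() is hypothetical and not part of the patch:

	#include <linux/cpumask.h>

	/*
	 * Hypothetical consumer: choose a CPU for new work, skipping any
	 * CPU marked in cpu_isolated_mask.  Assumes cpu_isolated_mask is
	 * declared in a header alongside the other global masks.
	 */
	static int pick_non_isolated_cpu(void)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, cpu_isolated_mask))
				return cpu;
		}

		/* Every online CPU is isolated; fall back to the first one. */
		return cpumask_first(cpu_online_mask);
	}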