Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/msi.c               |  2
-rw-r--r--  kernel/sched/core_ctl.c        | 79
-rw-r--r--  kernel/sched/fair.c            | 12
-rw-r--r--  kernel/sched/hmp.c             | 78
-rw-r--r--  kernel/sched/sched.h           |  9
-rw-r--r--  kernel/sched/sched_avg.c       | 40
-rw-r--r--  kernel/time/hrtimer.c          | 49
-rw-r--r--  kernel/time/posix-cpu-timers.c |  2
8 files changed, 186 insertions(+), 85 deletions(-)
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index cd6009006510..41b40f310c28 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -268,7 +268,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
msi_alloc_info_t arg;
struct msi_desc *desc;
- int i, ret, virq;
+ int i, ret, virq = 0;
ret = ops->msi_check(domain, info, dev);
if (ret == 0)
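
The only change here is pre-initializing virq. The variable is presumably assigned only inside the per-descriptor loop further down in msi_domain_alloc_irqs(), so with no MSI descriptors it could be read uninitialized on the later success path, which newer compilers flag with -Wmaybe-uninitialized. A minimal standalone illustration of the pattern, using a hypothetical function rather than the kernel code itself:

    #include <stdio.h>

    /* Hypothetical stand-in for a loop that may visit zero MSI descriptors. */
    static int alloc_all(int ndesc)
    {
            int i, virq = 0;        /* initialized: the loop body may never run */

            for (i = 0; i < ndesc; i++)
                    virq = 100 + i; /* pretend interrupt allocation */

            /* With ndesc == 0, virq would be read uninitialized without "= 0". */
            printf("last virq: %d\n", virq);
            return 0;
    }

    int main(void)
    {
            return alloc_all(0);
    }
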
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 0b5f2dea18a1..ce15ae7fe76b 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -39,11 +39,13 @@ struct cluster_data {
cpumask_t cpu_mask;
unsigned int need_cpus;
unsigned int task_thres;
+ unsigned int max_nr;
s64 need_ts;
struct list_head lru;
bool pending;
spinlock_t pending_lock;
bool is_big_cluster;
+ bool enable;
int nrrun;
bool nrrun_changed;
struct task_struct *core_ctl_thread;
@@ -60,6 +62,7 @@ struct cpu_data {
struct cluster_data *cluster;
struct list_head sib;
bool isolated_by_us;
+ unsigned int max_nr;
};
static DEFINE_PER_CPU(struct cpu_data, cpu_state);
@@ -244,6 +247,29 @@ static ssize_t show_is_big_cluster(const struct cluster_data *state, char *buf)
return snprintf(buf, PAGE_SIZE, "%u\n", state->is_big_cluster);
}
+static ssize_t store_enable(struct cluster_data *state,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ bool bval;
+
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+
+ bval = !!val;
+ if (bval != state->enable) {
+ state->enable = bval;
+ apply_need(state);
+ }
+
+ return count;
+}
+
+static ssize_t show_enable(const struct cluster_data *state, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%u\n", state->enable);
+}
+
static ssize_t show_need_cpus(const struct cluster_data *state, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", state->need_cpus);
@@ -372,6 +398,7 @@ core_ctl_attr_ro(need_cpus);
core_ctl_attr_ro(active_cpus);
core_ctl_attr_ro(global_state);
core_ctl_attr_rw(not_preferred);
+core_ctl_attr_rw(enable);
static struct attribute *default_attrs[] = {
&min_cpus.attr,
@@ -381,6 +408,7 @@ static struct attribute *default_attrs[] = {
&busy_down_thres.attr,
&task_thres.attr,
&is_big_cluster.attr,
+ &enable.attr,
&need_cpus.attr,
&active_cpus.attr,
&global_state.attr,
@@ -429,7 +457,6 @@ static struct kobj_type ktype_core_ctl = {
#define RQ_AVG_TOLERANCE 2
#define RQ_AVG_DEFAULT_MS 20
-#define NR_RUNNING_TOLERANCE 5
static unsigned int rq_avg_period_ms = RQ_AVG_DEFAULT_MS;
static s64 rq_avg_timestamp_ms;
@@ -437,6 +464,7 @@ static s64 rq_avg_timestamp_ms;
static void update_running_avg(bool trigger_update)
{
int avg, iowait_avg, big_avg, old_nrrun;
+ int old_max_nr, max_nr, big_max_nr;
s64 now;
unsigned long flags;
struct cluster_data *cluster;
@@ -450,40 +478,23 @@ static void update_running_avg(bool trigger_update)
return;
}
rq_avg_timestamp_ms = now;
- sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg);
+ sched_get_nr_running_avg(&avg, &iowait_avg, &big_avg,
+ &max_nr, &big_max_nr);
spin_unlock_irqrestore(&state_lock, flags);
- /*
- * Round up to the next integer if the average nr running tasks
- * is within NR_RUNNING_TOLERANCE/100 of the next integer.
- * If normal rounding up is used, it will allow a transient task
- * to trigger online event. By the time core is onlined, the task
- * has finished.
- * Rounding to closest suffers same problem because scheduler
- * might only provide running stats per jiffy, and a transient
- * task could skew the number for one jiffy. If core control
- * samples every 2 jiffies, it will observe 0.5 additional running
- * average which rounds up to 1 task.
- */
- avg = (avg + NR_RUNNING_TOLERANCE) / 100;
- big_avg = (big_avg + NR_RUNNING_TOLERANCE) / 100;
-
for_each_cluster(cluster, index) {
if (!cluster->inited)
continue;
+
old_nrrun = cluster->nrrun;
- /*
- * Big cluster only need to take care of big tasks, but if
- * there are not enough big cores, big tasks need to be run
- * on little as well. Thus for little's runqueue stat, it
- * has to use overall runqueue average, or derive what big
- * tasks would have to be run on little. The latter approach
- * is not easy to get given core control reacts much slower
- * than scheduler, and can't predict scheduler's behavior.
- */
+ old_max_nr = cluster->max_nr;
cluster->nrrun = cluster->is_big_cluster ? big_avg : avg;
- if (cluster->nrrun != old_nrrun) {
+ cluster->max_nr = cluster->is_big_cluster ? big_max_nr : max_nr;
+
+ if (cluster->nrrun != old_nrrun ||
+ cluster->max_nr != old_max_nr) {
+
if (trigger_update)
apply_need(cluster);
else
@@ -493,6 +504,7 @@ static void update_running_avg(bool trigger_update)
return;
}
+#define MAX_NR_THRESHOLD 4
/* adjust needed CPUs based on current runqueue information */
static unsigned int apply_task_need(const struct cluster_data *cluster,
unsigned int new_need)
@@ -503,7 +515,15 @@ static unsigned int apply_task_need(const struct cluster_data *cluster,
/* only unisolate more cores if there are tasks to run */
if (cluster->nrrun > new_need)
- return new_need + 1;
+ new_need = new_need + 1;
+
+ /*
+ * We don't want tasks to be overcrowded in a cluster.
+ * If any CPU has more than MAX_NR_THRESHOLD tasks in the last
+ * window, bring another CPU to help out.
+ */
+ if (cluster->max_nr > MAX_NR_THRESHOLD)
+ new_need = new_need + 1;
return new_need;
}
@@ -549,7 +569,7 @@ static bool eval_need(struct cluster_data *cluster)
spin_lock_irqsave(&state_lock, flags);
- if (cluster->boost) {
+ if (cluster->boost || !cluster->enable) {
need_cpus = cluster->max_cpus;
} else {
cluster->active_cpus = get_active_cpu_count(cluster);
@@ -1046,6 +1066,7 @@ static int cluster_init(const struct cpumask *mask)
cluster->offline_delay_ms = 100;
cluster->task_thres = UINT_MAX;
cluster->nrrun = cluster->num_cpus;
+ cluster->enable = true;
INIT_LIST_HEAD(&cluster->lru);
spin_lock_init(&cluster->pending_lock);
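
Two things change in core_ctl: a per-cluster "enable" sysfs knob, and a per-window peak-concurrency input. When enable is cleared, eval_need() pins need_cpus at max_cpus, so the cluster is effectively exempt from isolation; writing 0 to the new attribute therefore keeps every CPU of that cluster unisolated regardless of load. Separately, the cluster now receives max_nr (the highest per-CPU runnable count seen in the last window) from sched_get_nr_running_avg(), and apply_task_need() asks for one extra CPU whenever that peak exceeds MAX_NR_THRESHOLD. A minimal userspace sketch of the resulting need calculation (the struct and function names here are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NR_THRESHOLD 4

    struct cluster {
            bool enable;            /* the new per-cluster sysfs knob */
            bool boost;
            unsigned int max_cpus;
            unsigned int nrrun;     /* average runnable tasks in the cluster */
            unsigned int max_nr;    /* peak per-CPU runnable count, last window */
    };

    /* Mirrors the shape of eval_need()/apply_task_need() after this patch. */
    static unsigned int need_cpus(const struct cluster *c, unsigned int new_need)
    {
            if (c->boost || !c->enable)
                    return c->max_cpus;

            if (c->nrrun > new_need)            /* more tasks than CPUs asked for */
                    new_need++;
            if (c->max_nr > MAX_NR_THRESHOLD)   /* one CPU is overcrowded */
                    new_need++;

            return new_need > c->max_cpus ? c->max_cpus : new_need;
    }

    int main(void)
    {
            struct cluster c = { .enable = true, .boost = false,
                                 .max_cpus = 4, .nrrun = 3, .max_nr = 6 };

            printf("need = %u\n", need_cpus(&c, 2)); /* 2 -> 3 (nrrun) -> 4 (max_nr) */
            return 0;
    }
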
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4d96380b35e8..9abc3e65dbd9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3536,6 +3536,16 @@ kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
static DEFINE_RAW_SPINLOCK(migration_lock);
+static bool do_migration(int reason, int new_cpu, int cpu)
+{
+ if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
+ && same_cluster(new_cpu, cpu))
+ return false;
+
+ /* Inter cluster high irqload migrations are OK */
+ return new_cpu != cpu;
+}
+
/*
* Check if currently running task should be migrated to a better cpu.
*
@@ -3553,7 +3563,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
raw_spin_lock(&migration_lock);
new_cpu = select_best_cpu(p, cpu, reason, 0);
- if (new_cpu != cpu) {
+ if (do_migration(reason, new_cpu, cpu)) {
active_balance = kick_active_balance(rq, p, new_cpu);
if (active_balance)
mark_reserved(new_cpu);
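
check_for_migration() previously kicked an active balance whenever select_best_cpu() returned a different CPU. The new do_migration() helper additionally requires that up/down (capacity-driven) migrations actually cross clusters, since moving between two CPUs of the same cluster cannot change the capacity the task sees; migrations for other reasons, such as high irqload, still only need a different CPU. A small self-contained sketch of the predicate (the reason constants and the 4+4 topology are invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    enum { UP_MIGRATION = 1, DOWN_MIGRATION, IRQLOAD_MIGRATION };

    /* Example topology: CPUs 0-3 are the little cluster, CPUs 4-7 the big one. */
    static bool same_cluster(int a, int b)
    {
            return (a / 4) == (b / 4);
    }

    static bool do_migration(int reason, int new_cpu, int cpu)
    {
            /* capacity-driven moves must actually cross clusters */
            if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION) &&
                same_cluster(new_cpu, cpu))
                    return false;

            /* everything else (e.g. high irqload) just needs a different CPU */
            return new_cpu != cpu;
    }

    int main(void)
    {
            printf("%d\n", do_migration(UP_MIGRATION, 1, 0));      /* 0: same cluster */
            printf("%d\n", do_migration(UP_MIGRATION, 5, 0));      /* 1: crosses clusters */
            printf("%d\n", do_migration(IRQLOAD_MIGRATION, 1, 0)); /* 1: any other CPU */
            return 0;
    }
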
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index df47c26ab6d2..ae6876e62c0f 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1602,7 +1602,7 @@ unsigned int nr_eligible_big_tasks(int cpu)
int nr_big = rq->hmp_stats.nr_big_tasks;
int nr = rq->nr_running;
- if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+ if (!is_max_capacity_cpu(cpu))
return nr_big;
return nr;
@@ -2521,10 +2521,42 @@ static inline u32 predict_and_update_buckets(struct rq *rq,
return pred_demand;
}
-static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+#define THRESH_CC_UPDATE (2 * NSEC_PER_USEC)
+
+/*
+ * Assumes rq_lock is held and wallclock was recorded in the same critical
+ * section as this function's invocation.
+ */
+static inline u64 read_cycle_counter(int cpu, u64 wallclock)
+{
+ struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+ u64 delta;
+
+ if (unlikely(!cluster))
+ return cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+ /*
+ * Why don't we need locking here? Let's say that delta is negative
+ * because some other CPU happened to update last_cc_update with a
+ * more recent timestamp. We simply read the counter again in that case
+ * with no harmful side effects. This can happen if there is an FIQ
+ * between when we read the wallclock and when we use it here.
+ */
+ delta = wallclock - atomic64_read(&cluster->last_cc_update);
+ if (delta > THRESH_CC_UPDATE) {
+ atomic64_set(&cluster->cycles,
+ cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu));
+ atomic64_set(&cluster->last_cc_update, wallclock);
+ }
+
+ return atomic64_read(&cluster->cycles);
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu,
+ u64 wallclock)
{
if (use_cycle_counter)
- p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+ p->cpu_cycles = read_cycle_counter(cpu, wallclock);
}
static void
@@ -2542,7 +2574,7 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
return;
}
- cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+ cur_cycles = read_cycle_counter(cpu, wallclock);
/*
* If current task is idle task and irqtime == 0 CPU was
@@ -2579,7 +2611,8 @@ update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
}
-static int account_busy_for_task_demand(struct task_struct *p, int event)
+static int
+account_busy_for_task_demand(struct rq *rq, struct task_struct *p, int event)
{
/*
* No need to bother updating task demand for exiting tasks
@@ -2598,6 +2631,17 @@ static int account_busy_for_task_demand(struct task_struct *p, int event)
(event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
return 0;
+ /*
+ * TASK_UPDATE can be called on a sleeping task, when it is moved
+ * between related groups.
+ */
+ if (event == TASK_UPDATE) {
+ if (rq->curr == p)
+ return 1;
+
+ return p->on_rq ? SCHED_ACCOUNT_WAIT_TIME : 0;
+ }
+
return 1;
}
@@ -2738,7 +2782,7 @@ static u64 update_task_demand(struct task_struct *p, struct rq *rq,
u64 runtime;
new_window = mark_start < window_start;
- if (!account_busy_for_task_demand(p, event)) {
+ if (!account_busy_for_task_demand(rq, p, event)) {
if (new_window)
/*
* If the time accounted isn't being accounted as
@@ -2822,7 +2866,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
update_window_start(rq, wallclock);
if (!p->ravg.mark_start) {
- update_task_cpu_cycles(p, cpu_of(rq));
+ update_task_cpu_cycles(p, cpu_of(rq), wallclock);
goto done;
}
@@ -2890,7 +2934,7 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
if (is_idle_task(curr)) {
/* We're here without rq->lock held, IRQ disabled */
raw_spin_lock(&rq->lock);
- update_task_cpu_cycles(curr, cpu);
+ update_task_cpu_cycles(curr, cpu, sched_ktime_clock());
raw_spin_unlock(&rq->lock);
}
}
@@ -2935,7 +2979,7 @@ void mark_task_starting(struct task_struct *p)
p->ravg.mark_start = p->last_wake_ts = wallclock;
p->last_cpu_selected_ts = wallclock;
p->last_switch_out_ts = 0;
- update_task_cpu_cycles(p, cpu_of(rq));
+ update_task_cpu_cycles(p, cpu_of(rq), wallclock);
}
void set_window_start(struct rq *rq)
@@ -3548,7 +3592,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
update_task_ravg(p, task_rq(p), TASK_MIGRATE,
wallclock, 0);
- update_task_cpu_cycles(p, new_cpu);
+ update_task_cpu_cycles(p, new_cpu, wallclock);
new_task = is_new_task(p);
/* Protected by rq_lock */
@@ -4303,8 +4347,20 @@ void note_task_waking(struct task_struct *p, u64 wallclock)
{
u64 sleep_time = wallclock - p->last_switch_out_ts;
- p->last_wake_ts = wallclock;
+ /*
+ * When a short-burst, short-sleeping task goes for a long
+ * sleep, the task's avg_sleep_time gets boosted. It will not
+ * come back below the short-sleep threshold for a long time,
+ * which results in incorrect packing. The idea behind tracking
+ * avg_sleep_time is to detect if a task is short sleeping
+ * or not. So limit the sleep time to twice the short sleep
+ * threshold. For regular long sleeping tasks, the avg_sleep_time
+ * would be higher than threshold, and packing happens correctly.
+ */
+ sleep_time = min_t(u64, sleep_time, 2 * sysctl_sched_short_sleep);
update_avg(&p->ravg.avg_sleep_time, sleep_time);
+
+ p->last_wake_ts = wallclock;
}
#ifdef CONFIG_CGROUP_SCHED
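
The largest piece here is read_cycle_counter(): instead of invoking the cycle-counter callback on every accounting event, each sched_cluster caches the last reading and its timestamp in two atomic64_t fields and refreshes them at most once per THRESH_CC_UPDATE (2 us). The update is deliberately lockless; as the comment notes, a racing writer can at worst make the delta look stale or negative, in which case the counter is simply read one extra time with no harmful side effect. A rough userspace analogue using C11 atomics (the hardware callback and the clock values are stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define THRESH_CC_UPDATE 2000ULL        /* ns, in the spirit of 2 * NSEC_PER_USEC */

    struct cluster {
            atomic_uint_fast64_t last_cc_update;    /* wallclock of last refresh */
            atomic_uint_fast64_t cycles;            /* cached counter value */
    };

    static uint64_t read_hw_counter(void)           /* stand-in for the real callback */
    {
            static uint64_t fake;
            return fake += 1000;
    }

    static uint64_t read_cycle_counter(struct cluster *c, uint64_t wallclock)
    {
            uint64_t delta = wallclock - atomic_load(&c->last_cc_update);

            /* Refresh at most once per THRESH_CC_UPDATE; a racing update only
             * makes us read the hardware counter one extra time. */
            if (delta > THRESH_CC_UPDATE) {
                    atomic_store(&c->cycles, read_hw_counter());
                    atomic_store(&c->last_cc_update, wallclock);
            }
            return atomic_load(&c->cycles);
    }

    int main(void)
    {
            struct cluster c = { 0 };

            printf("%llu\n", (unsigned long long)read_cycle_counter(&c, 10000));
            printf("%llu\n", (unsigned long long)read_cycle_counter(&c, 10500)); /* cached */
            return 0;
    }

The remaining behavioural changes are smaller: account_busy_for_task_demand() now accounts TASK_UPDATE events for tasks that are not currently running but are still on the runqueue (gated by SCHED_ACCOUNT_WAIT_TIME), and note_task_waking() clamps the sampled sleep time to twice sysctl_sched_short_sleep so that a single long sleep cannot inflate avg_sleep_time and defeat short-sleep packing long afterwards.
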
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 276a2387f06f..a6733b57bcbc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -397,6 +397,8 @@ struct sched_cluster {
unsigned int static_cluster_pwr_cost;
int notifier_sent;
bool wake_up_idle;
+ atomic64_t last_cc_update;
+ atomic64_t cycles;
};
extern unsigned long all_cluster_ids[];
@@ -1225,6 +1227,11 @@ static inline bool hmp_capable(void)
return max_possible_capacity != min_max_possible_capacity;
}
+static inline bool is_max_capacity_cpu(int cpu)
+{
+ return cpu_max_possible_capacity(cpu) == max_possible_capacity;
+}
+
/*
* 'load' is in reference to "best cpu" at its best frequency.
* Scale that in reference to a given cpu, accounting for how bad it is
@@ -1601,6 +1608,8 @@ static inline unsigned int nr_eligible_big_tasks(int cpu)
return 0;
}
+static inline bool is_max_capacity_cpu(int cpu) { return true; }
+
static inline int pct_task_load(struct task_struct *p) { return 0; }
static inline int cpu_capacity(int cpu)
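
sched.h adds the two per-cluster cache fields used by read_cycle_counter() above, plus a small is_max_capacity_cpu() helper that replaces the open-coded comparison in nr_eligible_big_tasks(). The stub in the second hunk (apparently the non-HMP section) returns true, so generic callers such as sched_avg.c treat every CPU as maximum capacity when HMP accounting is compiled out. An illustrative sketch of that compile-out pattern (the capacity numbers and topology are made up):

    #include <stdbool.h>
    #include <stdio.h>

    #ifdef CONFIG_SCHED_HMP
    /* Illustrative capacities; the real values come from the CPU topology. */
    static const unsigned int max_possible_capacity = 1024;

    static unsigned int cpu_max_possible_capacity(int cpu)
    {
            return cpu >= 4 ? 1024 : 512;   /* pretend CPUs 4+ form the big cluster */
    }

    static inline bool is_max_capacity_cpu(int cpu)
    {
            return cpu_max_possible_capacity(cpu) == max_possible_capacity;
    }
    #else
    /* Stub: without HMP accounting, every CPU counts as maximum capacity. */
    static inline bool is_max_capacity_cpu(int cpu) { (void)cpu; return true; }
    #endif

    int main(void)
    {
            printf("cpu0: %d, cpu5: %d\n",
                   is_max_capacity_cpu(0), is_max_capacity_cpu(5));
            return 0;
    }
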
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 29d8a26a78ed..ba5a326a9fd8 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,11 +26,13 @@ static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr_big_prod_sum);
static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(u64, nr_max);
static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static s64 last_get_time;
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
/**
* sched_get_nr_running_avg
* @return: Average nr_running, iowait and nr_big_tasks value since last poll.
@@ -40,7 +42,8 @@ static s64 last_get_time;
* Obtains the average nr_running value since the last poll.
* This function may not be called concurrently with itself
*/
-void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg,
+ unsigned int *max_nr, unsigned int *big_max_nr)
{
int cpu;
u64 curr_time = sched_clock();
@@ -50,6 +53,8 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
*avg = 0;
*iowait_avg = 0;
*big_avg = 0;
+ *max_nr = 0;
+ *big_max_nr = 0;
if (!diff)
return;
@@ -78,17 +83,35 @@ void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
per_cpu(nr_big_prod_sum, cpu) = 0;
per_cpu(iowait_prod_sum, cpu) = 0;
+ if (*max_nr < per_cpu(nr_max, cpu))
+ *max_nr = per_cpu(nr_max, cpu);
+
+ if (is_max_capacity_cpu(cpu)) {
+ if (*big_max_nr < per_cpu(nr_max, cpu))
+ *big_max_nr = per_cpu(nr_max, cpu);
+ }
+
+ per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
diff = curr_time - last_get_time;
last_get_time = curr_time;
- *avg = (int)div64_u64(tmp_avg * 100, diff);
- *big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
- *iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
-
- trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+ /*
+ * Any task running on the BIG cluster and BIG tasks running on the
+ * little cluster contribute to big_avg. Small or medium tasks can
+ * also run on the BIG cluster when the co-location and scheduler
+ * boost features are active. We don't want these tasks to
+ * downmigrate to the little cluster when BIG CPUs are available but
+ * isolated. Round up the average values so that core_ctl
+ * aggressively unisolates BIG CPUs.
+ */
+ *avg = (int)DIV64_U64_ROUNDUP(tmp_avg, diff);
+ *big_avg = (int)DIV64_U64_ROUNDUP(tmp_big_avg, diff);
+ *iowait_avg = (int)DIV64_U64_ROUNDUP(tmp_iowait, diff);
+
+ trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg,
+ *max_nr, *big_max_nr);
BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
@@ -121,6 +144,9 @@ void sched_update_nr_prod(int cpu, long delta, bool inc)
BUG_ON((s64)per_cpu(nr, cpu) < 0);
+ if (per_cpu(nr, cpu) > per_cpu(nr_max, cpu))
+ per_cpu(nr_max, cpu) = per_cpu(nr, cpu);
+
per_cpu(nr_prod_sum, cpu) += nr_running * diff;
per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
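
sched_avg.c gains a per-CPU nr_max that records the peak nr_running observed since the last poll (updated in sched_update_nr_prod() and reset to the current value when sampled), and sched_get_nr_running_avg() now also returns the per-cluster maxima consumed by core_ctl above. The averages themselves switch from a percent value, previously rounded up by core_ctl with a 5% tolerance, to a plain ceiling division via DIV64_U64_ROUNDUP, so any residual demand pulls the average up to the next integer. A small worked example of both pieces (the numbers are made up and the macro is a userspace stand-in):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's div64_u64()-based round-up macro. */
    #define DIV64_U64_ROUNDUP(X, Y) (((X) + (Y) - 1) / (Y))

    int main(void)
    {
            /* nr_prod_sum accumulates nr_running * time; diff is the window length. */
            uint64_t nr_prod_sum = 2500000;         /* ~0.125 average tasks ...     */
            uint64_t diff = 20000000;               /* ... over a 20 ms window (ns) */

            /* Old: percent value plus a 5% tolerance; new: plain ceiling division. */
            unsigned int avg = (unsigned int)DIV64_U64_ROUNDUP(nr_prod_sum, diff);
            printf("avg = %u\n", avg);              /* 1: any residue rounds up */

            /* Peak tracking: remember the largest nr_running seen in the window. */
            int nr = 0, nr_max = 0;
            int deltas[] = { 2, 3, -4, 1 };
            for (int i = 0; i < 4; i++) {
                    nr += deltas[i];
                    if (nr > nr_max)
                            nr_max = nr;
            }
            printf("nr_max = %d\n", nr_max);        /* 5 */
            return 0;
    }
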
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 01a49614e942..e7c2392666cb 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,7 +49,6 @@
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>
-#include <linux/delay.h>
#include <asm/uaccess.h>
@@ -1593,42 +1592,22 @@ static void init_hrtimers_cpu(int cpu)
}
#if defined(CONFIG_HOTPLUG_CPU)
-static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
- struct hrtimer_cpu_base *new_base,
- unsigned int i,
- bool wait,
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+ struct hrtimer_clock_base *new_base,
bool remove_pinned)
{
struct hrtimer *timer;
struct timerqueue_node *node;
struct timerqueue_head pinned;
int is_pinned;
- struct hrtimer_clock_base *old_c_base = &old_base->clock_base[i];
- struct hrtimer_clock_base *new_c_base = &new_base->clock_base[i];
+ bool is_hotplug = !cpu_online(old_base->cpu_base->cpu);
timerqueue_init_head(&pinned);
- while ((node = timerqueue_getnext(&old_c_base->active))) {
+ while ((node = timerqueue_getnext(&old_base->active))) {
timer = container_of(node, struct hrtimer, node);
- if (wait) {
- /* Ensure timers are done running before continuing */
- while (hrtimer_callback_running(timer)) {
- raw_spin_unlock(&old_base->lock);
- raw_spin_unlock(&new_base->lock);
- cpu_relax();
- /*
- * cpu_relax may just be a barrier. Grant the
- * run_hrtimer_list code some time to obtain the
- * spinlock.
- */
- udelay(2);
- raw_spin_lock(&new_base->lock);
- raw_spin_lock_nested(&old_base->lock,
- SINGLE_DEPTH_NESTING);
- }
- } else {
+ if (is_hotplug)
BUG_ON(hrtimer_callback_running(timer));
- }
debug_deactivate(timer);
/*
@@ -1636,7 +1615,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
* timer could be seen as !active and just vanish away
* under us on another CPU
*/
- __remove_hrtimer(timer, old_c_base, HRTIMER_STATE_ENQUEUED, 0);
+ __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
is_pinned = timer->state & HRTIMER_STATE_PINNED;
if (!remove_pinned && is_pinned) {
@@ -1644,7 +1623,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
continue;
}
- timer->base = new_c_base;
+ timer->base = new_base;
/*
* Enqueue the timers on the new cpu. This does not
* reprogram the event device in case the timer
@@ -1653,7 +1632,7 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
* sort out already expired timers and reprogram the
* event device.
*/
- enqueue_hrtimer(timer, new_c_base);
+ enqueue_hrtimer(timer, new_base);
}
/* Re-queue pinned timers for non-hotplug usecase */
@@ -1661,11 +1640,11 @@ static void migrate_hrtimer_list(struct hrtimer_cpu_base *old_base,
timer = container_of(node, struct hrtimer, node);
timerqueue_del(&pinned, &timer->node);
- enqueue_hrtimer(timer, old_c_base);
+ enqueue_hrtimer(timer, old_base);
}
}
-static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
+static void __migrate_hrtimers(int scpu, bool remove_pinned)
{
struct hrtimer_cpu_base *old_base, *new_base;
unsigned long flags;
@@ -1682,8 +1661,8 @@ static void __migrate_hrtimers(int scpu, bool wait, bool remove_pinned)
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- migrate_hrtimer_list(old_base, new_base, i, wait,
- remove_pinned);
+ migrate_hrtimer_list(&old_base->clock_base[i],
+ &new_base->clock_base[i], remove_pinned);
}
raw_spin_unlock(&old_base->lock);
@@ -1699,12 +1678,12 @@ static void migrate_hrtimers(int scpu)
BUG_ON(cpu_online(scpu));
tick_cancel_sched_timer(scpu);
- __migrate_hrtimers(scpu, false, true);
+ __migrate_hrtimers(scpu, true);
}
void hrtimer_quiesce_cpu(void *cpup)
{
- __migrate_hrtimers(*(int *)cpup, true, false);
+ __migrate_hrtimers(*(int *)cpup, false);
}
#endif /* CONFIG_HOTPLUG_CPU */
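
The hrtimer rework simplifies migration: migrate_hrtimer_list() now takes the two clock bases directly instead of a cpu_base plus index, and the busy-wait that released and re-took both base locks while a callback was running is dropped entirely (along with the linux/delay.h include it needed). In the CPU-hotplug path, remove_pinned is true and the source CPU is already offline, so the code instead asserts with BUG_ON that no callback is running; in the quiesce path (hrtimer_quiesce_cpu()), pinned timers are parked on a local timerqueue and re-enqueued on the old base. A simplified sketch of that park-and-requeue flow, with a plain singly linked list standing in for the timerqueue:

    #include <stdbool.h>
    #include <stdio.h>

    struct timer {
            int id;
            bool pinned;
            struct timer *next;
    };

    static void push(struct timer **head, struct timer *t)
    {
            t->next = *head;
            *head = t;
    }

    static struct timer *pop(struct timer **head)
    {
            struct timer *t = *head;

            if (t)
                    *head = t->next;
            return t;
    }

    /* Mirrors the shape of migrate_hrtimer_list(): move everything to the new
     * base, except pinned timers, which stay on the old base unless
     * remove_pinned is set (the CPU-hotplug case). */
    static void migrate_list(struct timer **old_base, struct timer **new_base,
                             bool remove_pinned)
    {
            struct timer *t, *parked = NULL;

            while ((t = pop(old_base))) {
                    if (!remove_pinned && t->pinned) {
                            push(&parked, t);       /* park pinned timers aside */
                            continue;
                    }
                    push(new_base, t);              /* migrate the rest */
            }

            while ((t = pop(&parked)))              /* re-queue pinned on old base */
                    push(old_base, t);
    }

    int main(void)
    {
            struct timer a = { 1, false, NULL }, b = { 2, true, NULL };
            struct timer *oldq = NULL, *newq = NULL;

            push(&oldq, &a);
            push(&oldq, &b);
            migrate_list(&oldq, &newq, false);      /* quiesce case: keep pinned */
            printf("old: %d, new: %d\n",
                   oldq ? oldq->id : 0, newq ? newq->id : 0);   /* old: 2, new: 1 */
            return 0;
    }
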
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 80016b329d94..051544aec37c 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1250,7 +1250,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
cputime_t *newval, cputime_t *oldval)
{
- unsigned long long now;
+ unsigned long long now = 0;
WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
cpu_timer_sample_group(clock_idx, tsk, &now);