diff options
| author | Alex Shi <alex.shi@linaro.org> | 2016-08-11 12:15:55 +0800 |
|---|---|---|
| committer | Alex Shi <alex.shi@linaro.org> | 2016-08-11 12:15:55 +0800 |
| commit | e01035c1a71d7cceebc3df92f79577df7438523a (patch) | |
| tree | f745442c513b343e0a5d8fc84c4c0eb6603ec7b2 /kernel/sched | |
| parent | da9a92f0cde9ce34a930d7edcaef30429e41d14a (diff) | |
| parent | 3d27bcb804a6889e8f16daf95751764907f1e100 (diff) | |
Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
Diffstat (limited to 'kernel/sched')
| -rw-r--r-- | kernel/sched/core.c | 6 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 15 |
2 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a28b638ec74f..6f6c7cc13f33 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4951,14 +4951,16 @@ void show_state_filter(unsigned long state_filter)
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
 		 * console might take a lot of time:
+		 * Also, reset softlockup watchdogs on all CPUs, because
+		 * another CPU might be blocked waiting for us to process
+		 * an IPI.
 		 */
 		touch_nmi_watchdog();
+		touch_all_softlockup_watchdogs();
 		if (!state_filter || (p->state & state_filter))
 			sched_show_task(p);
 	}

-	touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
 	sysrq_sched_debug_show();
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b46edb7447bb..44a6f3e539f2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -687,8 +687,6 @@ void init_entity_runnable_average(struct sched_entity *se)
 	/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }

-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -4595,19 +4593,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;

 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);

-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;

 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;

 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;

 		/*
 		 * wl = S * s'_i; see (2)
