author     Linux Build Service Account <lnxbuild@localhost>  2016-06-10 11:21:52 -0600
committer  Linux Build Service Account <lnxbuild@localhost>  2016-06-10 11:21:52 -0600
commit     cc4502ce833996b835f986fe0443474b947e5ef4 (patch)
tree       bd49cd44fea5fcd2356e6cb0c44e8f6d9c97f62a /kernel
parent     9876c3c76af89f339ffff05d37507ee72685b794 (diff)
parent     338cc814ce4ae9e9ea5e1a364ad1a18866e90235 (diff)
Promotion of kernel.lnx.4.4-160609.
CRs      Change ID                                  Subject
--------------------------------------------------------------------------------------------------------------
1026450  I0e7df68587e1a212e6c2ed7171fcd683cf115604  defconfig: msm8996: enable SD card support for 8996 boar
1018542  I4e56982700f3444f96e1100c0ce272d36d3423a5  soc: qcom: msm_glink_pkt: Register and wait for link up
1022772  Ief169f5c456dbdb989788a0147342fe91f0a836f  ARM: dts: msm: setup external clock sources for HDMI clo
1012351  Iba2ebe38c4ec1422931f2a696d9e5ca01b11a987  scsi: ufs-qcom: add svs2 support
992989   Ied4c818b0012c733e73ff894470594f9429d9882  Bluetooth: Add ioctl for pin connectivity test
1012351  I2ef01d98603840289c436e14bf3df54a2ab9198b  phy: qcom-ufs: add svs2 support
1021816  I7741eca6ac07cd4393fc373e796570066da7cce6  diag: Fix for corrupted dci log mask
1025856  Iabdb1c21757ad6dead50fdc4aa3b12077f8f840f  ARM: dts: msm: Add initial device tree files for MSMFALC
1013124  Ic57a369ed1e194ab512b4b86ce4d216df46b5f46  usb: pd: Handle disconnection immediately if SDP
1024334  I31f3ed15e8cf02046cfcc8d9c062522065bb022c  ARM: dts: msm: add gpio keys support for msmcobalt
1026010  I732b50c974a73c08038995900e008b4e16e9437b  sched: fix CPU frequency estimation while idle
1025499  Ib6f218989616b038adb7a001cbc6302924041aa1  ARM: dts: msm: Update MPM interrupt mappings for cobalt
1017798  Icd62b9d98d5763d209c7de897653f19f8031e236  qcom-charger: qpnp-smb2: update usb type on PD_ACTIVE
1012351  I50008a892c8c21a4a3361998d76a2f8cbc2995bb  scsi: ufs: add new get_scale_down_gear vops
1012351  Ife2ae05950e6c437bef779955432d91d5117d49b  scsi: ufs: add ufshcd_dme_rmw wrapper
1012351  I5a048629dade92fe79631ba30d3b3e280a77d4e6  scsi: ufs-qcom: add auto hibern8 support
1026010  I274d5047a50565cfaaa2fb821ece21c8cf4c991d  sched: fix potential deflated frequency estimation durin
992989   I9c007dededd88a7e8bc8c3226507a46046e96bc4  Defconfig: arm64: Enable BTFM in performance defconfig
1022772  If65ac051253b4e51f6af5cd60f98eaf908b3bcfd  msm: mdss: hdmi: add deep color support
1026037, 994733  I8410462b17c5741fb03239377fed18c5f6034b90  diag: Update logs, events and messages codes
1011669  I9ae7a806a5b659a2c15fa86b37cd1075913095d5  defconfig: msm-perf: enable QPNP_POWER_ON driver
1024496  I5d69436ec59476fc5cd427458d79f8c99266f243  scsi: ufs: ICE 3.0 changes
1015627  Ia6584de897c366032251033aa8e03c54e9d899bb  wil6210: fix race conditions between TX send and complet
1018090  If5dafbcc0d88596332b794ad5b6fdc1f9ff98a45  pmic-voter: disassociate device from votables
1024406  Ib78b8fec5d1ab934f4d4ec80b7e008149707ce0f  USB: f_mtp: Set maximum burst value to two for superspee
1015627  I81b99522ada809fa375c6b6887f8ea0e6482fba3  wil6210: fix dma mapping error cleanup in __wil_tx_vring
1026010  I469263c90e12f39bb36bde5ed26298b7c1c77597  sched: preserve CPU cycle counter in rq
1021057  I6238b0434936fcff562d7a049e5e9d13794f9cdc  usb: pd: Add IPC logging to policy engine
1007226  I345a9b6c82fb5e591d8bdcf48afed48ccaafddc3  qseecom: added small sleep between busy-wait calls to TZ
1013680  If99081c1fd356e69710c94441affec92fac24075  ASoC: msm: Add EC reference support for USB audio ADSP s
872263   I5d02d3df7b69e30243a0b44ddf36a894ad739d59  msm: ipa3: lower turbo voting threshold
1012351  I9597507419e59884c44429c8c34f7469fa5192cb  ARM: dts: msm: enable ufs hw ctl clocks for msmcobalt
1015627  If7c91c31b490b0762d23df21db7c30652b0817d4  wil6210: guarantee safe access to rx descriptors shared
1025422  I9abc6352732c189e90cefaa3eb4161ea6d103529  ARM: dts: msm: Fix bus governor for Venus VMEM on msmcob
1010920  I2b988d2a6112add06fa433c4b1deeec0b6e6bb58  soc: qcom: glink: Fix parallel migration issue
1025431  I25a9412c32ddb5d55379b9e96c91f246f6bd4aa9  Revert "ARM: dts: msm: Add v4l2 rotator node to msm8996"
1024334  I85c0401c6c47d4988d39ef637c1517916e41e3cb  defconfig: msmcortex: enable gpio keys module
1015627  Ie7e073d672a561b768cb62f1e39af24c1d7c3322  wil6210: align wil log functions to wil_dbg_ratelimited
1021009  Ifa62e4e4662d72904411ff4dda10a29a76962851  ARM: dts: msm: Add flash nodes to camera on msmcobalt
1012351  I57e5be8eae4b2cf70a2a9d9c81c9a54a24e03e71  scsi: ufs: add auto hibern8 support
1012351  I4a027f89deddff4735df45da9cffbfb1849af5f4  scsi: ufs-qcom : Add quirk for broken auto hibern8
1022772  Ibf7877eb6edd29baefee57bc12188989d897d47e  clk: msm: hdmi: add cobalt hdmi pll calculator and clock
1018090  I8e2bc3366ec12e8485e4be86ee56ba5e4d113c3c  pmic-voter: improve pmic-voter api implementation
1018090  I81f5974f81a697f3698bc58df1d3ed59fa2579a7  pmic-voter: remove default value
1026010  I3608cb09c30797bf858f434579fd07555546fb60  sched: avoid potential race between governor and thermal
1022772  I4373fc9be34d7f49059159256cfd6dca045ff39f  ARM: dts: msm: add MDSS HDMI device support for msmcobal
1025856  Ie622624676c648cb109e62d3dcf0601222b291c9  defconfig: msm: Add MSMFALCON in defconfig
1022772  I4cf25e531fea39f7b68bd5ef6edd89243c1ded21  ARM: dts: msm: add MDSS HDMI PLL device node for msmcoba
1015627  I7af20f1db39b7e4c5ac2956fc655f120eb61e002  wil6210: protect wil_vring_fini_tx in parallel to tx com
999619   I69e1570fe2bdb2e0b5bc14eb2dc158fe504ce921  msm: camera: isp: Vote bandwidth to 0 when shutting vfe
1015627  Iddacd9b54304b47263647942e1e6784eefb9a6d2  wil6210: add pm_notify handling
1012351  I11d941f239ccd1978089194fa269e143fc640d36  ARM: dts: msm: Update min freq for UFS src clks on msmco

Change-Id: I96c059fda8202df68b41f1d8c2e034c55e683e85
CRs-Fixed: 1026037, 1024334, 1025431, 1024496, 992989, 1026010, 1007226, 1026450, 1021816, 1018542, 1025499, 1010920, 1021009, 1017798, 1012351, 1024406, 872263, 994733, 1025422, 1011669, 999619, 1015627, 1013124, 1025856, 1013680, 1022772, 1018090, 1021057
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c     170
-rw-r--r--  kernel/sched/cputime.c    2
-rw-r--r--  kernel/sched/sched.h     14
3 files changed, 108 insertions, 78 deletions
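
Note: the hunks below appear to come from the scheduler patches listed in the commit message (CR 1026010: "sched: preserve CPU cycle counter in rq", "sched: fix CPU frequency estimation while idle", "sched: avoid potential race between governor and thermal"). The per-call struct cpu_cycle value is replaced by a sample cached in struct rq (rq->cc), so scale_exec_time() no longer needs the struct threaded through every caller. A minimal userspace sketch of the scaling arithmetic, assuming simplified integer types, an open-coded DIV64_U64_ROUNDUP, and the final right shift by 10 that usually pairs with the *1024 efficiency factor (the shift is not visible in the hunk):

	/* Sketch only: the normalization scale_exec_time() applies after the
	 * change, reading the cached cycles/time sample instead of a parameter.
	 * cpu_eff/max_eff stand in for cpu_efficiency(cpu)/max_possible_efficiency. */
	#include <stdint.h>

	struct cpu_cycle { uint64_t cycles; uint64_t time; };

	static uint64_t scale_exec_time_sketch(uint64_t delta, const struct cpu_cycle *cc,
					       uint64_t max_possible_freq,
					       uint64_t cpu_eff, uint64_t max_eff)
	{
		/* Scale the wall-clock delta by the estimated current frequency
		 * (cc->cycles / cc->time) relative to max_possible_freq, rounding up. */
		uint64_t den = max_possible_freq * cc->time;
		delta = (delta * cc->cycles + den - 1) / den;

		/* Scale by CPU efficiency relative to the most efficient CPU;
		 * the >>10 undoing the *1024 is assumed, not shown in the hunk. */
		uint64_t sf = (cpu_eff * 1024 + max_eff - 1) / max_eff;
		return (delta * sf) >> 10;
	}
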
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e145b5640b12..6736e2d7cbaf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1745,11 +1745,6 @@ static void update_cluster_topology(void) { }
#define SCHED_MIN_FREQ 1
-struct cpu_cycle {
- u64 cycles;
- u64 time;
-};
-
#if defined(CONFIG_SCHED_HMP)
/*
@@ -1942,14 +1937,13 @@ update_window_start(struct rq *rq, u64 wallclock)
#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
-static inline u64 scale_exec_time(u64 delta, struct rq *rq,
- const struct cpu_cycle *cc)
+static inline u64 scale_exec_time(u64 delta, struct rq *rq)
{
int cpu = cpu_of(rq);
int sf;
- delta = DIV64_U64_ROUNDUP(delta * cc->cycles,
- max_possible_freq * cc->time);
+ delta = DIV64_U64_ROUNDUP(delta * rq->cc.cycles,
+ max_possible_freq * rq->cc.time);
sf = DIV_ROUND_UP(cpu_efficiency(cpu) * 1024, max_possible_efficiency);
delta *= sf;
@@ -2393,8 +2387,7 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
* Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
*/
static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime,
- const struct cpu_cycle *cc)
+ int event, u64 wallclock, u64 irqtime)
{
int new_window, nr_full_windows = 0;
int p_is_curr_task = (p == rq->curr);
@@ -2527,7 +2520,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
delta = wallclock - mark_start;
else
delta = irqtime;
- delta = scale_exec_time(delta, rq, cc);
+ delta = scale_exec_time(delta, rq);
*curr_runnable_sum += delta;
if (new_task)
*nt_curr_runnable_sum += delta;
@@ -2553,15 +2546,14 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (!nr_full_windows) {
/* A full window hasn't elapsed, account partial
* contribution to previous completed window. */
- delta = scale_exec_time(window_start - mark_start, rq,
- cc);
+ delta = scale_exec_time(window_start - mark_start, rq);
if (!exiting_task(p))
p->ravg.prev_window += delta;
} else {
/* Since at least one full window has elapsed,
* the contribution to the previous window is the
* full window (window_size). */
- delta = scale_exec_time(window_size, rq, cc);
+ delta = scale_exec_time(window_size, rq);
if (!exiting_task(p))
p->ravg.prev_window = delta;
}
@@ -2571,7 +2563,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
*nt_prev_runnable_sum += delta;
/* Account piece of busy time in the current window. */
- delta = scale_exec_time(wallclock - window_start, rq, cc);
+ delta = scale_exec_time(wallclock - window_start, rq);
*curr_runnable_sum += delta;
if (new_task)
*nt_curr_runnable_sum += delta;
@@ -2598,15 +2590,14 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (!nr_full_windows) {
/* A full window hasn't elapsed, account partial
* contribution to previous completed window. */
- delta = scale_exec_time(window_start - mark_start, rq,
- cc);
+ delta = scale_exec_time(window_start - mark_start, rq);
if (!is_idle_task(p) && !exiting_task(p))
p->ravg.prev_window += delta;
} else {
/* Since at least one full window has elapsed,
* the contribution to the previous window is the
* full window (window_size). */
- delta = scale_exec_time(window_size, rq, cc);
+ delta = scale_exec_time(window_size, rq);
if (!is_idle_task(p) && !exiting_task(p))
p->ravg.prev_window = delta;
}
@@ -2618,7 +2609,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
*nt_prev_runnable_sum += delta;
/* Account piece of busy time in the current window. */
- delta = scale_exec_time(wallclock - window_start, rq, cc);
+ delta = scale_exec_time(wallclock - window_start, rq);
*curr_runnable_sum += delta;
if (new_task)
*nt_curr_runnable_sum += delta;
@@ -2646,7 +2637,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
/* Roll window over. If IRQ busy time was just in the current
* window then that is all that need be accounted. */
if (mark_start > window_start) {
- *curr_runnable_sum = scale_exec_time(irqtime, rq, cc);
+ *curr_runnable_sum = scale_exec_time(irqtime, rq);
return;
}
@@ -2655,12 +2646,12 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
delta = window_start - mark_start;
if (delta > window_size)
delta = window_size;
- delta = scale_exec_time(delta, rq, cc);
+ delta = scale_exec_time(delta, rq);
*prev_runnable_sum += delta;
/* Process the remaining IRQ busy time in the current window. */
delta = wallclock - window_start;
- rq->curr_runnable_sum = scale_exec_time(delta, rq, cc);
+ rq->curr_runnable_sum = scale_exec_time(delta, rq);
return;
}
@@ -2690,7 +2681,7 @@ update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
}
static inline void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock, u64 irqtime, const struct cpu_cycle *cc)
+ int event, u64 wallclock, u64 irqtime)
{
}
@@ -2709,34 +2700,56 @@ static void update_task_cpu_cycles(struct task_struct *p, int cpu)
p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
}
-static struct cpu_cycle
-get_task_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
- u64 wallclock)
+static void
+update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
+ u64 wallclock, u64 irqtime)
{
u64 cur_cycles;
- struct cpu_cycle cc;
int cpu = cpu_of(rq);
+ lockdep_assert_held(&rq->lock);
+
if (!use_cycle_counter) {
- cc.cycles = cpu_cur_freq(cpu);
- cc.time = 1;
- return cc;
+ rq->cc.cycles = cpu_cur_freq(cpu);
+ rq->cc.time = 1;
+ return;
}
cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
- if (unlikely(cur_cycles < p->cpu_cycles))
- cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
- else
- cc.cycles = cur_cycles - p->cpu_cycles;
- cc.cycles = cc.cycles * NSEC_PER_MSEC;
- cc.time = wallclock - p->ravg.mark_start;
- BUG_ON((s64)cc.time < 0);
- p->cpu_cycles = cur_cycles;
+ /*
+ * If current task is idle task and irqtime == 0 CPU was
+ * indeed idle and probably its cycle counter was not
+ * increasing. We still need estimatied CPU frequency
+ * for IO wait time accounting. Use the previously
+ * calculated frequency in such a case.
+ */
+ if (!is_idle_task(rq->curr) || irqtime) {
+ if (unlikely(cur_cycles < p->cpu_cycles))
+ rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+ else
+ rq->cc.cycles = cur_cycles - p->cpu_cycles;
+ rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+
+ if (event == IRQ_UPDATE && is_idle_task(p))
+ /*
+ * Time between mark_start of idle task and IRQ handler
+ * entry time is CPU cycle counter stall period.
+ * Upon IRQ handler entry sched_account_irqstart()
+ * replenishes idle task's cpu cycle counter so
+ * rq->cc.cycles now represents increased cycles during
+ * IRQ handler rather than time between idle entry and
+ * IRQ exit. Thus use irqtime as time delta.
+ */
+ rq->cc.time = irqtime;
+ else
+ rq->cc.time = wallclock - p->ravg.mark_start;
+ BUG_ON((s64)rq->cc.time < 0);
+ }
- trace_sched_get_task_cpu_cycles(cpu, event, cc.cycles, cc.time);
+ p->cpu_cycles = cur_cycles;
- return cc;
+ trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
}
static int account_busy_for_task_demand(struct task_struct *p, int event)
@@ -2824,10 +2837,9 @@ done:
trace_sched_update_history(rq, p, runtime, samples, event);
}
-static void add_to_task_demand(struct rq *rq, struct task_struct *p,
- u64 delta, const struct cpu_cycle *cc)
+static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
{
- delta = scale_exec_time(delta, rq, cc);
+ delta = scale_exec_time(delta, rq);
p->ravg.sum += delta;
if (unlikely(p->ravg.sum > sched_ravg_window))
p->ravg.sum = sched_ravg_window;
@@ -2884,8 +2896,7 @@ static void add_to_task_demand(struct rq *rq, struct task_struct *p,
* depends on it!
*/
static void update_task_demand(struct task_struct *p, struct rq *rq,
- int event, u64 wallclock,
- const struct cpu_cycle *cc)
+ int event, u64 wallclock)
{
u64 mark_start = p->ravg.mark_start;
u64 delta, window_start = rq->window_start;
@@ -2908,7 +2919,7 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
if (!new_window) {
/* The simple case - busy time contained within the existing
* window. */
- add_to_task_demand(rq, p, wallclock - mark_start, cc);
+ add_to_task_demand(rq, p, wallclock - mark_start);
return;
}
@@ -2919,12 +2930,12 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
window_start -= (u64)nr_full_windows * (u64)window_size;
/* Process (window_start - mark_start) first */
- add_to_task_demand(rq, p, window_start - mark_start, cc);
+ add_to_task_demand(rq, p, window_start - mark_start);
/* Push new sample(s) into task's demand history */
update_history(rq, p, p->ravg.sum, 1, event);
if (nr_full_windows)
- update_history(rq, p, scale_exec_time(window_size, rq, cc),
+ update_history(rq, p, scale_exec_time(window_size, rq),
nr_full_windows, event);
/* Roll window_start back to current to process any remainder
@@ -2933,18 +2944,16 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
/* Process (wallclock - window_start) next */
mark_start = window_start;
- add_to_task_demand(rq, p, wallclock - mark_start, cc);
+ add_to_task_demand(rq, p, wallclock - mark_start);
}
/* Reflect task activity on its demand and cpu's busy time statistics */
-static struct cpu_cycle
+static void
update_task_ravg(struct task_struct *p, struct rq *rq, int event,
u64 wallclock, u64 irqtime)
{
- struct cpu_cycle cc = { .cycles = SCHED_MIN_FREQ, .time = 1 };
-
if (sched_use_pelt || !rq->window_start || sched_disable_window_stats)
- return cc;
+ return;
lockdep_assert_held(&rq->lock);
@@ -2955,18 +2964,16 @@ update_task_ravg(struct task_struct *p, struct rq *rq, int event,
goto done;
}
- cc = get_task_cpu_cycles(p, rq, event, wallclock);
- update_task_demand(p, rq, event, wallclock, &cc);
- update_cpu_busy_time(p, rq, event, wallclock, irqtime, &cc);
+ update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
+ update_task_demand(p, rq, event, wallclock);
+ update_cpu_busy_time(p, rq, event, wallclock, irqtime);
update_task_pred_demand(rq, p, event);
done:
trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
- cc.cycles, cc.time,
+ rq->cc.cycles, rq->cc.time,
_group_cpu_time(p->grp, cpu_of(rq)));
p->ravg.mark_start = wallclock;
-
- return cc;
}
void sched_account_irqtime(int cpu, struct task_struct *curr,
@@ -3009,6 +3016,17 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (!rq->window_start || sched_disable_window_stats)
+ return;
+
+ if (is_idle_task(curr) && use_cycle_counter)
+ update_task_cpu_cycles(curr, cpu);
+}
+
static void reset_task_stats(struct task_struct *p)
{
u32 sum = 0;
@@ -3259,7 +3277,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
int early_detection[cpus];
int cpu, i = 0;
unsigned int window_size;
- struct cpu_cycle cc;
u64 max_prev_sum = 0;
int max_busy_cpu = cpumask_first(query_cpus);
struct related_thread_group *grp;
@@ -3283,9 +3300,9 @@ void sched_get_cpus_busy(struct sched_load *busy,
for_each_cpu(cpu, query_cpus) {
rq = cpu_rq(cpu);
- cc = update_task_ravg(rq->curr, rq, TASK_UPDATE,
- sched_ktime_clock(), 0);
- cur_freq[i] = cpu_cycles_to_freq(i, cc.cycles, cc.time);
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
+ 0);
+ cur_freq[i] = cpu_cycles_to_freq(i, rq->cc.cycles, rq->cc.time);
load[i] = rq->old_busy_time = rq->prev_runnable_sum;
nload[i] = rq->nt_prev_runnable_sum;
@@ -3644,7 +3661,7 @@ static void set_preferred_cluster(struct related_thread_group *grp)
#ifdef CONFIG_SCHED_FREQ_INPUT
-static struct cpu_cycle
+static void
update_task_ravg(struct task_struct *p, struct rq *rq,
int event, u64 wallclock, u64 irqtime);
@@ -4021,23 +4038,24 @@ static void update_cpu_cluster_capacity(const cpumask_t *cpus)
post_big_task_count_change(cpu_possible_mask);
}
+static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
{
struct cpumask cpumask;
struct sched_cluster *cluster;
- unsigned int orig_max_freq;
int i, update_capacity = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
cpumask_copy(&cpumask, cpus);
for_each_cpu(i, &cpumask) {
cluster = cpu_rq(i)->cluster;
cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
- orig_max_freq = cpu_max_freq(i);
+ update_capacity += (cluster->max_mitigated_freq != fmax);
cluster->max_mitigated_freq = fmax;
-
- update_capacity += (orig_max_freq != cpu_max_freq(i));
}
+ spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
if (update_capacity)
update_cpu_cluster_capacity(cpus);
@@ -4073,7 +4091,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
cpumask_andnot(&policy_cluster, &policy_cluster,
&cluster->cpus);
- orig_max_freq = cpu_max_freq(i);
+ orig_max_freq = cluster->max_freq;
cluster->min_freq = policy->min;
cluster->max_freq = policy->max;
cluster->cur_freq = policy->cur;
@@ -4094,7 +4112,7 @@ static int cpufreq_notifier_policy(struct notifier_block *nb,
continue;
}
- update_capacity += (orig_max_freq != cpu_max_freq(i));
+ update_capacity += (orig_max_freq != cluster->max_freq);
}
if (update_capacity)
@@ -4222,16 +4240,10 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
-static struct cpu_cycle
+static void
update_task_ravg(struct task_struct *p, struct rq *rq,
int event, u64 wallclock, u64 irqtime)
{
- static const struct cpu_cycle cc = {
- .cycles = SCHED_MIN_FREQ,
- .time = 1
- };
-
- return cc;
}
static inline void mark_task_starting(struct task_struct *p) {}
@@ -10962,6 +10974,8 @@ void __init sched_init(void)
rq->avg_irqload = 0;
rq->irqload_ts = 0;
rq->static_cpu_pwr_cost = 0;
+ rq->cc.cycles = SCHED_MIN_FREQ;
+ rq->cc.time = 1;
/*
* All cpus part of same cluster by default. This avoids the
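
Note: the core.c hunks above cache the cycle sample in rq->cc under rq->lock and, when the CPU has genuinely been idle (idle task running and no irqtime to charge), keep the previously computed estimate so I/O-wait accounting still sees a plausible frequency. On IRQ_UPDATE for the idle task, irqtime is used as the time base because sched_account_irqstart() resynchronizes the idle task's counter at handler entry. A condensed, self-contained sketch of that decision, with wrap_sub() as a hypothetical stand-in for the U64_MAX wraparound handling in the hunk:

	/* Condensed sketch of the branch added in update_task_rq_cpu_cycles(). */
	#include <stdint.h>

	struct cpu_cycle { uint64_t cycles; uint64_t time; };

	#define NSEC_PER_MSEC 1000000ULL

	static uint64_t wrap_sub(uint64_t cur, uint64_t prev)
	{
		/* Handle a cycle counter that wrapped past U64_MAX. */
		return cur >= prev ? cur - prev : cur + (UINT64_MAX - prev);
	}

	/* cpu_was_busy: !is_idle_task(rq->curr) in the real code;
	 * irq_update_on_idle: event == IRQ_UPDATE && is_idle_task(p). */
	static void update_cc_sketch(struct cpu_cycle *cc, int cpu_was_busy,
				     uint64_t irqtime, int irq_update_on_idle,
				     uint64_t cur_cycles, uint64_t prev_cycles,
				     uint64_t wallclock, uint64_t mark_start)
	{
		if (!cpu_was_busy && !irqtime)
			return;	/* CPU slept; keep the last good estimate for io-wait accounting */

		cc->cycles = wrap_sub(cur_cycles, prev_cycles) * NSEC_PER_MSEC;
		/* After sched_account_irqstart() resynced the idle task's snapshot at
		 * IRQ entry, only the handler's cycles remain, so pair them with irqtime. */
		cc->time = irq_update_on_idle ? irqtime : wallclock - mark_start;
	}
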
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 930d3ce4f34e..647f184f8aec 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -80,6 +80,8 @@ void irqtime_account_irq(struct task_struct *curr)
if (account)
sched_account_irqtime(cpu, curr, delta, wallclock);
+ else if (curr != this_cpu_ksoftirqd())
+ sched_account_irqstart(cpu, curr, wallclock);
local_irq_restore(flags);
}
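
Note: the cputime.c hook ties the two core.c pieces together. Assuming the usual __irq_enter()/__irq_exit() ordering, the entry-side call (nothing to account yet, and not ksoftirqd) lands in sched_account_irqstart() and resnapshots the idle task's cycle counter, so the exit-side sched_account_irqtime() measures only cycles spent in the handler. The assumed flow, condensed from the hunks:

	/*
	 * Assumed call flow on an otherwise idle CPU (sketch, not verbatim code):
	 *
	 *   IRQ entry:  irqtime_account_irq(curr)
	 *                 -> sched_account_irqstart(cpu, curr, wallclock)
	 *                      -> update_task_cpu_cycles(curr, cpu)   // resync idle task's snapshot
	 *   IRQ exit:   irqtime_account_irq(curr)
	 *                 -> sched_account_irqtime(cpu, curr, delta, wallclock)
	 *                      -> update_task_ravg(curr, rq, IRQ_UPDATE, wallclock, irqtime)
	 *                           -> rq->cc.time = irqtime          // cycles measured over the handler only
	 */
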
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3d5a89cc6eef..ff2161cc9fc0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -427,6 +427,11 @@ extern struct sched_cluster *sched_cluster[NR_CPUS];
extern int group_will_fit(struct sched_cluster *cluster,
struct related_thread_group *grp, u64 demand);
+struct cpu_cycle {
+ u64 cycles;
+ u64 time;
+};
+
#define for_each_sched_cluster(cluster) \
list_for_each_entry_rcu(cluster, &cluster_head, list)
@@ -749,6 +754,7 @@ struct rq {
u64 irqload_ts;
unsigned int static_cpu_pwr_cost;
struct task_struct *ed_task;
+ struct cpu_cycle cc;
#ifdef CONFIG_SCHED_FREQ_INPUT
u64 old_busy_time, old_busy_time_group;
@@ -1071,6 +1077,9 @@ extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern unsigned int max_task_load(void);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+ u64 wallclock);
+
unsigned int cpu_temp(int cpu);
int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int nr_eligible_big_tasks(int cpu);
@@ -1317,6 +1326,11 @@ static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
{
}
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+ u64 wallclock)
+{
+}
+
static inline int sched_cpu_high_irqload(int cpu) { return 0; }
static inline void set_preferred_cluster(struct related_thread_group *grp) { }
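
Note: the sched.h hunks move struct cpu_cycle next to the other HMP declarations, embed it in struct rq, and add the sched_account_irqstart() prototype plus a no-op stub for configurations without the feature; the core.c init hunk seeds the sample so the scale_exec_time() denominator is never zero before the first update. A small self-contained sketch of that invariant, with rq_sketch standing in for the real struct rq:

	/* Sketch of the invariant established by the sched.h and sched_init() hunks. */
	#include <stdint.h>
	typedef uint64_t u64;

	#define SCHED_MIN_FREQ 1

	struct cpu_cycle { u64 cycles; u64 time; };

	struct rq_sketch {
		/* ... other window-stats fields elided ... */
		struct cpu_cycle cc;	/* last cycles/time estimate for this CPU */
	};

	/* Mirrors the sched_init() hunk: seed the sample so that
	 * max_possible_freq * rq->cc.time is never zero in scale_exec_time(). */
	static void init_rq_cc(struct rq_sketch *rq)
	{
		rq->cc.cycles = SCHED_MIN_FREQ;
		rq->cc.time = 1;
	}
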