author	Srinivasarao P <spathi@codeaurora.org>	2018-02-05 11:56:41 +0530
committer	Srinivasarao P <spathi@codeaurora.org>	2018-02-05 11:59:38 +0530
commit	2fd547e8d1f09628007fc2bca04ccff416bb9d76 (patch)
tree	5c38fac921c4ed2778f6c9c4d5b732614528bd87 /kernel/sched
parent	38cacfd106259c2f10d8ee33047b7212d7844732 (diff)
parent	aa856bd83c43969dc3e8f8c0e97e6105e91aef76 (diff)
Merge android-4.4.115 (aa856bd) into msm-4.4
* refs/heads/tmp-aa856bd:
  Linux 4.4.115
  spi: imx: do not access registers while clocks disabled
  serial: imx: Only wakeup via RTSDEN bit if the system has RTS/CTS
  selinux: general protection fault in sock_has_perm
  usb: uas: unconditionally bring back host after reset
  usb: f_fs: Prevent gadget unbind if it is already unbound
  USB: serial: simple: add Motorola Tetra driver
  usbip: list: don't list devices attached to vhci_hcd
  usbip: prevent bind loops on devices attached to vhci_hcd
  USB: serial: io_edgeport: fix possible sleep-in-atomic
  CDC-ACM: apply quirk for card reader
  USB: cdc-acm: Do not log urb submission errors on disconnect
  USB: serial: pl2303: new device id for Chilitag
  usb: option: Add support for FS040U modem
  staging: rtl8188eu: Fix incorrect response to SIOCGIWESSID
  usb: gadget: don't dereference g until after it has been null checked
  media: usbtv: add a new usbid
  scsi: ufs: ufshcd: fix potential NULL pointer dereference in ufshcd_config_vreg
  scsi: aacraid: Prevent crash in case of free interrupt during scsi EH path
  xfs: ubsan fixes
  drm/omap: Fix error handling path in 'omap_dmm_probe()'
  kmemleak: add scheduling point to kmemleak_scan()
  SUNRPC: Allow connect to return EHOSTUNREACH
  quota: Check for register_shrinker() failure.
  net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit
  hwmon: (pmbus) Use 64bit math for DIRECT format values
  lockd: fix "list_add double add" caused by legacy signal interface
  nfsd: check for use of the closed special stateid
  grace: replace BUG_ON by WARN_ONCE in exit_net hook
  nfsd: Ensure we check stateid validity in the seqid operation checks
  nfsd: CLOSE SHOULD return the invalid special stateid for NFSv4.x (x>0)
  xen-netfront: remove warning when unloading module
  KVM: VMX: Fix rflags cache during vCPU reset
  btrfs: fix deadlock when writing out space cache
  mac80211: fix the update of path metric for RANN frame
  openvswitch: fix the incorrect flow action alloc size
  drm/amdkfd: Fix SDMA oversubsription handling
  drm/amdkfd: Fix SDMA ring buffer size calculation
  drm/amdgpu: Fix SDMA load/unload sequence on HWS disabled mode
  bcache: check return value of register_shrinker
  cpufreq: Add Loongson machine dependencies
  ACPI / bus: Leave modalias empty for devices which are not present
  KVM: x86: ioapic: Preserve read-only values in the redirection table
  KVM: x86: ioapic: Clear Remote IRR when entry is switched to edge-triggered
  KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race
  KVM: X86: Fix operand/address-size during instruction decoding
  KVM: x86: Don't re-execute instruction when not passing CR2 value
  KVM: x86: emulator: Return to user-mode on L1 CPL=0 emulation failure
  igb: Free IRQs when device is hotplugged
  mtd: nand: denali_pci: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  gpio: ath79: add missing MODULE_DESCRIPTION/LICENSE
  gpio: iop: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  power: reset: zx-reboot: add missing MODULE_DESCRIPTION/AUTHOR/LICENSE
  crypto: af_alg - whitelist mask and type
  crypto: aesni - handle zero length dst buffer
  ALSA: seq: Make ioctls race-free
  kaiser: fix intel_bts perf crashes
  x86/pti: Make unpoison of pgd for trusted boot work for real
  bpf: reject stores into ctx via st and xadd
  bpf: fix 32-bit divide by zero
  bpf: fix divides by zero
  bpf: avoid false sharing of map refcount with max_entries
  bpf: arsh is not supported in 32 bit alu thus reject it
  bpf: introduce BPF_JIT_ALWAYS_ON config
  bpf: fix bpf_tail_call() x64 JIT
  x86: bpf_jit: small optimization in emit_bpf_tail_call()
  bpf: fix branch pruning logic
  loop: fix concurrent lo_open/lo_release
  ANDROID: sdcardfs: Protect set_top
  ANDROID: fsnotify: Notify lower fs of open
  Revert "ANDROID: sdcardfs: notify lower file of opens"
  ANDROID: sdcardfs: Use lower getattr times/size
  ANDROID: sched/rt: schedtune: Add boost retention to RT

Conflicts:
	arch/x86/Kconfig
	kernel/sched/rt.c

Change-Id: I91b08e1b8e0a1c6ca9c245597acad0bf197f9527
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	1
-rw-r--r--	kernel/sched/rt.c	148
-rw-r--r--	kernel/sched/sched.h	1
3 files changed, 150 insertions, 0 deletions
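
Only the last merged change, "ANDROID: sched/rt: schedtune: Add boost retention to RT", touches kernel/sched. The idea: when a boosted RT task goes to sleep, retain the schedtune boost for another 50 ms instead of dropping it immediately, so a task that wakes up quickly does not pay the boost ramp-up again; an hrtimer performs the deferred deboost only if the task is still asleep when it fires. The following userspace sketch simulates that state machine; boost()/deboost() and the event handlers are hypothetical stand-ins for schedtune_enqueue_task()/schedtune_dequeue_task(), and only the flag-and-timer logic mirrors the patch below.

	/* Minimal userspace simulation of the boost-retention state machine
	 * (illustrative only; not kernel code). */
	#include <stdbool.h>
	#include <stdio.h>

	#define RT_SCHEDTUNE_INTERVAL_NS 50000000ULL	/* 50 ms deboost delay */

	static bool schedtune_enqueued;	/* boost currently applied? */
	static bool timer_armed;	/* deferred deboost pending? */

	static void boost(void)   { puts("boost: cpu bias raised"); }
	static void deboost(void) { puts("deboost: cpu bias dropped"); }

	/* enqueue path: cancel a pending deboost rather than re-boosting */
	static void on_enqueue(void)
	{
		if (timer_armed) {
			timer_armed = false;	/* woke within 50 ms: keep boost */
			return;
		}
		if (schedtune_enqueued)
			return;			/* raced with the callback: already boosted */
		schedtune_enqueued = true;
		boost();
	}

	/* dequeue on sleep: defer the deboost instead of doing it now */
	static void on_dequeue_sleep(void)
	{
		if (schedtune_enqueued)
			timer_armed = true;
	}

	/* any other dequeue: deboost immediately */
	static void on_dequeue_other(void)
	{
		if (schedtune_enqueued) {
			schedtune_enqueued = false;
			deboost();
		}
	}

	/* timer expiry: task stayed asleep the whole interval, drop the boost */
	static void on_timer_fire(void)
	{
		if (timer_armed && schedtune_enqueued) {
			timer_armed = false;
			schedtune_enqueued = false;
			deboost();
		}
	}

	int main(void)
	{
		on_enqueue();		/* task wakes: boost */
		on_dequeue_sleep();	/* short sleep: timer armed, boost retained */
		on_enqueue();		/* wakes within 50 ms: timer cancelled, no re-boost */
		on_dequeue_sleep();	/* sleeps again */
		on_timer_fire();	/* 50 ms pass: deboost */
		(void)on_dequeue_other;
		return 0;
	}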
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 59ba1d1266a7..41a8fdb1436c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2368,6 +2368,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
 
+	init_rt_schedtune_timer(&p->rt);
 	INIT_LIST_HEAD(&p->rt.run_list);
 	p->rt.timeout = 0;
 	p->rt.time_slice = sched_rr_timeslice;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8025828ff4e0..baad64b94f15 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/irq_work.h>
 #include <trace/events/sched.h>
+#include <linux/hrtimer.h>
 #include "tune.h"
@@ -980,6 +981,70 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 	return 0;
 }
 
+#define RT_SCHEDTUNE_INTERVAL 50000000ULL
+
+static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
+{
+	struct sched_rt_entity *rt_se = container_of(timer,
+			struct sched_rt_entity,
+			schedtune_timer);
+	struct task_struct *p = rt_task_of(rt_se);
+	struct rq *rq = task_rq(p);
+
+	raw_spin_lock(&rq->lock);
+
+	/*
+	 * Nothing to do if:
+	 * - the task has switched runqueues
+	 * - the task isn't RT anymore
+	 */
+	if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
+		goto out;
+
+	/*
+	 * If the task was enqueued back while this callback was pending, we
+	 * raced with an enqueue on another CPU. That is fine: the enqueue
+	 * path tried to cancel us, so do nothing. Also check the
+	 * schedtune_enqueued flag, since a class switch on a sleeping task
+	 * may already have cancelled the timer and done the dequeue.
+	 */
+	if (p->on_rq || !rt_se->schedtune_enqueued)
+		goto out;
+
+	/*
+	 * The RT task is no longer active; cancel the boost.
+	 */
+	rt_se->schedtune_enqueued = false;
+	schedtune_dequeue_task(p, cpu_of(rq));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+out:
+	raw_spin_unlock(&rq->lock);
+
+	/*
+	 * This may free the task_struct if we held the last reference.
+	 */
+	put_task_struct(p);
+
+	return HRTIMER_NORESTART;
+}
+
+void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
+{
+	struct hrtimer *timer = &rt_se->schedtune_timer;
+
+	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	timer->function = rt_schedtune_timer;
+	rt_se->schedtune_enqueued = false;
+}
+
+static void start_schedtune_timer(struct sched_rt_entity *rt_se)
+{
+	struct hrtimer *timer = &rt_se->schedtune_timer;
+
+	hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
+			HRTIMER_MODE_REL_PINNED);
+}
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
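
The boost lifetime above rests on a reference-counting contract: the path that arms the timer (the sleep dequeue below) takes a task reference with get_task_struct(), and exactly one party drops it, either the callback itself or a path that successfully cancels a pending timer. hrtimer_try_to_cancel() makes the hand-off unambiguous, which is why the hunks below compare its return value against 1. A hedged sketch of that rule; cancel_boost_timer() is a hypothetical helper, not part of the patch:

	/*
	 * Illustrative helper, not in the patch. hrtimer_try_to_cancel()
	 * returns:
	 *   1  -> a queued timer was removed; we now own its task reference
	 *   0  -> the timer was not queued; there is no reference to drop
	 *  -1  -> the callback is running; it will drop the reference itself
	 */
	static void cancel_boost_timer(struct task_struct *p,
				       struct sched_rt_entity *rt_se)
	{
		if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
			put_task_struct(p);	/* drop the ref taken when armed */
	}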
@@ -1386,7 +1451,32 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 
+	if (!schedtune_task_boost(p))
+		return;
+
+	/*
+	 * If the schedtune timer is active, a boost was already applied;
+	 * just cancel the timer so that the deboost doesn't happen.
+	 * Otherwise, apply the boost. If a queued timer was cancelled,
+	 * drop the task reference it held.
+	 */
+	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
+		put_task_struct(p);
+
+	/*
+	 * schedtune_enqueued can be true here if enqueue_task_rt grabbed
+	 * the rq lock before the timer fired, or before its callback
+	 * acquired the rq lock. It can be false if the timer callback is
+	 * running and has just released the rq lock, or if the timer has
+	 * finished running and cancelled the boost.
+	 */
+	if (rt_se->schedtune_enqueued)
+		return;
+
+	rt_se->schedtune_enqueued = true;
 	schedtune_enqueue_task(p, cpu_of(rq));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1398,7 +1488,19 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dec_hmp_sched_stats_rt(rq, p);
 	dequeue_pushable_task(rq, p);
+
+	if (!rt_se->schedtune_enqueued)
+		return;
+
+	if (flags == DEQUEUE_SLEEP) {
+		get_task_struct(p);
+		start_schedtune_timer(rt_se);
+		return;
+	}
+
+	rt_se->schedtune_enqueued = false;
 	schedtune_dequeue_task(p, cpu_of(rq));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 }
 
 /*
@@ -1472,6 +1574,32 @@ task_may_not_preempt(struct task_struct *task, int cpu)
 		task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 
+/*
+ * Perform a schedtune dequeue and cancellation of boost timers if needed.
+ * Should be called only with the rq->lock held.
+ */
+static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
+{
+	struct sched_rt_entity *rt_se = &p->rt;
+
+	BUG_ON(!raw_spin_is_locked(&rq->lock));
+
+	if (!rt_se->schedtune_enqueued)
+		return;
+
+	/*
+	 * In case of a class change, cancel any active timers. If a queued
+	 * timer was cancelled, drop the task reference it held.
+	 */
+	if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
+		put_task_struct(p);
+
+	/* schedtune_enqueued is true, deboost it */
+	rt_se->schedtune_enqueued = false;
+	schedtune_dequeue_task(p, task_cpu(p));
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+}
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
 		  int sibling_count_hint)
@@ -1546,6 +1674,19 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
 	rcu_read_unlock();
 
 out:
+	/*
+	 * If the previous CPU was different, make sure to cancel any active
+	 * schedtune timers there and deboost.
+	 */
+	if (task_cpu(p) != cpu) {
+		unsigned long fl;
+		struct rq *prq = task_rq(p);
+
+		raw_spin_lock_irqsave(&prq->lock, fl);
+		schedtune_dequeue_rt(prq, p);
+		raw_spin_unlock_irqrestore(&prq->lock, fl);
+	}
+
 	return cpu;
 }
@@ -2402,6 +2543,13 @@ static void rq_offline_rt(struct rq *rq)
 static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
+	 * On a class switch away from RT, always cancel any active schedtune
+	 * timers; this handles the case where we switch class for a task
+	 * that is already rt-dequeued but still has a running timer.
+	 */
+	schedtune_dequeue_rt(rq, p);
+
+	/*
 	 * If there are other RT tasks then we will reschedule
 	 * and the scheduling of the other RT tasks will handle
 	 * the balancing. But if we are the last RT task
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ca2294d06f44..6ef3a59612a9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2199,6 +2199,7 @@ extern void resched_cpu(int cpu);
 extern struct rt_bandwidth def_rt_bandwidth;
 extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 
+extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);
 extern struct dl_bandwidth def_dl_bandwidth;
 extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
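
One caveat when reading this diffstat: the two fields the hunks rely on, schedtune_timer and schedtune_enqueued, are members of struct sched_rt_entity, which is defined in include/linux/sched.h and therefore falls outside the kernel/sched filter of this page. A sketch of the assumed declarations, with field types inferred from their uses above rather than quoted from the patch:

	/* Assumed shape of the include/linux/sched.h additions (not shown in
	 * this diff); names match the uses in kernel/sched/rt.c above. */
	struct sched_rt_entity {
		/* ... existing fields ... */
		/* deferred-deboost timer, armed on DEQUEUE_SLEEP */
		struct hrtimer schedtune_timer;
		/* set while a schedtune boost is applied to this entity */
		bool schedtune_enqueued;
		/* ... */
	};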