diff options
Diffstat (limited to 'kernel/sched/rt.c')
 kernel/sched/rt.c | 43 +++++++++++++++++++++++++++++++++++--------
 1 file changed, 34 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f42ae7fb5ec5..b60dad720173 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1;
+	int i, idle = 1, throttled = 0;
 	const struct cpumask *span;
 
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
 	span = sched_rt_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
@@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (!rt_rq_throttled(rt_rq))
 				enqueue = 1;
 		}
+		if (rt_rq->rt_throttled)
+			throttled = 1;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+		return 1;
+
 	return idle;
 }
 
@@ -855,8 +857,30 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		return 0;
 
 	if (rt_rq->rt_time > runtime) {
-		rt_rq->rt_throttled = 1;
-		printk_once(KERN_WARNING "sched: RT throttling activated\n");
+		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+		/*
+		 * Don't actually throttle groups that have no runtime assigned
+		 * but accrue some time due to boosting.
+		 */
+		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
+			rt_rq->rt_throttled = 1;
+
+			if (!once) {
+				once = true;
+				printk_sched("sched: RT throttling activated\n");
+			}
+		} else {
+			/*
+			 * In case we did anyway, make it go away,
+			 * replenishment is a joke, since it will replenish us
+			 * with exactly 0 ns.
+			 */
+			rt_rq->rt_time = 0;
+		}
+
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
@@ -884,7 +908,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max,
+		      max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
@@ -1972,7 +1997,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = DEF_TIMESLICE;
+	p->rt.time_slice = RR_TIMESLICE;
 
 	/*
 	 * Requeue to the end of queue if we are not the only element
@@ -2000,7 +2025,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return DEF_TIMESLICE;
+		return RR_TIMESLICE;
 	else
 		return 0;
 }
