-rw-r--r--   include/linux/types.h |  3
-rw-r--r--   kernel/sched/core.c   |  2
-rw-r--r--   kernel/sched/hmp.c    | 86
-rw-r--r--   kernel/sched/sched.h  |  3
4 files changed, 78 insertions, 16 deletions
diff --git a/include/linux/types.h b/include/linux/types.h
index 70dd3dfde631..9f2d2f46b459 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -9,6 +9,9 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
+#define DECLARE_BITMAP_ARRAY(name,nr,bits) \
+ unsigned long name[nr][BITS_TO_LONGS(bits)]
+
typedef __u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
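
The new DECLARE_BITMAP_ARRAY macro mirrors DECLARE_BITMAP but declares nr bitmaps of bits bits each as a two-dimensional array of unsigned long, so each row can be handed to the usual bitmap primitives. The following is a minimal user-space sketch of the expansion, assuming illustrative sizes; BITS_TO_LONGS, the demo struct and the 1000-bit width are stand-ins here, not the kernel's definitions.

/* Minimal user-space sketch of what DECLARE_BITMAP_ARRAY expands to.
 * BITS_TO_LONGS and struct demo_rq are stand-ins for illustration only. */
#include <stdio.h>
#include <string.h>
#include <limits.h>

#define BITS_PER_LONG      (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)  (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define DECLARE_BITMAP(name, bits) \
        unsigned long name[BITS_TO_LONGS(bits)]
#define DECLARE_BITMAP_ARRAY(name, nr, bits) \
        unsigned long name[nr][BITS_TO_LONGS(bits)]

struct demo_rq {
        /* Two windows, 1000 indices each -> 2 x 16 longs on a 64-bit build. */
        DECLARE_BITMAP_ARRAY(top_tasks_bitmap, 2, 1000);
};

int main(void)
{
        struct demo_rq rq;

        memset(&rq, 0, sizeof(rq));

        /* Each rq.top_tasks_bitmap[w] row can be passed wherever a plain
         * DECLARE_BITMAP of the same width is expected. */
        rq.top_tasks_bitmap[0][0] |= 1UL;       /* set bit 0 of window 0 */
        printf("longs per bitmap: %zu\n",
               sizeof(rq.top_tasks_bitmap[0]) / sizeof(unsigned long));
        return 0;
}

On a 64-bit build, 1000 bits round up to 16 longs per row, which is the per-window footprint each rq pays for the new bitmaps.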
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c616517d4d3..7e7e19ed53c6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8028,6 +8028,8 @@ void __init sched_init(void)
/* No other choice */
BUG_ON(!rq->top_tasks[j]);
+
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
}
#endif
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 8675ebeebf6a..dffe18ebab74 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -863,6 +863,10 @@ static DEFINE_RWLOCK(related_thread_group_lock);
__read_mostly unsigned int sched_load_granule =
MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
+/* Size of bitmaps maintained to track top tasks */
+static const unsigned int top_tasks_bitmap_size =
+ BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
+
/*
* Demand aggregation for frequency purpose:
*
@@ -2183,6 +2187,12 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
p->ravg.pred_demand = new;
}
+void clear_top_tasks_bitmap(unsigned long *bitmap)
+{
+ memset(bitmap, 0, top_tasks_bitmap_size);
+ __set_bit(NUM_LOAD_INDICES, bitmap);
+}
+
/*
* Special case the last index and provide a fast path for index = 0.
* Note that sched_load_granule can change underneath us if we are not
@@ -2191,9 +2201,12 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
static u32 __maybe_unused top_task_load(struct rq *rq)
{
int index = rq->prev_top;
+ u8 prev = 1 - rq->curr_table;
if (!index) {
- if (!rq->prev_runnable_sum)
+ int msb = NUM_LOAD_INDICES - 1;
+
+ if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
return 0;
else
return sched_load_granule;
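
The bitmap stores load index i at bit position NUM_LOAD_INDICES - 1 - i, so the busiest buckets sit at the lowest bit positions. That is why the index == 0 fast path above can test bit NUM_LOAD_INDICES - 1 of the previous window's bitmap instead of consulting prev_runnable_sum, and why clear_top_tasks_bitmap() sizes its memset for NUM_LOAD_INDICES + 1 bits and sets the extra top bit, which appears to act as a sentinel that keeps a freshly cleared bitmap non-empty. The mapping helpers below are a sketch; index_to_bit and bit_to_index are illustrative names, not kernel functions, and NUM_LOAD_INDICES = 1000 is an assumption.

/* Sketch of the index <-> bit mapping used by the patch; helper names are
 * illustrative and NUM_LOAD_INDICES is assumed to be 1000. */
#include <assert.h>

#define NUM_LOAD_INDICES 1000

/* Load index 0 (the bucket checked by top_task_load()) lands at bit
 * NUM_LOAD_INDICES - 1; higher load indices land at lower bit positions,
 * so a forward bit search walks from the highest load bucket downwards. */
static int index_to_bit(int load_index)
{
        return NUM_LOAD_INDICES - 1 - load_index;
}

static int bit_to_index(int bit)
{
        return NUM_LOAD_INDICES - 1 - bit;
}

int main(void)
{
        assert(index_to_bit(0) == NUM_LOAD_INDICES - 1);
        assert(bit_to_index(index_to_bit(42)) == 42);
        return 0;
}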
@@ -2217,8 +2230,10 @@ static int load_to_index(u32 load)
static void update_top_tasks(struct task_struct *p, struct rq *rq,
u32 old_curr_window, int new_window, bool full_window)
{
- u8 *curr_table = rq->top_tasks[rq->curr_table];
- u8 *prev_table = rq->top_tasks[1 - rq->curr_table];
+ u8 curr = rq->curr_table;
+ u8 prev = 1 - curr;
+ u8 *curr_table = rq->top_tasks[curr];
+ u8 *prev_table = rq->top_tasks[prev];
int old_index, new_index, update_index;
u32 curr_window = p->ravg.curr_window;
u32 prev_window = p->ravg.prev_window;
@@ -2241,6 +2256,14 @@ static void update_top_tasks(struct task_struct *p, struct rq *rq,
rq->curr_top = new_index;
}
+ if (!curr_table[old_index])
+ __clear_bit(NUM_LOAD_INDICES - old_index - 1,
+ rq->top_tasks_bitmap[curr]);
+
+ if (curr_table[new_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - new_index - 1,
+ rq->top_tasks_bitmap[curr]);
+
return;
}
@@ -2264,6 +2287,10 @@ static void update_top_tasks(struct task_struct *p, struct rq *rq,
prev_table[update_index] += 1;
rq->prev_top = update_index;
}
+
+ if (prev_table[update_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - update_index - 1,
+ rq->top_tasks_bitmap[prev]);
} else {
zero_index_update = !old_curr_window && prev_window;
if (old_index != update_index || zero_index_update) {
@@ -2274,6 +2301,14 @@ static void update_top_tasks(struct task_struct *p, struct rq *rq,
if (update_index > rq->prev_top)
rq->prev_top = update_index;
+
+ if (!prev_table[old_index])
+ __clear_bit(NUM_LOAD_INDICES - old_index - 1,
+ rq->top_tasks_bitmap[prev]);
+
+ if (prev_table[update_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - update_index - 1,
+ rq->top_tasks_bitmap[prev]);
}
}
@@ -2282,6 +2317,10 @@ static void update_top_tasks(struct task_struct *p, struct rq *rq,
if (new_index > rq->curr_top)
rq->curr_top = new_index;
+
+ if (curr_table[new_index] == 1)
+ __set_bit(NUM_LOAD_INDICES - new_index - 1,
+ rq->top_tasks_bitmap[curr]);
}
}
@@ -2412,11 +2451,14 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
int curr_top = rq->curr_top;
clear_top_tasks_table(rq->top_tasks[prev_table]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
if (prev_sum_reset) {
curr_sum = nt_curr_sum = 0;
curr_top = 0;
clear_top_tasks_table(rq->top_tasks[curr_table]);
+ clear_top_tasks_bitmap(
+ rq->top_tasks_bitmap[curr_table]);
}
*prev_runnable_sum = curr_sum;
@@ -3155,6 +3197,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
memset(&rq->load_subs[i], 0,
sizeof(struct load_subtractions));
clear_top_tasks_table(rq->top_tasks[i]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]);
}
rq->curr_table = 0;
@@ -3572,19 +3615,14 @@ static inline void inter_cluster_migration_fixup
BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
}
-static int find_next_top_index(u8 *tasks, int end)
+static int get_top_index(unsigned long *bitmap, unsigned long old_top)
{
- int i;
+ int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
- if (end <= 1)
+ if (index == NUM_LOAD_INDICES)
return 0;
- for (i = end - 1; i >= 0; i--) {
- if (tasks[i])
- return i;
- }
-
- return 0;
+ return NUM_LOAD_INDICES - 1 - index;
}
static void
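
get_top_index() replaces the old backwards linear scan over the u8 counter table with a single find_next_bit() over the reversed bitmap: because higher load indices live at lower bit positions, the first set bit at or above the search offset corresponds to the highest occupied load index, and a result of NUM_LOAD_INDICES (no bit found) falls back to 0 exactly as the old loop did. The user-space sketch below reproduces that shape; the local find_next_bit and set_bit_ul are naive stand-ins for the kernel primitives, and NUM_LOAD_INDICES = 1000 is assumed.

/* User-space sketch of the new lookup. find_next_bit here is a naive
 * stand-in with the kernel contract: lowest set bit at or above 'offset',
 * or 'size' if none is set. */
#include <stdio.h>
#include <limits.h>

#define NUM_LOAD_INDICES   1000
#define BITS_PER_LONG      (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)  (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_ul(int nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static unsigned long find_next_bit(const unsigned long *addr,
                                   unsigned long size, unsigned long offset)
{
        unsigned long i;

        for (i = offset; i < size; i++)
                if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        return i;
        return size;
}

/* Same shape as the patched kernel function: the first set bit maps back
 * to the highest occupied load index because of the reversed layout. */
static int get_top_index(unsigned long *bitmap, unsigned long old_top)
{
        int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);

        if (index == NUM_LOAD_INDICES)
                return 0;

        return NUM_LOAD_INDICES - 1 - index;
}

int main(void)
{
        unsigned long bitmap[BITS_TO_LONGS(NUM_LOAD_INDICES + 1)] = { 0 };

        /* Mark load indices 3 and 7 as occupied (reversed bit positions). */
        set_bit_ul(NUM_LOAD_INDICES - 1 - 3, bitmap);
        set_bit_ul(NUM_LOAD_INDICES - 1 - 7, bitmap);

        printf("top index: %d\n", get_top_index(bitmap, 0));    /* prints 7 */
        return 0;
}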
@@ -3606,13 +3644,21 @@ migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
src_table[index] -= 1;
dst_table[index] += 1;
+ if (!src_table[index])
+ __clear_bit(NUM_LOAD_INDICES - index - 1,
+ src_rq->top_tasks_bitmap[src]);
+
+ if (dst_table[index] == 1)
+ __set_bit(NUM_LOAD_INDICES - index - 1,
+ dst_rq->top_tasks_bitmap[dst]);
+
if (index > dst_rq->curr_top)
dst_rq->curr_top = index;
top_index = src_rq->curr_top;
if (index == top_index && !src_table[index])
- src_rq->curr_top =
- find_next_top_index(src_table, top_index);
+ src_rq->curr_top = get_top_index(
+ src_rq->top_tasks_bitmap[src], top_index);
}
if (prev_window) {
@@ -3624,13 +3670,21 @@ migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
src_table[index] -= 1;
dst_table[index] += 1;
+ if (!src_table[index])
+ __clear_bit(NUM_LOAD_INDICES - index - 1,
+ src_rq->top_tasks_bitmap[src]);
+
+ if (dst_table[index] == 1)
+ __set_bit(NUM_LOAD_INDICES - index - 1,
+ dst_rq->top_tasks_bitmap[dst]);
+
if (index > dst_rq->prev_top)
dst_rq->prev_top = index;
top_index = src_rq->prev_top;
if (index == top_index && !src_table[index])
- src_rq->prev_top =
- find_next_top_index(src_table, top_index);
+ src_rq->prev_top = get_top_index(
+ src_rq->top_tasks_bitmap[src], top_index);
}
}
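
Throughout update_top_tasks() and migrate_top_tasks() the patch keeps one invariant: a bit is set in top_tasks_bitmap[w] exactly when the matching counter in top_tasks[w] is non-zero, so bits are toggled only on the 0 -> 1 and 1 -> 0 edges of the per-index task counts. A compact sketch of that paired update follows; the helper names and the minimal __set_bit/__clear_bit are illustrative, not the kernel's implementations.

/* Sketch of the counter/bitmap invariant maintained by the patch:
 * table[i] > 0 exactly when bit (NUM_LOAD_INDICES - 1 - i) is set. */
#include <assert.h>
#include <limits.h>

#define NUM_LOAD_INDICES   1000
#define BITS_PER_LONG      (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)  (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned char table[NUM_LOAD_INDICES];
static unsigned long bitmap[BITS_TO_LONGS(NUM_LOAD_INDICES + 1)];

static void __set_bit(int nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void __clear_bit(int nr, unsigned long *addr)
{
        addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

/* A task enters load bucket 'index': set the bit on the 0 -> 1 edge only. */
static void top_task_add(int index)
{
        table[index] += 1;
        if (table[index] == 1)
                __set_bit(NUM_LOAD_INDICES - index - 1, bitmap);
}

/* A task leaves load bucket 'index': clear the bit on the 1 -> 0 edge only. */
static void top_task_remove(int index)
{
        table[index] -= 1;
        if (!table[index])
                __clear_bit(NUM_LOAD_INDICES - index - 1, bitmap);
}

int main(void)
{
        top_task_add(5);
        top_task_add(5);
        top_task_remove(5);
        /* Two tasks added, one removed: the bucket is still occupied. */
        assert(table[5] == 1);
        return 0;
}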
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5cbf374696ee..4fd56b04c336 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -753,6 +753,8 @@ struct rq {
u64 nt_curr_runnable_sum;
u64 nt_prev_runnable_sum;
struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
+ DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
+ NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
u8 *top_tasks[NUM_TRACKED_WINDOWS];
u8 curr_table;
int prev_top;
@@ -1417,6 +1419,7 @@ extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 upmigrate_discourage);
extern void sched_hmp_parse_dt(void);
extern void init_sched_hmp_boost_policy(void);
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#else /* CONFIG_SCHED_HMP */