linux-pikaos-template/patches/0002-eevdfbore.patch
From 4a346951e2b3c7de65511c95f74fdd7197e3d2e5 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Tue, 11 Jul 2023 19:31:15 +0200
Subject: [PATCH] bore-eevdf

Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
 include/linux/sched.h   |  10 +++
 init/Kconfig            |  20 +++++
 kernel/sched/core.c     |  62 +++++++++++++
 kernel/sched/debug.c    |   4 +
 kernel/sched/fair.c     | 193 ++++++++++++++++++++++++++++++++++++++--
 kernel/sched/features.h |   4 +
 kernel/sched/sched.h    |   1 +
 7 files changed, 286 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 88c3e7ba8992..6b4c553aea75 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -560,6 +560,12 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
+#ifdef CONFIG_SCHED_BORE
+ u64 prev_burst_time;
+ u64 burst_time;
+ u64 max_burst_time;
+ u8 penalty_score;
+#endif // CONFIG_SCHED_BORE
s64 vlag;
u64 slice;
@@ -991,6 +997,10 @@ struct task_struct {
struct list_head children;
struct list_head sibling;
struct task_struct *group_leader;
+#ifdef CONFIG_SCHED_BORE
+ u64 child_burst_cache;
+ u64 child_burst_last_cached;
+#endif // CONFIG_SCHED_BORE
/*
* 'ptraced' is the list of tasks this task is using ptrace() on.
diff --git a/init/Kconfig b/init/Kconfig
index b6d38eccca10..e90546df3182 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1277,6 +1277,26 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
+config SCHED_BORE
+ bool "Burst-Oriented Response Enhancer"
+ default y
+ help
+ In desktop and mobile computing, one might prefer interactive
+ tasks to stay responsive no matter what runs in the background.
+
+ Enabling this kernel feature modifies the scheduler to discriminate
+ between tasks by their burst time (the runtime accumulated since a
+ task last slept or yielded) and to prioritize those that burst less.
+ Such tasks typically include window compositors, widget back-ends,
+ terminal emulators, video playback, games, and so on.
+ At a small cost to scheduling fairness, this may improve
+ responsiveness, especially under heavy background workloads.
+
+ You can turn it off at runtime by setting the sysctl
+ kernel.sched_bore = 0. Enabling this feature implies
+ NO_GENTLE_FAIR_SLEEPERS by default.
+
+ If unsure, say Y here.
+
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
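
As the help text notes, BORE can be disabled at runtime through the
kernel.sched_bore sysctl (registered in kernel/sched/fair.c below). A
minimal userspace sketch of that toggle, assuming the usual procfs mount
point (illustrative only, not part of the patch):

/* toggle_bore.c: write 0 to /proc/sys/kernel/sched_bore,
 * equivalent to `sysctl kernel.sched_bore=0`. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_bore", "w");

	if (!f) {
		perror("sched_bore");	/* kernel built without CONFIG_SCHED_BORE? */
		return 1;
	}
	fputs("0\n", f);
	return fclose(f) ? 1 : 0;
}
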
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df2f22a9729c..4995243a2ba4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4490,6 +4490,57 @@ int wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
+#ifdef CONFIG_SCHED_BORE
+#define CHILD_BURST_CUTOFF_BITS 9
+extern unsigned int sched_burst_cache_lifetime;
+
+void __init sched_init_bore(void) {
+ init_task.child_burst_cache = 0;
+ init_task.child_burst_last_cached = 0;
+ init_task.se.prev_burst_time = 0;
+ init_task.se.burst_time = 0;
+ init_task.se.max_burst_time = 0;
+}
+
+void inline __sched_fork_bore(struct task_struct *p) {
+ p->child_burst_cache = 0;
+ p->child_burst_last_cached = 0;
+ p->se.burst_time = 0;
+}
+
+static inline void update_task_child_burst_time_cache(struct task_struct *p) {
+ u64 sum = 0, avg_burst_time = 0;
+ u32 cnt = 0;
+ struct task_struct *child;
+
+ read_lock(&tasklist_lock);
+ list_for_each_entry(child, &p->children, sibling) {
+ cnt++;
+ sum += child->se.max_burst_time >> CHILD_BURST_CUTOFF_BITS;
+ }
+ read_unlock(&tasklist_lock);
+
+ if (cnt) avg_burst_time = div_u64(sum, cnt) << CHILD_BURST_CUTOFF_BITS;
+ p->child_burst_cache = max(avg_burst_time, p->se.max_burst_time);
+}
+
+static void update_task_initial_burst_time(struct task_struct *task) {
+ struct sched_entity *se = &task->se;
+ struct task_struct *par = task->real_parent;
+ u64 now = ktime_get_ns();
+
+ if (likely(par)) {
+ if (par->child_burst_last_cached + sched_burst_cache_lifetime < now) {
+ par->child_burst_last_cached = now;
+ update_task_child_burst_time_cache(par);
+ }
+ se->prev_burst_time = max(se->prev_burst_time, par->child_burst_cache);
+ }
+
+ se->max_burst_time = se->prev_burst_time;
+}
+#endif // CONFIG_SCHED_BORE
+
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
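
A note on the averaging above: update_task_child_burst_time_cache()
pre-shifts each child's max_burst_time right by CHILD_BURST_CUTOFF_BITS
(9) before summing, trading the low 9 bits of precision for enough
headroom that the u64 running sum cannot overflow even with many
long-running children; the shift is undone after the division. The cache
is refreshed at most once per sched_burst_cache_lifetime nanoseconds
(15 ms by default). A standalone sketch of the arithmetic, using a plain
array in place of the task list (illustrative only):

/* child_burst_avg.c: userspace re-derivation of the cache math. */
#include <stdint.h>

#define CHILD_BURST_CUTOFF_BITS 9

static uint64_t child_burst_avg(const uint64_t *max_burst, unsigned int n)
{
	uint64_t sum = 0;
	unsigned int i;

	for (i = 0; i < n; i++)	/* each sample drops its 9 low bits */
		sum += max_burst[i] >> CHILD_BURST_CUTOFF_BITS;

	return n ? (sum / n) << CHILD_BURST_CUTOFF_BITS : 0;
}

In update_task_initial_burst_time() a forked task then inherits
max(its own prev_burst_time, the parent's cached value), so children of
bursty parents start out pre-penalized rather than with a clean slate.
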
@@ -4506,6 +4557,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+#ifdef CONFIG_SCHED_BORE
+ __sched_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
p->se.vlag = 0;
INIT_LIST_HEAD(&p->se.group_node);
@@ -4735,6 +4789,9 @@ late_initcall(sched_core_sysctl_init);
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
__sched_fork(clone_flags, p);
+#ifdef CONFIG_SCHED_BORE
+ update_task_initial_burst_time(p);
+#endif // CONFIG_SCHED_BORE
/*
* We mark the process as NEW here. This guarantees that
* nobody will actually run it, and a signal or other external
@@ -9968,6 +10025,11 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 2.4.2 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5c743bcb340d..755ef4c8d34b 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -348,6 +348,7 @@ static __init int sched_init_debug(void)
#endif
debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+ debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -595,6 +596,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
+#ifdef CONFIG_SCHED_BORE
+ SEQ_printf(m, " %2d", p->se.penalty_score);
+#endif
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 36dcf4770830..30080b227866 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ * Copyright (C) 2021-2023 Masahito Suzuki <firelzrd@gmail.com>
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
@@ -66,17 +69,17 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (default SCHED_TUNABLESCALING_NONE = *1)
*/
-unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
/*
* Minimal preemption granularity for CPU-bound tasks:
*
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 3 msec * 1, units: nanoseconds)
*/
-unsigned int sysctl_sched_base_slice = 750000ULL;
-static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
+unsigned int sysctl_sched_base_slice = 3000000ULL;
+static unsigned int normalized_sysctl_sched_base_slice = 3000000ULL;
/*
* After fork, child runs first. If set to 0 (default) then
@@ -84,8 +87,75 @@ static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
*/
unsigned int sysctl_sched_child_runs_first __read_mostly;
+/*
+ * SCHED_OTHER wake-up granularity.
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ *
+ * (default: 3.2 msec * 1, units: nanoseconds)
+ */
+unsigned int sysctl_sched_wakeup_granularity = 3200000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 3200000UL;
+
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#ifdef CONFIG_SCHED_BORE
+unsigned int __read_mostly sched_bore = 1;
+unsigned int __read_mostly sched_burst_cache_lifetime = 15000000;
+unsigned int __read_mostly sched_burst_penalty_offset = 18;
+unsigned int __read_mostly sched_burst_penalty_scale = 1292;
+unsigned int __read_mostly sched_burst_smoothness = 1;
+static int three = 3;
+static int sixty_four = 64;
+static int maxval_12_bits = 4095;
+
+#define FIXED_SHIFT 10
+#define FIXED_ONE (1 << FIXED_SHIFT)
+typedef u32 fixed;
+
+static void update_burst_score(struct sched_entity *se) {
+ u64 burst_time = se->max_burst_time;
+
+ int msb = fls64(burst_time);
+ fixed integer_part = msb << FIXED_SHIFT;
+ fixed fractional_part = burst_time << (64 - msb) << 1 >> (64 - FIXED_SHIFT);
+ fixed greed = integer_part | fractional_part;
+
+ fixed tolerance = sched_burst_penalty_offset << FIXED_SHIFT;
+ fixed penalty = max(0, (s32)greed - (s32)tolerance);
+ fixed scaled_penalty = penalty * sched_burst_penalty_scale >> 10;
+
+ u8 score = min(39U, scaled_penalty >> FIXED_SHIFT);
+ se->penalty_score = score;
+}
+
+static inline u64 penalty_scale(u64 delta, struct sched_entity *se) {
+ return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->penalty_score], 22);
+}
+
+static inline u64 __binary_smooth(u64 new, u64 old, unsigned int smoothness) {
+ return (new + old * ((1 << smoothness) - 1)) >> smoothness;
+}
+
+void restart_burst(struct sched_entity *se) {
+ se->max_burst_time = se->prev_burst_time = __binary_smooth(
+ se->burst_time, se->prev_burst_time, sched_burst_smoothness);
+ se->burst_time = 0;
+}
+
+#define calc_delta_fair(delta, se) __calc_delta_fair(delta, se, true)
+#define calc_delta_fair_unscaled(delta, se) __calc_delta_fair(delta, se, false)
+static inline u64
+__calc_delta_fair(u64 delta, struct sched_entity *se, bool bscale);
+
+static s64 wakeup_preempt_backstep_delta(u64 rtime, struct sched_entity *se) {
+ u64 delta = calc_delta_fair_unscaled(rtime, se);
+ return delta - penalty_scale(delta, se);
+}
+#endif // CONFIG_SCHED_BORE
+
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
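
To make the fixed-point math above concrete: update_burst_score()
approximates log2(max_burst_time) in 10-bit fixed point (the integer
part from fls64(), the fraction from a linear read of the next 10 bits
below the MSB), subtracts the tolerance sched_burst_penalty_offset,
scales the excess by sched_burst_penalty_scale/1024, and clamps the
result to a 0..39 score. penalty_scale() then runs vruntime deltas
through the same sched_prio_to_wmult[] table used for nice levels,
shifted by 22, so score 20 (the nice-0 slot, whose wmult is 2^22) is
neutral, lower scores slow vruntime growth, and higher scores inflate
it. A userspace re-derivation with a worked value, assuming the default
tunables of offset 18 and scale 1292 (illustrative only):

/* burst_score.c: userspace mirror of update_burst_score(). */
#include <stdint.h>
#include <stdio.h>

#define FIXED_SHIFT 10

static int fls64_(uint64_t x)	/* 1-based index of the highest set bit */
{
	int i = 0;

	while (x) {
		i++;
		x >>= 1;
	}
	return i;
}

static unsigned int burst_score(uint64_t burst, uint32_t offset, uint32_t scale)
{
	if (!burst)
		return 0;

	int msb = fls64_(burst);
	/* log2(burst) in 10-bit fixed point: MSB index, then the
	 * next 10 bits as a linear approximation of the fraction */
	uint32_t greed = ((uint32_t)msb << FIXED_SHIFT) |
		(uint32_t)(burst << (64 - msb) << 1 >> (64 - FIXED_SHIFT));
	int32_t penalty = (int32_t)greed - (int32_t)(offset << FIXED_SHIFT);

	if (penalty < 0)
		penalty = 0;

	uint32_t score = ((uint32_t)penalty * scale >> 10) >> FIXED_SHIFT;
	return score < 39 ? score : 39;
}

int main(void)
{
	/* with the defaults, bursts below 2^17 ns (~131 us) score 0;
	 * a 2 ms burst gives greed 22433, penalty 4001, scaled 5048 */
	printf("%u\n", burst_score(2000000, 18, 1292));	/* prints 4 */
	return 0;
}
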
@@ -145,6 +215,51 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
+#ifdef CONFIG_SCHED_BORE
+ {
+ .procname = "sched_bore",
+ .data = &sched_bore,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "sched_burst_cache_lifetime",
+ .data = &sched_burst_cache_lifetime,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sched_burst_penalty_offset",
+ .data = &sched_burst_penalty_offset,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sixty_four,
+ },
+ {
+ .procname = "sched_burst_penalty_scale",
+ .data = &sched_burst_penalty_scale,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &maxval_12_bits,
+ },
+ {
+ .procname = "sched_burst_smoothness",
+ .data = &sched_burst_smoothness,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &three,
+ },
+#endif // CONFIG_SCHED_BORE
{
.procname = "sched_child_runs_first",
.data = &sysctl_sched_child_runs_first,
@@ -238,6 +353,7 @@ static void update_sysctl(void)
#define SET_SYSCTL(name) \
(sysctl_##name = (factor) * normalized_sysctl_##name)
SET_SYSCTL(sched_base_slice);
+ SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}
@@ -308,11 +424,19 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
/*
* delta /= w
*/
+#ifdef CONFIG_SCHED_BORE
+static inline u64
+__calc_delta_fair(u64 delta, struct sched_entity *se, bool bscale)
+#else // CONFIG_SCHED_BORE
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
+#endif // CONFIG_SCHED_BORE
{
if (unlikely(se->load.weight != NICE_0_LOAD))
delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+#ifdef CONFIG_SCHED_BORE
+ if (bscale && likely(sched_bore)) delta = penalty_scale(delta, se);
+#endif // CONFIG_SCHED_BORE
return delta;
}
@@ -708,7 +832,11 @@ void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
SCHED_WARN_ON(!se->on_rq);
lag = avg_vruntime(cfs_rq) - se->vruntime;
+#ifdef CONFIG_SCHED_BORE
+ limit = calc_delta_fair_unscaled(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#else // CONFIG_SCHED_BORE
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#endif // CONFIG_SCHED_BORE
se->vlag = clamp(lag, -limit, limit);
}
@@ -946,6 +1074,7 @@ int sched_update_scaling(void)
#define WRT_SYSCTL(name) \
(normalized_sysctl_##name = sysctl_##name / (factor))
WRT_SYSCTL(sched_base_slice);
+ WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL
return 0;
@@ -1123,6 +1252,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);
+#ifdef CONFIG_SCHED_BORE
+ curr->burst_time += delta_exec;
+ curr->max_burst_time = max(curr->max_burst_time, curr->burst_time);
+ update_burst_score(curr);
+#endif // CONFIG_SCHED_BORE
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
@@ -5237,6 +5371,9 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
/*
* Pick the next process, keeping these things in mind, in this order:
* 1) keep things fair between processes/task groups
@@ -5247,14 +5384,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
static struct sched_entity *
pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
+ struct sched_entity *candidate = pick_eevdf(cfs_rq);
/*
* Enabling NEXT_BUDDY will affect latency but not fairness.
*/
if (sched_feat(NEXT_BUDDY) &&
- cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
+ cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next) &&
+ wakeup_preempt_entity(cfs_rq->next, candidate) < 1)
return cfs_rq->next;
- return pick_eevdf(cfs_rq);
+ return candidate;
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -6504,6 +6643,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
hrtick_update(rq);
}
+static unsigned long wakeup_gran(struct sched_entity *se)
+{
+ unsigned long gran = sysctl_sched_wakeup_granularity;
+#ifdef CONFIG_SCHED_BORE
+ return calc_delta_fair_unscaled(gran, se);
+#else // CONFIG_SCHED_BORE
+ return calc_delta_fair(gran, se);
+#endif // CONFIG_SCHED_BORE
+}
+
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+ s64 gran, vdiff = curr->vruntime - se->vruntime;
+#ifdef CONFIG_SCHED_BORE
+ if (likely(sched_bore)) {
+ u64 rtime = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ vdiff += wakeup_preempt_backstep_delta(rtime, curr)
+ - wakeup_preempt_backstep_delta(rtime, se);
+ }
+#endif // CONFIG_SCHED_BORE
+
+ if (vdiff <= 0)
+ return -1;
+
+ gran = wakeup_gran(se);
+ if (vdiff > gran)
+ return 1;
+
+ return 0;
+}
+
static void set_next_buddy(struct sched_entity *se);
/*
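
wakeup_preempt_entity() above restores the classic CFS three-way
convention that the EEVDF rewrite had dropped: -1 means curr is not
ahead of the wakee in virtual time (never preempt), 1 means it is ahead
by more than the wakeup granularity scaled for the wakee (preempt), and
0 means the difference falls inside the granularity band. Under BORE,
vdiff is first biased by the difference between the penalty-free and
penalized vruntime each entity would accrue over curr's current slice
runtime, so burst-penalty inflation of vruntime does not by itself
trigger preemption. pick_next_entity() and check_preempt_wakeup() both
consult this result before preempting. A bare sketch of the decision,
with the BORE adjustment omitted (illustrative only):

/* -1: don't preempt; 1: preempt; 0: inside the granularity band. */
#include <stdint.h>

static int preempt_decision(int64_t vdiff, int64_t gran)
{
	if (vdiff <= 0)
		return -1;
	return vdiff > gran ? 1 : 0;
}

/* With the default 3.2 ms granularity and a nice-0 wakee:
 * preempt_decision(4000000, 3200000) == 1 (preempt),
 * preempt_decision(1000000, 3200000) == 0 (hold). */
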
@@ -6522,6 +6693,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
util_est_dequeue(&rq->cfs, p);
for_each_sched_entity(se) {
+#ifdef CONFIG_SCHED_BORE
+ if (task_sleep) restart_burst(se);
+#endif // CONFIG_SCHED_BORE
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
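
This dequeue path is where a burst officially ends: on a sleeping
dequeue (and on yield, further below) restart_burst() folds the finished
burst into prev_burst_time via __binary_smooth(), an exponential moving
average with alpha = 1/2^smoothness, then zeroes burst_time and resets
max_burst_time to the smoothed value. With the default
sched_burst_smoothness of 1, the new value is simply the mean of the
burst that just ended and the old value, so the memory of one long burst
halves on every sleep/yield cycle. A sketch with sample values
(illustrative only):

/* Mirror of __binary_smooth(): EMA with alpha = 1/2^smoothness. */
#include <stdint.h>

static uint64_t binary_smooth(uint64_t new_val, uint64_t old_val,
			      unsigned int smoothness)
{
	return (new_val + old_val * ((1ULL << smoothness) - 1)) >> smoothness;
}

/* binary_smooth(8000, 0, 1) == 4000; feeding in zero-length bursts
 * afterwards decays the memory: 2000, 1000, 500, ... */
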
@@ -8012,7 +8186,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
/*
* XXX pick_eevdf(cfs_rq) != se ?
*/
- if (pick_eevdf(cfs_rq) == pse)
+ if ((pick_eevdf(cfs_rq) == pse) && (wakeup_preempt_entity(se, pse) == 1))
goto preempt;
return;
@@ -8225,6 +8399,9 @@ static void yield_task_fair(struct rq *rq)
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se;
+#ifdef CONFIG_SCHED_BORE
+ restart_burst(se);
+#endif // CONFIG_SCHED_BORE
/*
* Are we the only task in the tree?
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ca95044a7479..a7d34d1b28c5 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -13,7 +13,11 @@ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
* wakeup-preemption), since its likely going to consume data we
* touched, increases cache locality.
*/
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(NEXT_BUDDY, true)
+#else // CONFIG_SCHED_BORE
SCHED_FEAT(NEXT_BUDDY, false)
+#endif // CONFIG_SCHED_BORE
/*
* Consider buddies to be cache hot, decreases the likeliness of a
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 96b1ae519f20..cc0a17fb23c2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2482,6 +2482,7 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_base_slice;
+extern unsigned int sysctl_sched_wakeup_granularity;
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
--
2.41.0