linux-pikaos-6.11.0/patches/0003-bore-cachy-ext.patch

From 35259c1c06596a086582bb3c63461b039e1e517d Mon Sep 17 00:00:00 2001
From: Piotr Gorski <lucjan.lucjanov@gmail.com>
Date: Fri, 13 Sep 2024 14:15:05 +0200
Subject: [PATCH] bore-cachy-ext

Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
---
include/linux/sched.h | 10 ++
init/Kconfig | 17 ++
kernel/Kconfig.hz | 16 ++
kernel/sched/core.c | 141 +++++++++++++++
kernel/sched/debug.c | 60 ++++++-
kernel/sched/fair.c | 379 +++++++++++++++++++++++++++++++++++++---
kernel/sched/features.h | 20 ++-
kernel/sched/sched.h | 7 +
8 files changed, 623 insertions(+), 27 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5b4f78fe3..b9e5ea2aa 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -551,6 +551,16 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
+#ifdef CONFIG_SCHED_BORE
+ u64 burst_time;
+ u8 prev_burst_penalty;
+ u8 curr_burst_penalty;
+ u8 burst_penalty;
+ u8 burst_score;
+ u8 child_burst;
+ u32 child_burst_cnt;
+ u64 child_burst_last_cached;
+#endif // CONFIG_SCHED_BORE
s64 vlag;
u64 slice;
diff --git a/init/Kconfig b/init/Kconfig
index e1a88d48d..3aea8e43c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1327,6 +1327,23 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
+config SCHED_BORE
+ bool "Burst-Oriented Response Enhancer"
+ default y
+ help
+ On desktop and mobile systems, users generally want interactive
+ tasks to stay responsive no matter what runs in the background.
+
+ Enabling this kernel feature makes the scheduler distinguish tasks
+ by their burst time (the runtime accumulated since the task last
+ slept or yielded) and prioritize the less bursty ones.
+ Such tasks typically include the window compositor, widget
+ backends, terminal emulators, video playback, games and so on.
+ At a small cost to scheduling fairness, this may improve
+ responsiveness, especially under heavy background workloads.
+
+ If unsure, say Y here.
+
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 0f78364ef..b50189ee5 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -79,5 +79,21 @@ config HZ
default 750 if HZ_750
default 1000 if HZ_1000
+config MIN_BASE_SLICE_NS
+ int "Default value for min_base_slice_ns"
+ default 2000000
+ help
+ The BORE Scheduler automatically calculates the optimal base
+ slice for the configured HZ using the following equation:
+
+ base_slice_ns = max(min_base_slice_ns, 1000000000/HZ)
+
+ This option sets the default lower bound of the base slice,
+ preventing loss of task throughput due to overscheduling.
+
+ Setting this value too high can cause the system to boot with
+ an unnecessarily large base slice, resulting in high scheduling
+ latency and poor system responsiveness.
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c792a6feb..dfb93c5f7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4336,6 +4336,136 @@ int wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
+#ifdef CONFIG_SCHED_BORE
+extern u8 sched_burst_fork_atavistic;
+extern uint sched_burst_cache_lifetime;
+
+static void __init sched_init_bore(void) {
+ init_task.se.burst_time = 0;
+ init_task.se.prev_burst_penalty = 0;
+ init_task.se.curr_burst_penalty = 0;
+ init_task.se.burst_penalty = 0;
+ init_task.se.burst_score = 0;
+ init_task.se.child_burst_last_cached = 0;
+}
+
+inline void sched_fork_bore(struct task_struct *p) {
+ p->se.burst_time = 0;
+ p->se.curr_burst_penalty = 0;
+ p->se.burst_score = 0;
+ p->se.child_burst_last_cached = 0;
+}
+
+static u32 count_child_tasks(struct task_struct *p) {
+ struct task_struct *child;
+ u32 cnt = 0;
+ list_for_each_entry(child, &p->children, sibling) {cnt++;}
+ return cnt;
+}
+
+static inline bool task_burst_inheritable(struct task_struct *p) {
+ return (p->sched_class == &fair_sched_class);
+}
+
+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
+ u64 expiration_time =
+ p->se.child_burst_last_cached + sched_burst_cache_lifetime;
+ return ((s64)(expiration_time - now) < 0);
+}
+
+static void __update_child_burst_cache(
+ struct task_struct *p, u32 cnt, u32 sum, u64 now) {
+ u8 avg = 0;
+ if (cnt) avg = sum / cnt;
+ p->se.child_burst = max(avg, p->se.burst_penalty);
+ p->se.child_burst_cnt = cnt;
+ p->se.child_burst_last_cached = now;
+}
+
+static inline void update_child_burst_direct(struct task_struct *p, u64 now) {
+ struct task_struct *child;
+ u32 cnt = 0, sum = 0;
+
+ list_for_each_entry(child, &p->children, sibling) {
+ if (!task_burst_inheritable(child)) continue;
+ cnt++;
+ sum += child->se.burst_penalty;
+ }
+
+ __update_child_burst_cache(p, cnt, sum, now);
+}
+
+static inline u8 __inherit_burst_direct(struct task_struct *p, u64 now) {
+ struct task_struct *parent = p->real_parent;
+ if (child_burst_cache_expired(parent, now))
+ update_child_burst_direct(parent, now);
+
+ return parent->se.child_burst;
+}
+
+static void update_child_burst_topological(
+ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
+ struct task_struct *child, *dec;
+ u32 cnt = 0, dcnt = 0, sum = 0;
+
+ list_for_each_entry(child, &p->children, sibling) {
+ dec = child;
+ while ((dcnt = count_child_tasks(dec)) == 1)
+ dec = list_first_entry(&dec->children, struct task_struct, sibling);
+
+ if (!dcnt || !depth) {
+ if (!task_burst_inheritable(dec)) continue;
+ cnt++;
+ sum += dec->se.burst_penalty;
+ continue;
+ }
+ if (!child_burst_cache_expired(dec, now)) {
+ cnt += dec->se.child_burst_cnt;
+ sum += (u32)dec->se.child_burst * dec->se.child_burst_cnt;
+ continue;
+ }
+ update_child_burst_topological(dec, now, depth - 1, &cnt, &sum);
+ }
+
+ __update_child_burst_cache(p, cnt, sum, now);
+ *acnt += cnt;
+ *asum += sum;
+}
+
+static inline u8 __inherit_burst_topological(struct task_struct *p, u64 now) {
+ struct task_struct *anc = p->real_parent;
+ u32 cnt = 0, sum = 0;
+
+ while (anc->real_parent != anc && count_child_tasks(anc) == 1)
+ anc = anc->real_parent;
+
+ if (child_burst_cache_expired(anc, now))
+ update_child_burst_topological(
+ anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum);
+
+ return anc->se.child_burst;
+}
+
+static inline void inherit_burst(struct task_struct *p) {
+ u8 burst_cache;
+ u64 now = ktime_get_ns();
+
+ read_lock(&tasklist_lock);
+ burst_cache = likely(sched_burst_fork_atavistic)?
+ __inherit_burst_topological(p, now):
+ __inherit_burst_direct(p, now);
+ read_unlock(&tasklist_lock);
+
+ p->se.prev_burst_penalty = max(p->se.prev_burst_penalty, burst_cache);
+}
+
+static void sched_post_fork_bore(struct task_struct *p) {
+ if (task_burst_inheritable(p))
+ inherit_burst(p);
+ p->se.burst_penalty = p->se.prev_burst_penalty;
+}
+#endif // CONFIG_SCHED_BORE
+
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
@@ -4352,6 +4482,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
+#ifdef CONFIG_SCHED_BORE
+ sched_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
p->se.vlag = 0;
p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
@@ -4686,6 +4819,9 @@ void sched_cancel_fork(struct task_struct *p)
void sched_post_fork(struct task_struct *p)
{
+#ifdef CONFIG_SCHED_BORE
+ sched_post_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
uclamp_post_fork(p);
scx_post_fork(p);
}
@@ -8283,6 +8419,11 @@ void __init sched_init(void)
BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.2.11 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index c057ef46c..3cab39e34 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
};
#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_BORE
+static ssize_t sched_min_base_slice_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[16];
+ unsigned int value;
+
+ if (cnt > 15)
+ cnt = 15;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+ buf[cnt] = '\0';
+
+ if (kstrtouint(buf, 10, &value))
+ return -EINVAL;
+ if (!value)
+ return -EINVAL;
+
+ sysctl_sched_min_base_slice = value;
+ sched_update_min_base_slice();
+
+ *ppos += cnt;
+ return cnt;
+}
+
+static int sched_min_base_slice_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%d\n", sysctl_sched_min_base_slice);
+ return 0;
+}
+
+static int sched_min_base_slice_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, sched_min_base_slice_show, NULL);
+}
+
+static const struct file_operations sched_min_base_slice_fops = {
+ .open = sched_min_base_slice_open,
+ .write = sched_min_base_slice_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#else // !CONFIG_SCHED_BORE
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
@@ -213,7 +258,7 @@ static const struct file_operations sched_scaling_fops = {
.llseek = seq_lseek,
.release = single_release,
};
-
+#endif // CONFIG_SCHED_BORE
#endif /* SMP */
#ifdef CONFIG_PREEMPT_DYNAMIC
@@ -347,13 +392,20 @@ static __init int sched_init_debug(void)
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
+#ifdef CONFIG_SCHED_BORE
+ debugfs_create_file("min_base_slice_ns", 0644, debugfs_sched, NULL, &sched_min_base_slice_fops);
+ debugfs_create_u32("base_slice_ns", 0400, debugfs_sched, &sysctl_sched_base_slice);
+#else // !CONFIG_SCHED_BORE
debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+#endif // CONFIG_SCHED_BORE
debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
#ifdef CONFIG_SMP
+#if !defined(CONFIG_SCHED_BORE)
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
+#endif // CONFIG_SCHED_BORE
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -596,6 +648,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
+#ifdef CONFIG_SCHED_BORE
+ SEQ_printf(m, " %2d", p->se.burst_score);
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
@@ -1069,6 +1124,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.load.weight);
#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_BORE
+ P(se.burst_score);
+#endif // CONFIG_SCHED_BORE
P(se.avg.load_sum);
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2928026d7..f7040962b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
@@ -64,28 +67,174 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant)
+ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
*/
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
+#else // !CONFIG_SCHED_BORE
unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+#endif // CONFIG_SCHED_BORE
/*
* Minimal preemption granularity for CPU-bound tasks:
*
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (BORE default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds)
+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-#ifdef CONFIG_CACHY
-unsigned int sysctl_sched_base_slice = 350000ULL;
-static unsigned int normalized_sysctl_sched_base_slice = 350000ULL;
-#else
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ;
+static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
+unsigned int sysctl_sched_min_base_slice = CONFIG_MIN_BASE_SLICE_NS;
+#else // !CONFIG_SCHED_BORE
unsigned int sysctl_sched_base_slice = 750000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
-#endif
+#endif // CONFIG_SCHED_BORE
-#ifdef CONFIG_CACHY
-const_debug unsigned int sysctl_sched_migration_cost = 300000UL;
-#else
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-#endif
+
+#ifdef CONFIG_SCHED_BORE
+u8 __read_mostly sched_bore = 1;
+u8 __read_mostly sched_burst_exclude_kthreads = 1;
+u8 __read_mostly sched_burst_smoothness_long = 1;
+u8 __read_mostly sched_burst_smoothness_short = 0;
+u8 __read_mostly sched_burst_fork_atavistic = 2;
+u8 __read_mostly sched_burst_penalty_offset = 22;
+uint __read_mostly sched_burst_penalty_scale = 1280;
+uint __read_mostly sched_burst_cache_lifetime = 60000000;
+uint __read_mostly sched_deadline_boost_mask = ENQUEUE_INITIAL
+ | ENQUEUE_WAKEUP;
+uint __read_mostly sched_deadline_preserve_mask = ENQUEUE_RESTORE
+ | ENQUEUE_MIGRATED;
+static int __maybe_unused sixty_four = 64;
+static int __maybe_unused maxval_12_bits = 4095;
+
+#define MAX_BURST_PENALTY (39U <<2)
+
+static inline u32 log2plus1_u64_u32f8(u64 v) {
+ u32 msb = fls64(v);
+ u8 fractional = (v << (64 - msb) >> 55);
+ return msb << 8 | fractional;
+}
+
+static inline u32 calc_burst_penalty(u64 burst_time) {
+ u32 greed, tolerance, penalty, scaled_penalty;
+
+ greed = log2plus1_u64_u32f8(burst_time);
+ tolerance = sched_burst_penalty_offset << 8;
+ penalty = max(0, (s32)greed - (s32)tolerance);
+ scaled_penalty = penalty * sched_burst_penalty_scale >> 16;
+
+ return min(MAX_BURST_PENALTY, scaled_penalty);
+}
+
+static inline u64 __scale_slice(u64 delta, u8 score) {
+ return mul_u64_u32_shr(delta, sched_prio_to_wmult[score], 22);
+}
+
+static inline u64 __unscale_slice(u64 delta, u8 score) {
+ return mul_u64_u32_shr(delta, sched_prio_to_weight[score], 10);
+}
+
+static void reweight_entity(
+ struct cfs_rq *cfs_rq, struct sched_entity *se, unsigned long weight);
+
+static void reweight_task_by_prio(struct task_struct *p, int prio)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct load_weight *load = &se->load;
+ unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+ reweight_entity(cfs_rq, se, weight);
+ load->inv_weight = sched_prio_to_wmult[prio];
+}
+
+static inline u8 effective_prio(struct task_struct *p) {
+ u8 prio = p->static_prio - MAX_RT_PRIO;
+ if (likely(sched_bore))
+ prio += p->se.burst_score;
+ return min(39, prio);
+}
+
+static void update_burst_score(struct sched_entity *se) {
+ if (!entity_is_task(se)) return;
+ struct task_struct *p = task_of(se);
+ u8 prev_prio = effective_prio(p);
+
+ u8 burst_score = 0;
+ if (!((p->flags & PF_KTHREAD) && likely(sched_burst_exclude_kthreads)))
+ burst_score = se->burst_penalty >> 2;
+ se->burst_score = burst_score;
+
+ u8 new_prio = effective_prio(p);
+ if (new_prio != prev_prio)
+ reweight_task_by_prio(p, new_prio);
+}
+
+static void update_burst_penalty(struct sched_entity *se) {
+ se->curr_burst_penalty = calc_burst_penalty(se->burst_time);
+ se->burst_penalty = max(se->prev_burst_penalty, se->curr_burst_penalty);
+ update_burst_score(se);
+}
+
+static inline u32 binary_smooth(u32 new, u32 old) {
+ int increment = new - old;
+ return (0 <= increment)?
+ old + ( increment >> (int)sched_burst_smoothness_long):
+ old - (-increment >> (int)sched_burst_smoothness_short);
+}
+
+static void restart_burst(struct sched_entity *se) {
+ se->burst_penalty = se->prev_burst_penalty =
+ binary_smooth(se->curr_burst_penalty, se->prev_burst_penalty);
+ se->curr_burst_penalty = 0;
+ se->burst_time = 0;
+ update_burst_score(se);
+}
+
+static void restart_burst_rescale_deadline(struct sched_entity *se) {
+ s64 vscaled, wremain, vremain = se->deadline - se->vruntime;
+ struct task_struct *p = task_of(se);
+ u8 prev_prio = effective_prio(p);
+ restart_burst(se);
+ u8 new_prio = effective_prio(p);
+ if (prev_prio > new_prio) {
+ wremain = __unscale_slice(abs(vremain), prev_prio);
+ vscaled = __scale_slice(wremain, new_prio);
+ if (unlikely(vremain < 0))
+ vscaled = -vscaled;
+ se->deadline = se->vruntime + vscaled;
+ }
+}
+
+static void reset_task_weights_bore(void) {
+ struct task_struct *task;
+ struct rq *rq;
+ struct rq_flags rf;
+
+ write_lock_irq(&tasklist_lock);
+ for_each_process(task) {
+ rq = task_rq(task);
+ rq_lock_irqsave(rq, &rf);
+ reweight_task_by_prio(task, effective_prio(task));
+ rq_unlock_irqrestore(rq, &rf);
+ }
+ write_unlock_irq(&tasklist_lock);
+}
+
+int sched_bore_update_handler(const struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
+ reset_task_weights_bore();
+
+ return 0;
+}
+#endif // CONFIG_SCHED_BORE
static int __init setup_sched_thermal_decay_shift(char *str)
{
@@ -130,12 +279,8 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
-#ifdef CONFIG_CACHY
-static unsigned int sysctl_sched_cfs_bandwidth_slice = 3000UL;
-#else
static unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
-#endif
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
@@ -144,6 +289,92 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
+#ifdef CONFIG_SCHED_BORE
+ {
+ .procname = "sched_bore",
+ .data = &sched_bore,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = sched_bore_update_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "sched_burst_exclude_kthreads",
+ .data = &sched_burst_exclude_kthreads,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "sched_burst_smoothness_long",
+ .data = &sched_burst_smoothness_long,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "sched_burst_smoothness_short",
+ .data = &sched_burst_smoothness_short,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "sched_burst_fork_atavistic",
+ .data = &sched_burst_fork_atavistic,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_THREE,
+ },
+ {
+ .procname = "sched_burst_penalty_offset",
+ .data = &sched_burst_penalty_offset,
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sixty_four,
+ },
+ {
+ .procname = "sched_burst_penalty_scale",
+ .data = &sched_burst_penalty_scale,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &maxval_12_bits,
+ },
+ {
+ .procname = "sched_burst_cache_lifetime",
+ .data = &sched_burst_cache_lifetime,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "sched_deadline_boost_mask",
+ .data = &sched_deadline_boost_mask,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "sched_deadline_preserve_mask",
+ .data = &sched_deadline_preserve_mask,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
@@ -201,6 +432,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
+#ifdef CONFIG_SCHED_BORE
+static void update_sysctl(void) {
+ sysctl_sched_base_slice =
+ max(sysctl_sched_min_base_slice, configured_sched_base_slice);
+}
+void sched_update_min_base_slice(void) { update_sysctl(); }
+#else // !CONFIG_SCHED_BORE
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
@@ -231,6 +469,7 @@ static void update_sysctl(void)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
+#endif // CONFIG_SCHED_BORE
void __init sched_init_granularity(void)
{
@@ -708,6 +947,9 @@ static s64 entity_lag(u64 avruntime, struct sched_entity *se)
vlag = avruntime - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#ifdef CONFIG_SCHED_BORE
+ limit >>= !!sched_bore;
+#endif // CONFIG_SCHED_BORE
return clamp(vlag, -limit, limit);
}
@@ -868,6 +1110,39 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
return __node_2_se(left);
}
+static inline bool pick_curr(struct cfs_rq *cfs_rq,
+ struct sched_entity *curr, struct sched_entity *wakee)
+{
+ /*
+ * Nothing to preserve...
+ */
+ if (!curr || !sched_feat(RESPECT_SLICE))
+ return false;
+
+ /*
+ * Allow preemption at the 0-lag point -- even if not all of the slice
+ * is consumed. Note: placement of positive lag can push V left and render
+ * @curr instantly ineligible irrespective the time on-cpu.
+ */
+ if (sched_feat(RUN_TO_PARITY) && !entity_eligible(cfs_rq, curr))
+ return false;
+
+ /*
+ * Don't preserve @curr when the @wakee has a shorter slice and earlier
+ * deadline. IOW, explicitly allow preemption.
+ */
+ if (sched_feat(PREEMPT_SHORT) && wakee &&
+ wakee->slice < curr->slice &&
+ (s64)(wakee->deadline - curr->deadline) < 0)
+ return false;
+
+ /*
+ * Preserve @curr to allow it to finish its first slice.
+ * See the HACK in set_next_entity().
+ */
+ return curr->vlag == curr->deadline;
+}
+
/*
* Earliest Eligible Virtual Deadline First
*
@@ -887,28 +1162,27 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
*
* Which allows tree pruning through eligibility.
*/
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *wakee)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
+ if (curr && !curr->on_rq)
+ curr = NULL;
+
/*
* We can safely skip eligibility check if there is only one entity
* in this cfs_rq, saving some cycles.
*/
if (cfs_rq->nr_running == 1)
- return curr && curr->on_rq ? curr : se;
-
- if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
- curr = NULL;
+ return curr ?: se;
/*
- * Once selected, run a task until it either becomes non-eligible or
- * until it gets a new slice. See the HACK in set_next_entity().
+ * Preserve @curr to let it finish its slice.
*/
- if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
+ if (pick_curr(cfs_rq, curr, wakee))
return curr;
/* Pick the leftmost entity if it's eligible */
@@ -967,6 +1241,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
+#if !defined(CONFIG_SCHED_BORE)
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
@@ -978,6 +1253,7 @@ int sched_update_scaling(void)
return 0;
}
+#endif // CONFIG_SCHED_BORE
#endif
#endif
@@ -1178,6 +1454,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (unlikely(delta_exec <= 0))
return;
+#ifdef CONFIG_SCHED_BORE
+ curr->burst_time += delta_exec;
+ update_burst_penalty(curr);
+#endif // CONFIG_SCHED_BORE
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
@@ -5193,6 +5473,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
s64 lag = 0;
se->slice = sysctl_sched_base_slice;
+#ifdef CONFIG_SCHED_BORE
+ if (likely(sched_bore) &&
+ (flags & ~sched_deadline_boost_mask & sched_deadline_preserve_mask))
+ vslice = se->deadline - se->vruntime;
+ else
+#endif // CONFIG_SCHED_BORE
vslice = calc_delta_fair(se->slice, se);
/*
@@ -5203,6 +5489,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*
* EEVDF: placement strategy #1 / #2
*/
+#ifdef CONFIG_SCHED_BORE
+ if (se->vlag)
+#endif // CONFIG_SCHED_BORE
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -5278,6 +5567,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* on average, halfway through their slice, as such start tasks
* off with half a slice to ease into the competition.
*/
+#ifdef CONFIG_SCHED_BORE
+ if (likely(sched_bore)) {
+ if (flags & sched_deadline_boost_mask)
+ vslice /= 2;
+ }
+ else
+#endif // CONFIG_SCHED_BORE
if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
vslice /= 2;
@@ -5492,7 +5788,7 @@ pick_next_entity(struct cfs_rq *cfs_rq)
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
return cfs_rq->next;
- return pick_eevdf(cfs_rq);
+ return pick_eevdf(cfs_rq, NULL);
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -6860,6 +7156,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
+#ifdef CONFIG_SCHED_BORE
+ if (task_sleep) {
+ cfs_rq = cfs_rq_of(se);
+ if (cfs_rq->curr == se)
+ update_curr(cfs_rq);
+ restart_burst(se);
+ }
+#endif // CONFIG_SCHED_BORE
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8428,7 +8732,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
/*
* XXX pick_eevdf(cfs_rq) != se ?
*/
- if (pick_eevdf(cfs_rq) == pse)
+ if (pick_eevdf(cfs_rq, pse) == pse)
goto preempt;
return;
@@ -8646,16 +8950,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
+#if !defined(CONFIG_SCHED_BORE)
if (unlikely(rq->nr_running == 1))
return;
clear_buddies(cfs_rq, se);
+#endif // CONFIG_SCHED_BORE
update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+#ifdef CONFIG_SCHED_BORE
+ restart_burst_rescale_deadline(se);
+ if (unlikely(rq->nr_running == 1))
+ return;
+
+ clear_buddies(cfs_rq, se);
+#endif // CONFIG_SCHED_BORE
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
@@ -12720,6 +13033,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
+#ifdef CONFIG_SCHED_BORE
+ update_burst_score(se);
+#endif // CONFIG_SCHED_BORE
place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
@@ -13303,3 +13619,16 @@ __init void init_sched_fair_class(void)
#endif /* SMP */
}
+
+#ifdef CONFIG_SCHED_BORE
+void reweight_task(struct task_struct *p, int prio)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ struct load_weight *load = &se->load;
+ unsigned long weight = scale_load(sched_prio_to_weight[prio]);
+
+ reweight_entity(cfs_rq, se, weight);
+ load->inv_weight = sched_prio_to_wmult[prio];
+}
+#endif // CONFIG_SCHED_BORE
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 143f55df8..bfeb9f653 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -5,8 +5,26 @@
* sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
*/
SCHED_FEAT(PLACE_LAG, true)
+/*
+ * Give new tasks half a slice to ease into the competition.
+ */
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
-SCHED_FEAT(RUN_TO_PARITY, true)
+/*
+ * Inhibit (wakeup) preemption until the current task has exhausted its slice.
+ */
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(RESPECT_SLICE, false)
+#else // !CONFIG_SCHED_BORE
+SCHED_FEAT(RESPECT_SLICE, true)
+#endif // CONFIG_SCHED_BORE
+/*
+ * Relax RESPECT_SLICE to allow preemption once current has reached 0-lag.
+ */
+SCHED_FEAT(RUN_TO_PARITY, false)
+/*
+ * Allow tasks with a shorter slice to disregard RESPECT_SLICE
+ */
+SCHED_FEAT(PREEMPT_SHORT, true)
/*
* Prefer to schedule the task we woke last (assuming it failed
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 207a04f02..c99430161 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2063,7 +2063,11 @@ static inline void update_sched_domain_debugfs(void) { }
static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif
+#ifdef CONFIG_SCHED_BORE
+extern void sched_update_min_base_slice(void);
+#else // !CONFIG_SCHED_BORE
extern int sched_update_scaling(void);
+#endif // CONFIG_SCHED_BORE
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
@@ -2736,6 +2740,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_base_slice;
+#ifdef CONFIG_SCHED_BORE
+extern unsigned int sysctl_sched_min_base_slice;
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
--
2.45.2.606.g9005149a4a
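
To see how the burst-penalty mapping added in the fair.c hunk behaves numerically, the stand-alone C sketch below re-implements log2plus1_u64_u32f8() and calc_burst_penalty() in user space with the patch's default tunables (sched_burst_penalty_offset = 22, sched_burst_penalty_scale = 1280). It is an illustration only, not part of the patch: it substitutes __builtin_clzll() for the kernel's fls64() and plain conditionals for the kernel's min()/max() helpers.

/* Illustrative user-space sketch of BORE's burst penalty math (not part of the patch). */
#include <stdint.h>
#include <stdio.h>

#define MAX_BURST_PENALTY (39U << 2)

static const uint32_t sched_burst_penalty_offset = 22;   /* patch default */
static const uint32_t sched_burst_penalty_scale  = 1280; /* patch default */

/* log2(v) + 1 in 24.8 fixed point; mirrors the fls64()-based kernel helper (v != 0) */
static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
    uint32_t msb = 64 - __builtin_clzll(v);            /* equivalent to fls64(v) for v != 0 */
    uint8_t fractional = (uint8_t)(v << (64 - msb) >> 55);
    return msb << 8 | fractional;
}

static uint32_t calc_burst_penalty(uint64_t burst_time)
{
    uint32_t greed     = log2plus1_u64_u32f8(burst_time);
    uint32_t tolerance = sched_burst_penalty_offset << 8;
    int32_t  diff      = (int32_t)greed - (int32_t)tolerance;
    uint32_t penalty   = diff > 0 ? (uint32_t)diff : 0;
    uint32_t scaled    = penalty * sched_burst_penalty_scale >> 16;

    return scaled < MAX_BURST_PENALTY ? scaled : MAX_BURST_PENALTY;
}

int main(void)
{
    /* In the patch, burst_score (= penalty >> 2) is added to the task's effective
     * priority (capped at 39), so longer bursts get progressively heavier weights. */
    const uint64_t samples_ns[] = { 1000000ULL, 10000000ULL, 100000000ULL, 1000000000ULL };

    for (unsigned i = 0; i < sizeof(samples_ns) / sizeof(samples_ns[0]); i++) {
        uint32_t penalty = calc_burst_penalty(samples_ns[i]);
        printf("burst %10llu ns -> penalty %3u, burst_score +%u\n",
               (unsigned long long)samples_ns[i], penalty, penalty >> 2);
    }
    return 0;
}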