Update patches/0002-bore-cachy.patch

ferreo 2025-01-20 16:37:34 +01:00
parent 5bf06daace
commit 797c9c5962


@@ -1,6 +1,6 @@
-From 2485f3af3e13d470a6bf3b928725a50b54cb3f55 Mon Sep 17 00:00:00 2001
+From 2aaaad0215c8d15c5133eb2bc1c77c021edff609 Mon Sep 17 00:00:00 2001
From: Eric Naim <dnaim@cachyos.org>
-Date: Tue, 7 Jan 2025 17:26:29 +0700
+Date: Mon, 20 Jan 2025 09:19:36 +0700
Subject: [PATCH] bore-cachy
Signed-off-by: Eric Naim <dnaim@cachyos.org>
@@ -11,17 +11,17 @@ Signed-off-by: Eric Naim <dnaim@cachyos.org>
kernel/Kconfig.hz | 17 ++
kernel/fork.c | 6 +
kernel/sched/Makefile | 1 +
-kernel/sched/bore.c | 446 +++++++++++++++++++++++++++++++++++++
+kernel/sched/bore.c | 443 +++++++++++++++++++++++++++++++++++++
kernel/sched/core.c | 6 +
kernel/sched/debug.c | 61 ++++-
-kernel/sched/fair.c | 89 ++++++--
+kernel/sched/fair.c | 86 +++++--
kernel/sched/sched.h | 9 +
-11 files changed, 692 insertions(+), 18 deletions(-)
+11 files changed, 686 insertions(+), 18 deletions(-)
create mode 100644 include/linux/sched/bore.h
create mode 100644 kernel/sched/bore.c
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 02eaf84c8626..c76461bd57f3 100644
+index 64934e0830af..7ec02a323014 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -538,6 +538,15 @@ struct sched_statistics {
@@ -58,7 +58,7 @@ index 02eaf84c8626..c76461bd57f3 100644
diff --git a/include/linux/sched/bore.h b/include/linux/sched/bore.h
new file mode 100644
-index 000000000000..a36947e12c2f
+index 000000000000..a8faabc2885e
--- /dev/null
+++ b/include/linux/sched/bore.h
@@ -0,0 +1,40 @@
@@ -68,7 +68,7 @@ index 000000000000..a36947e12c2f
+
+#ifndef _LINUX_SCHED_BORE_H
+#define _LINUX_SCHED_BORE_H
-+#define SCHED_BORE_VERSION "5.9.5"
++#define SCHED_BORE_VERSION "5.9.6"
+
+#ifdef CONFIG_SCHED_BORE
+extern u8 __read_mostly sched_bore;
@@ -103,10 +103,10 @@ index 000000000000..a36947e12c2f
+#endif // CONFIG_SCHED_BORE
+#endif // _LINUX_SCHED_BORE_H
diff --git a/init/Kconfig b/init/Kconfig
-index 857869dbc22c..9bd4551a7c3a 100644
+index 9437171030e2..c6f811d72dfd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1361,6 +1361,23 @@ config CHECKPOINT_RESTORE
+@@ -1362,6 +1362,23 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
@@ -158,10 +158,10 @@ index 0f78364efd4f..83a6b919ab29 100644
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/fork.c b/kernel/fork.c
-index d27b8f5582df..86adb9321e2d 100644
+index e919c8c3a121..726d3daa0498 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -117,6 +117,8 @@
+@@ -116,6 +116,8 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -170,7 +170,7 @@ index d27b8f5582df..86adb9321e2d 100644
#include <trace/events/sched.h>
#define CREATE_TRACE_POINTS
-@@ -2522,6 +2524,10 @@ __latent_entropy struct task_struct *copy_process(
+@@ -2524,6 +2526,10 @@ __latent_entropy struct task_struct *copy_process(
p->start_time = ktime_get_ns();
p->start_boottime = ktime_get_boottime_ns();
@@ -192,10 +192,10 @@ index 976092b7bd45..293aad675444 100644
+obj-y += bore.o
diff --git a/kernel/sched/bore.c b/kernel/sched/bore.c
new file mode 100644
-index 000000000000..d55cd32b34ea
+index 000000000000..23aeb5649479
--- /dev/null
+++ b/kernel/sched/bore.c
-@@ -0,0 +1,446 @@
+@@ -0,0 +1,443 @@
+/*
+ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
@@ -395,10 +395,9 @@ index 000000000000..d55cd32b34ea
+ parent = parent->real_parent;
+
+ bc = &parent->se.child_burst;
-+ spin_lock(&bc->lock);
++ guard(spinlock)(&bc->lock);
+ if (burst_cache_expired(bc, now))
+ update_child_burst_direct(parent, now);
-+ spin_unlock(&bc->lock);
+
+ return bc->score;
+}
@@ -461,11 +460,10 @@ index 000000000000..d55cd32b34ea
+ }
+
+ bc = &anc->se.child_burst;
-+ spin_lock(&bc->lock);
++ guard(spinlock)(&bc->lock);
+ if (burst_cache_expired(bc, now))
+ update_child_burst_topological(
+ anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum);
-+ spin_unlock(&bc->lock);
+
+ return bc->score;
+}
@@ -486,10 +484,9 @@ index 000000000000..d55cd32b34ea
+static inline u8 inherit_burst_tg(struct task_struct *p, u64 now) {
+ struct task_struct *parent = rcu_dereference(p->group_leader);
+ struct sched_burst_cache *bc = &parent->se.group_burst;
-+ spin_lock(&bc->lock);
++ guard(spinlock)(&bc->lock);
+ if (burst_cache_expired(bc, now))
+ update_tg_burst(parent, now);
-+ spin_unlock(&bc->lock);
+
+ return bc->score;
+}
@@ -643,7 +640,7 @@ index 000000000000..d55cd32b34ea
+#endif // CONFIG_SYSCTL
+#endif // CONFIG_SCHED_BORE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index d07dc87787df..3829d932a028 100644
+index 3e5a6bf587f9..fb4bb3fa5a96 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -97,6 +97,8 @@
@@ -655,7 +652,7 @@ index d07dc87787df..3829d932a028 100644
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
-@@ -8377,6 +8379,10 @@ void __init sched_init(void)
+@@ -8481,6 +8483,10 @@ void __init sched_init(void)
BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif
@ -667,7 +664,7 @@ index d07dc87787df..3829d932a028 100644
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 82b165bf48c4..d2d48cb6a668 100644
+index a1be00a988bf..66fcb229007d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -167,7 +167,53 @@ static const struct file_operations sched_feat_fops = {
@@ -733,7 +730,7 @@ index 82b165bf48c4..d2d48cb6a668 100644
#endif /* SMP */
#ifdef CONFIG_PREEMPT_DYNAMIC
-@@ -504,13 +550,20 @@ static __init int sched_init_debug(void)
+@@ -505,13 +551,20 @@ static __init int sched_init_debug(void)
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
@ -754,7 +751,7 @@ index 82b165bf48c4..d2d48cb6a668 100644
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost); debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate); debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -755,6 +808,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) @@ -756,6 +809,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
@ -764,7 +761,7 @@ index 82b165bf48c4..d2d48cb6a668 100644
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
-@@ -1244,6 +1300,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+@@ -1245,6 +1301,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.load.weight);
#ifdef CONFIG_SMP
@ -775,7 +772,7 @@ index 82b165bf48c4..d2d48cb6a668 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index d06d306b7fba..2edb57febcc5 100644
+index c532ffb153b4..c55d61977364 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -55,6 +55,8 @@
@@ -866,17 +863,17 @@ index d06d306b7fba..2edb57febcc5 100644
void __init sched_init_granularity(void)
{
-@@ -708,6 +718,9 @@ static s64 entity_lag(u64 avruntime, struct sched_entity *se)
+@@ -710,6 +720,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
-vlag = avruntime - se->vruntime;
+vlag = avg_vruntime(cfs_rq) - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#ifdef CONFIG_SCHED_BORE
+ limit >>= !!sched_bore;
+#endif // CONFIG_SCHED_BORE
-return clamp(vlag, -limit, limit);
+se->vlag = clamp(vlag, -limit, limit);
}
-@@ -939,6 +952,10 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+@@ -934,6 +947,10 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
* until it gets a new slice. See the HACK in set_next_entity().
*/
if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
@@ -887,7 +884,7 @@ index d06d306b7fba..2edb57febcc5 100644
return curr;
/* Pick the leftmost entity if it's eligible */
-@@ -997,6 +1014,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+@@ -992,6 +1009,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
@ -895,7 +892,7 @@ index d06d306b7fba..2edb57febcc5 100644
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
-@@ -1008,6 +1026,7 @@ int sched_update_scaling(void)
+@@ -1003,6 +1021,7 @@ int sched_update_scaling(void)
return 0;
}
@ -903,7 +900,7 @@ index d06d306b7fba..2edb57febcc5 100644
#endif
#endif
-@@ -1238,6 +1257,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -1233,6 +1252,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (unlikely(delta_exec <= 0))
return;
@ -914,26 +911,16 @@ index d06d306b7fba..2edb57febcc5 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
resched = update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
-@@ -3893,7 +3916,7 @@ static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
-se->deadline = avruntime + vslice;
-}
+@@ -3784,7 +3807,7 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
-static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
+void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
unsigned long weight)
{
bool curr = cfs_rq->curr == se;
-@@ -5302,6 +5325,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
-*
-* EEVDF: placement strategy #1 / #2
-*/
-+#ifdef CONFIG_SCHED_BORE
-+ if (se->vlag)
-+#endif // CONFIG_SCHED_BORE
-if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
-struct sched_entity *curr = cfs_rq->curr;
-unsigned long load;
@@ -5377,7 +5403,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->rel_deadline = 0;
return;
}
@ -946,7 +933,7 @@ index d06d306b7fba..2edb57febcc5 100644
/*
* When joining the competition; the existing tasks will be,
* on average, halfway through their slice, as such start tasks
-@@ -7259,6 +7289,15 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -7148,6 +7175,15 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
util_est_dequeue(&rq->cfs, p);
util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
@ -962,7 +949,7 @@ index d06d306b7fba..2edb57febcc5 100644
if (dequeue_entities(rq, &p->se, flags) < 0)
return false;
-@@ -9072,16 +9111,25 @@ static void yield_task_fair(struct rq *rq)
+@@ -8961,16 +8997,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@ -988,7 +975,7 @@ index d06d306b7fba..2edb57febcc5 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
-@@ -13133,6 +13181,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -13044,6 +13089,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
static void task_fork_fair(struct task_struct *p)
{
set_task_max_allowed_capacity(p);
@ -998,7 +985,7 @@ index d06d306b7fba..2edb57febcc5 100644
}
/*
-@@ -13243,6 +13294,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
+@@ -13154,6 +13202,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
@ -1010,10 +997,10 @@ index d06d306b7fba..2edb57febcc5 100644
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index d6e2ca8c8cd2..f9677c5c4831 100644
+index dee2797009e3..bdc0b9c037d4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2084,7 +2084,11 @@ static inline void update_sched_domain_debugfs(void) { }
+@@ -2091,7 +2091,11 @@ static inline void update_sched_domain_debugfs(void) { }
static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif
@ -1025,7 +1012,7 @@ index d6e2ca8c8cd2..f9677c5c4831 100644
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
-@@ -2834,7 +2838,12 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
+@@ -2828,7 +2832,12 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
@ -1039,5 +1026,5 @@ index d6e2ca8c8cd2..f9677c5c4831 100644
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
--
-2.47.1
+2.48.1
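
Note on the substance of this update: besides rebasing BORE from 5.9.5 to 5.9.6 (new upstream context, shifted hunk offsets, and the dropped se->vlag gate in place_entity()), the main code change in kernel/sched/bore.c is that the three spin_lock()/spin_unlock() pairs around the burst-cache updates become guard(spinlock)(&bc->lock), the scope-based lock guard from the kernel's <linux/cleanup.h>. The guard releases the lock automatically when the enclosing scope exits, which is what deletes the explicit unlock lines (446 -> 443), and it also means the following "return bc->score;" now executes before the lock is dropped instead of after it.

For readers unfamiliar with the pattern, here is a minimal userspace sketch of the same idea built on the GCC/Clang cleanup attribute; GUARD() and unlock_cleanup() are illustrative stand-ins, not the kernel API:

/*
 * Userspace sketch of scope-based locking, mimicking the kernel's
 * guard(spinlock)(...) from <linux/cleanup.h>.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_score = 42;

/* Runs automatically when the variable declared by GUARD() leaves scope. */
static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Take the lock now; release it on every path out of the enclosing scope. */
#define GUARD(m) \
	pthread_mutex_t *guard_var_ __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static int read_score(void)
{
	GUARD(&cache_lock);
	/* As with guard(spinlock), the return value is computed before the
	 * cleanup fires, so this read still happens under the lock. */
	return cached_score;
}

int main(void)
{
	printf("score = %d\n", read_score());
	return 0;
}

As in the kernel macro, the cleanup handler runs only after the return expression has been evaluated, so the guarded read stays under the lock on every exit path, with no explicit unlock to forget.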