Update patches/0002-bore-cachy.patch

ferreo 2025-02-03 11:22:27 +01:00
parent 79a764baab
commit 6e511f7387


@@ -1,6 +1,6 @@
-From 2aaaad0215c8d15c5133eb2bc1c77c021edff609 Mon Sep 17 00:00:00 2001
+From 0acf71b81503851ed2dcf85bcf2a952387539dbd Mon Sep 17 00:00:00 2001
From: Eric Naim <dnaim@cachyos.org>
-Date: Mon, 20 Jan 2025 09:19:36 +0700
+Date: Mon, 3 Feb 2025 11:09:45 +0800
Subject: [PATCH] bore-cachy
Signed-off-by: Eric Naim <dnaim@cachyos.org>
@@ -21,10 +21,10 @@ Signed-off-by: Eric Naim <dnaim@cachyos.org>
create mode 100644 kernel/sched/bore.c
diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 64934e0830af..7ec02a323014 100644
+index 9632e3318e0d..84bea3fe36f4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -538,6 +538,15 @@ struct sched_statistics {
+@@ -544,6 +544,15 @@ struct sched_statistics {
#endif /* CONFIG_SCHEDSTATS */
} ____cacheline_aligned;
@@ -40,7 +40,7 @@ index 64934e0830af..7ec02a323014 100644
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
-@@ -557,6 +566,15 @@ struct sched_entity {
+@@ -563,6 +572,15 @@ struct sched_entity {
u64 sum_exec_runtime;
u64 prev_sum_exec_runtime;
u64 vruntime;
@@ -103,10 +103,10 @@ index 000000000000..a8faabc2885e
+#endif // CONFIG_SCHED_BORE
+#endif // _LINUX_SCHED_BORE_H
diff --git a/init/Kconfig b/init/Kconfig
-index 9437171030e2..c6f811d72dfd 100644
+index 4b0adb48a40d..33b2b6e8f387 100644
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1362,6 +1362,23 @@ config CHECKPOINT_RESTORE
+@@ -1375,6 +1375,23 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
@@ -158,7 +158,7 @@ index 0f78364efd4f..83a6b919ab29 100644
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/fork.c b/kernel/fork.c
-index e919c8c3a121..726d3daa0498 100644
+index d9658b516bcb..889bbd12f20f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -116,6 +116,8 @@
@@ -170,7 +170,7 @@ index e919c8c3a121..726d3daa0498 100644
#include <trace/events/sched.h>
#define CREATE_TRACE_POINTS
-@@ -2524,6 +2526,10 @@ __latent_entropy struct task_struct *copy_process(
+@@ -2532,6 +2534,10 @@ __latent_entropy struct task_struct *copy_process(
p->start_time = ktime_get_ns();
p->start_boottime = ktime_get_boottime_ns();
@@ -640,7 +640,7 @@ index 000000000000..23aeb5649479
+#endif // CONFIG_SYSCTL
+#endif // CONFIG_SCHED_BORE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3e5a6bf587f9..fb4bb3fa5a96 100644
+index 165c90ba64ea..4b786324705c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -97,6 +97,8 @@
@ -652,7 +652,7 @@ index 3e5a6bf587f9..fb4bb3fa5a96 100644
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
-@@ -8481,6 +8483,10 @@ void __init sched_init(void)
+@@ -8489,6 +8491,10 @@ void __init sched_init(void)
BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif
@@ -664,7 +664,7 @@ index 3e5a6bf587f9..fb4bb3fa5a96 100644
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index a1be00a988bf..66fcb229007d 100644
+index fd7e85220715..927c4f5509e9 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -167,7 +167,53 @@ static const struct file_operations sched_feat_fops = {
@@ -761,7 +761,7 @@ index a1be00a988bf..66fcb229007d 100644
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
-@@ -1245,6 +1301,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+@@ -1242,6 +1298,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.load.weight);
#ifdef CONFIG_SMP
@@ -772,10 +772,10 @@ index a1be00a988bf..66fcb229007d 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index c532ffb153b4..c55d61977364 100644
+index 457fb08efc66..68ae922480be 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -55,6 +55,8 @@
+@@ -58,6 +58,8 @@
#include "stats.h"
#include "autogroup.h"
@@ -784,7 +784,7 @@ index c532ffb153b4..c55d61977364 100644
/*
* The initial- and re-scaling of tunables is configurable
*
-@@ -64,28 +66,32 @@
+@@ -67,28 +69,32 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
@@ -828,7 +828,7 @@ index c532ffb153b4..c55d61977364 100644
static int __init setup_sched_thermal_decay_shift(char *str)
{
-@@ -130,12 +136,8 @@ int __weak arch_asym_cpu_priority(int cpu)
+@@ -133,12 +139,8 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
@@ -841,7 +841,7 @@ index c532ffb153b4..c55d61977364 100644
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
-@@ -201,6 +203,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
+@@ -204,6 +206,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
@@ -855,7 +855,7 @@ index c532ffb153b4..c55d61977364 100644
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
-@@ -231,6 +240,7 @@ static void update_sysctl(void)
+@@ -234,6 +243,7 @@ static void update_sysctl(void)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
@@ -863,7 +863,7 @@ index c532ffb153b4..c55d61977364 100644
void __init sched_init_granularity(void)
{
-@@ -710,6 +720,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -713,6 +723,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
vlag = avg_vruntime(cfs_rq) - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
@@ -873,18 +873,18 @@ index c532ffb153b4..c55d61977364 100644
se->vlag = clamp(vlag, -limit, limit);
}
-@@ -934,6 +947,10 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+@@ -937,6 +950,10 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
* until it gets a new slice. See the HACK in set_next_entity().
*/
if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
+#ifdef CONFIG_SCHED_BORE
+ if (!(likely(sched_bore) && likely(sched_burst_parity_threshold) &&
-+ sched_burst_parity_threshold < cfs_rq->nr_running))
++ sched_burst_parity_threshold < cfs_rq->nr_queued))
+#endif // CONFIG_SCHED_BORE
return curr;
/* Pick the leftmost entity if it's eligible */
-@@ -992,6 +1009,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+@@ -995,6 +1012,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
@@ -892,7 +892,7 @@ index c532ffb153b4..c55d61977364 100644
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
-@@ -1003,6 +1021,7 @@ int sched_update_scaling(void)
+@@ -1006,6 +1024,7 @@ int sched_update_scaling(void)
return 0;
}
@@ -900,7 +900,7 @@ index c532ffb153b4..c55d61977364 100644
#endif
#endif
-@@ -1233,6 +1252,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -1236,6 +1255,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (unlikely(delta_exec <= 0))
return;
@@ -911,7 +911,7 @@ index c532ffb153b4..c55d61977364 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
resched = update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
-@@ -3784,7 +3807,7 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+@@ -3783,7 +3806,7 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);
@@ -920,7 +920,7 @@ index c532ffb153b4..c55d61977364 100644
unsigned long weight)
{
bool curr = cfs_rq->curr == se;
-@@ -5272,7 +5295,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5287,7 +5310,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->rel_deadline = 0;
return;
}
@@ -933,7 +933,7 @@ index c532ffb153b4..c55d61977364 100644
/*
* When joining the competition; the existing tasks will be,
* on average, halfway through their slice, as such start tasks
-@@ -7148,6 +7175,15 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -7164,6 +7191,15 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
util_est_dequeue(&rq->cfs, p);
util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP);
@@ -949,7 +949,7 @@ index c532ffb153b4..c55d61977364 100644
if (dequeue_entities(rq, &p->se, flags) < 0)
return false;
-@@ -8961,16 +8997,25 @@ static void yield_task_fair(struct rq *rq)
+@@ -8977,16 +9013,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@@ -975,7 +975,7 @@ index c532ffb153b4..c55d61977364 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
-@@ -13044,6 +13089,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -13117,6 +13162,9 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
static void task_fork_fair(struct task_struct *p)
{
set_task_max_allowed_capacity(p);
@@ -985,7 +985,7 @@ index c532ffb153b4..c55d61977364 100644
}
/*
-@@ -13154,6 +13202,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
+@@ -13227,6 +13275,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
@@ -997,10 +997,10 @@ index c532ffb153b4..c55d61977364 100644
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index dee2797009e3..bdc0b9c037d4 100644
+index ed5a75725411..5e64d621547c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2091,7 +2091,11 @@ static inline void update_sched_domain_debugfs(void) { }
+@@ -2121,7 +2121,11 @@ static inline void update_sched_domain_debugfs(void) { }
static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif
@@ -1012,7 +1012,7 @@ index dee2797009e3..bdc0b9c037d4 100644
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
-@@ -2828,7 +2832,12 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
+@@ -2845,7 +2849,12 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
@@ -1027,4 +1027,3 @@ index dee2797009e3..bdc0b9c037d4 100644
extern int sysctl_resched_latency_warn_ms;
--
2.48.1