Update patches/0003-bore-cachy-ext.patch
commit dc2e5a6347 (parent a280828878)
--- a/patches/0003-bore-cachy-ext.patch
+++ b/patches/0003-bore-cachy-ext.patch
@@ -1,6 +1,6 @@
-From 02401e1df25c88de792f9f7261ac827e488e607b Mon Sep 17 00:00:00 2001
+From e91f8d993bc2b1a1424cb2f5a931fe8f31eb97b9 Mon Sep 17 00:00:00 2001
 From: Eric Naim <dnaim@cachyos.org>
-Date: Fri, 4 Oct 2024 14:55:16 +0800
+Date: Tue, 8 Oct 2024 23:02:55 +0800
 Subject: [PATCH] bore-cachy-ext
 
 Signed-off-by: Eric Naim <dnaim@cachyos.org>
@@ -11,18 +11,18 @@ Signed-off-by: Eric Naim <dnaim@cachyos.org>
 kernel/Kconfig.hz | 17 ++
 kernel/fork.c | 5 +
 kernel/sched/Makefile | 1 +
-kernel/sched/bore.c | 380 +++++++++++++++++++++++++++++++++++++
+kernel/sched/bore.c | 381 +++++++++++++++++++++++++++++++++++++
 kernel/sched/core.c | 7 +
 kernel/sched/debug.c | 60 +++++-
 kernel/sched/fair.c | 102 ++++++++--
 kernel/sched/features.h | 4 +
 kernel/sched/sched.h | 7 +
-12 files changed, 639 insertions(+), 18 deletions(-)
+12 files changed, 640 insertions(+), 18 deletions(-)
 create mode 100644 include/linux/sched/bore.h
 create mode 100644 kernel/sched/bore.c
 
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 5b4f78fe379d..bb4567e29671 100644
+index c5a7901b2580..bab2d659b667 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -537,6 +537,14 @@ struct sched_statistics {
@@ -163,7 +163,7 @@ index 0f78364efd4f..83a6b919ab29 100644
 config SCHED_HRTICK
 	def_bool HIGH_RES_TIMERS
 diff --git a/kernel/fork.c b/kernel/fork.c
-index 69a0a7210060..68737ef70d5c 100644
+index eb290420d926..8f060c73877b 100644
 --- a/kernel/fork.c
 +++ b/kernel/fork.c
 @@ -116,6 +116,8 @@
@@ -175,7 +175,7 @@ index 69a0a7210060..68737ef70d5c 100644
 #include <trace/events/sched.h>
 
 #define CREATE_TRACE_POINTS
-@@ -2354,6 +2356,9 @@ __latent_entropy struct task_struct *copy_process(
+@@ -2351,6 +2353,9 @@ __latent_entropy struct task_struct *copy_process(
 	retval = sched_fork(clone_flags, p);
 	if (retval)
 		goto bad_fork_cleanup_policy;
@@ -196,10 +196,10 @@ index 976092b7bd45..293aad675444 100644
 +obj-y += bore.o
 diff --git a/kernel/sched/bore.c b/kernel/sched/bore.c
 new file mode 100644
-index 000000000000..62a0191a32d7
+index 000000000000..cd7e8a8d6075
 --- /dev/null
 +++ b/kernel/sched/bore.c
-@@ -0,0 +1,380 @@
+@@ -0,0 +1,381 @@
 +/*
 + * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
 + * Copyright (C) 2021-2024 Masahito Suzuki <firelzrd@gmail.com>
@@ -320,6 +320,9 @@ index 000000000000..62a0191a32d7
 +	}
 +}
 +
++static inline bool task_is_bore_eligible(struct task_struct *p)
++{return p->sched_class == &fair_sched_class;}
++
 +static void reset_task_weights_bore(void) {
 +	struct task_struct *task;
 +	struct rq *rq;
@@ -327,6 +330,7 @@ index 000000000000..62a0191a32d7
 +
 +	write_lock_irq(&tasklist_lock);
 +	for_each_process(task) {
++		if (!task_is_bore_eligible(task)) continue;
 +		rq = task_rq(task);
 +		rq_lock_irqsave(rq, &rf);
 +		reweight_task_by_prio(task, effective_prio(task));
@@ -353,9 +357,6 @@ index 000000000000..62a0191a32d7
 +	return cnt;
 +}
 +
-+static inline bool task_is_bore_eligible(struct task_struct *p)
-+{return p->sched_class == &fair_sched_class;}
-+
 +static inline bool burst_cache_expired(struct sched_burst_cache *bc, u64 now)
 +{return (s64)(bc->timestamp + sched_burst_cache_lifetime - now) < 0;}
 +
@@ -581,7 +582,7 @@ index 000000000000..62a0191a32d7
 +#endif // CONFIG_SYSCTL
 +#endif // CONFIG_SCHED_BORE
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index c792a6feb7a9..447ab2bb147e 100644
+index 8ae04bd4a5a4..4aa992f99c36 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -97,6 +97,8 @@
@@ -593,12 +594,12 @@ index c792a6feb7a9..447ab2bb147e 100644
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
 
-@@ -8283,6 +8285,11 @@ void __init sched_init(void)
+@@ -8290,6 +8292,11 @@ void __init sched_init(void)
 	BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
 #endif
 
 +#ifdef CONFIG_SCHED_BORE
-+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.6.0 by Masahito Suzuki");
++	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.6.1 by Masahito Suzuki");
 +	init_task_bore(&init_task);
 +#endif // CONFIG_SCHED_BORE
 +
@@ -713,7 +714,7 @@ index c057ef46c5f8..3cab39e34824 100644
 	P(se.avg.runnable_sum);
 	P(se.avg.util_sum);
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 2928026d76a3..90c96ecded2d 100644
+index a36e37a674e8..603d72b9e6e8 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -55,6 +55,8 @@
@@ -856,7 +857,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_deadline(cfs_rq, curr);
 	update_min_vruntime(cfs_rq);
-@@ -3795,7 +3822,7 @@ static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
+@@ -3804,7 +3831,7 @@ static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
 	se->deadline = avruntime + vslice;
 }
 
@@ -865,7 +866,7 @@ index 2928026d76a3..90c96ecded2d 100644
 			    unsigned long weight)
 {
 	bool curr = cfs_rq->curr == se;
-@@ -5203,6 +5230,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5212,6 +5239,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *
 	 * EEVDF: placement strategy #1 / #2
 	 */
@@ -875,7 +876,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
 		struct sched_entity *curr = cfs_rq->curr;
 		unsigned long load;
-@@ -5273,6 +5303,16 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5282,6 +5312,16 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	se->vruntime = vruntime - lag;
 
@@ -892,7 +893,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	/*
 	 * When joining the competition; the existing tasks will be,
 	 * on average, halfway through their slice, as such start tasks
-@@ -5382,6 +5422,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
+@@ -5391,6 +5431,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -900,7 +901,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	int action = UPDATE_TG;
 
 	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
-@@ -5409,6 +5450,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5418,6 +5459,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	clear_buddies(cfs_rq, se);
 
 	update_entity_lag(cfs_rq, se);
@@ -912,7 +913,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-@@ -6860,6 +6906,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6869,6 +6915,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	bool was_sched_idle = sched_idle_rq(rq);
 
 	util_est_dequeue(&rq->cfs, p);
@@ -927,7 +928,7 @@ index 2928026d76a3..90c96ecded2d 100644
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-@@ -8646,16 +8700,25 @@ static void yield_task_fair(struct rq *rq)
+@@ -8651,16 +8705,25 @@ static void yield_task_fair(struct rq *rq)
 	/*
 	 * Are we the only task in the tree?
 	 */
@@ -953,7 +954,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	/*
 	 * Tell update_rq_clock() that we've just updated,
 	 * so we don't do microscopic update in schedule()
-@@ -12720,6 +12783,9 @@ static void task_fork_fair(struct task_struct *p)
+@@ -12725,6 +12788,9 @@ static void task_fork_fair(struct task_struct *p)
 	curr = cfs_rq->curr;
 	if (curr)
 		update_curr(cfs_rq);
@@ -963,7 +964,7 @@ index 2928026d76a3..90c96ecded2d 100644
 	place_entity(cfs_rq, se, ENQUEUE_INITIAL);
 	rq_unlock(rq, &rf);
 }
-@@ -12832,6 +12898,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
+@@ -12837,6 +12903,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
 
 static void switched_from_fair(struct rq *rq, struct task_struct *p)
 {
@@ -1016,4 +1017,4 @@ index 48d893de632b..62e7e9e5fd9c 100644
 #ifdef CONFIG_SCHED_DEBUG
 extern int sysctl_resched_latency_warn_ms;
 --
-2.46.2
+2.47.0