This commit is contained in:
ferrreo 2023-07-18 19:43:09 +01:00
parent e1489c7f35
commit 6dc67ea0fa
7 changed files with 9371 additions and 4580 deletions

config

@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/x86 6.4.1 Kernel Configuration
+# Linux/x86 6.4.3 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="gcc (GCC) 13.1.1 20230525"
 CONFIG_CC_IS_GCC=y
@@ -4684,7 +4684,6 @@ CONFIG_XILLYBUS_CLASS=m
 CONFIG_XILLYBUS=m
 CONFIG_XILLYBUS_PCIE=m
 CONFIG_XILLYUSB=m
-CONFIG_DDCCI=m
 # end of Character devices
 #
@@ -6942,7 +6941,6 @@ CONFIG_BACKLIGHT_MAX8925=m
 CONFIG_BACKLIGHT_MT6370=m
 CONFIG_BACKLIGHT_APPLE=m
 CONFIG_BACKLIGHT_QCOM_WLED=m
-CONFIG_BACKLIGHT_DDCCI=m
 CONFIG_BACKLIGHT_RT4831=m
 CONFIG_BACKLIGHT_SAHARA=m
 CONFIG_BACKLIGHT_WM831X=m

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
-From 5e4ded34523fcaf5aea5c77d45239b6dd33f1c91 Mon Sep 17 00:00:00 2001
+From d5ebb5aa8f44f2a81002becad5f85b6e70801575 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
-Date: Thu, 1 Jun 2023 16:37:55 +0200
+Date: Tue, 11 Jul 2023 19:27:06 +0200
 Subject: [PATCH] EEVDF
 Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -13,14 +13,14 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
  init/init_task.c | 3 +-
  kernel/sched/core.c | 65 +-
  kernel/sched/debug.c | 49 +-
- kernel/sched/fair.c | 1152 +++++++++++------------
+ kernel/sched/fair.c | 1157 +++++++++++------------
  kernel/sched/features.h | 24 +-
  kernel/sched/sched.h | 22 +-
  tools/include/uapi/linux/sched.h | 4 +-
- 12 files changed, 726 insertions(+), 660 deletions(-)
+ 12 files changed, 733 insertions(+), 658 deletions(-)
 diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
-index f67c0829350b..a39dfda3d032 100644
+index e592a9364473..c826ab4e2e1a 100644
 --- a/Documentation/admin-guide/cgroup-v2.rst
 +++ b/Documentation/admin-guide/cgroup-v2.rst
 @@ -1121,6 +1121,16 @@ All time durations are in microseconds.
@@ -78,7 +78,7 @@ index 7ee7ed5de722..6dbc5a1bf6a8 100644
  * Template for declaring augmented rbtree callbacks (generic case)
  *
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index eed5d65b8d1f..63ac38d66ec6 100644
+index 8473324705ca..88c3e7ba8992 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -550,13 +550,18 @@ struct sched_entity {
@@ -196,7 +196,7 @@ index ff6c4b9bfe6b..511cbcf3510d 100644
  .rt = {
  .run_list = LIST_HEAD_INIT(init_task.rt.run_list),
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index bcb3a7e684ca..3bcb77b00e5b 100644
+index 1b971c69d3a2..df2f22a9729c 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -1305,6 +1305,12 @@ static void set_load_weight(struct task_struct *p, bool update_load)
@@ -232,7 +232,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  /*
  * We don't need the reset flag anymore after the fork. It has
-@@ -7512,7 +7522,7 @@ static struct task_struct *find_process_by_pid(pid_t pid)
+@@ -7525,7 +7535,7 @@ static struct task_struct *find_process_by_pid(pid_t pid)
  #define SETPARAM_POLICY -1
  static void __setscheduler_params(struct task_struct *p,
@@ -241,7 +241,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  {
  int policy = attr->sched_policy;
-@@ -7536,6 +7546,13 @@ static void __setscheduler_params(struct task_struct *p,
+@@ -7549,6 +7559,13 @@ static void __setscheduler_params(struct task_struct *p,
  set_load_weight(p, true);
  }
@@ -255,7 +255,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  /*
  * Check the target process has a UID that matches the current process's:
  */
-@@ -7676,6 +7693,13 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -7682,6 +7699,13 @@ static int __sched_setscheduler(struct task_struct *p,
  return retval;
  }
@@ -266,10 +266,10 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
 + return -EINVAL;
 + }
 +
- if (pi)
- cpuset_read_lock();
+ /* Update task specific "requested" clamps */
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) {
+ retval = uclamp_validate(p, attr);
-@@ -7710,6 +7734,9 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -7723,6 +7747,9 @@ static int __sched_setscheduler(struct task_struct *p,
  goto change;
  if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
  goto change;
@@ -279,7 +279,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  p->sched_reset_on_fork = reset_on_fork;
  retval = 0;
-@@ -7798,6 +7825,7 @@ static int __sched_setscheduler(struct task_struct *p,
+@@ -7811,6 +7838,7 @@ static int __sched_setscheduler(struct task_struct *p,
  __setscheduler_params(p, attr);
  __setscheduler_prio(p, newprio);
  }
@@ -287,7 +287,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  __setscheduler_uclamp(p, attr);
  if (queued) {
-@@ -8008,6 +8036,9 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
+@@ -8021,6 +8049,9 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
  size < SCHED_ATTR_SIZE_VER1)
  return -EINVAL;
@@ -297,7 +297,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  /*
  * XXX: Do we want to be lenient like existing syscalls; or do we want
  * to be strict and return an error on out-of-bounds values?
-@@ -8245,6 +8276,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+@@ -8258,6 +8289,8 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
  get_params(p, &kattr);
  kattr.sched_flags &= SCHED_FLAG_ALL;
@@ -306,7 +306,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  #ifdef CONFIG_UCLAMP_TASK
  /*
  * This could race with another potential updater, but this is fine
-@@ -11181,6 +11214,25 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
+@@ -11215,6 +11248,25 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css,
  {
  return sched_group_set_idle(css_tg(css), idle);
  }
@@ -332,7 +332,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  #endif
  static struct cftype cpu_legacy_files[] = {
-@@ -11195,6 +11247,11 @@ static struct cftype cpu_legacy_files[] = {
+@@ -11229,6 +11281,11 @@ static struct cftype cpu_legacy_files[] = {
  .read_s64 = cpu_idle_read_s64,
  .write_s64 = cpu_idle_write_s64,
  },
@@ -344,7 +344,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  #endif
  #ifdef CONFIG_CFS_BANDWIDTH
  {
-@@ -11412,6 +11469,12 @@ static struct cftype cpu_files[] = {
+@@ -11468,6 +11525,12 @@ static struct cftype cpu_files[] = {
  .read_s64 = cpu_idle_read_s64,
  .write_s64 = cpu_idle_write_s64,
  },
@@ -358,7 +358,7 @@ index bcb3a7e684ca..3bcb77b00e5b 100644
  #ifdef CONFIG_CFS_BANDWIDTH
  {
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index 066ff1c8ae4e..e7e83181fbb6 100644
+index aeeba46a096b..5c743bcb340d 100644
 --- a/kernel/sched/debug.c
 +++ b/kernel/sched/debug.c
 @@ -347,10 +347,7 @@ static __init int sched_init_debug(void)
@@ -373,7 +373,7 @@ index 066ff1c8ae4e..e7e83181fbb6 100644
  debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
  debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
-@@ -581,9 +578,13 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+@@ -582,9 +579,13 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
  else
  SEQ_printf(m, " %c", task_state_to_char(p));
@@ -388,7 +388,7 @@ index 066ff1c8ae4e..e7e83181fbb6 100644
  (long long)(p->nvcsw + p->nivcsw),
  p->prio);
-@@ -626,10 +627,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
+@@ -627,10 +628,9 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
  void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  {
@@ -401,7 +401,7 @@ index 066ff1c8ae4e..e7e83181fbb6 100644
  unsigned long flags;
  #ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -643,26 +643,25 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+@@ -644,26 +644,25 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  SPLIT_NS(cfs_rq->exec_clock));
  raw_spin_rq_lock_irqsave(rq, flags);
@@ -441,7 +441,7 @@ index 066ff1c8ae4e..e7e83181fbb6 100644
  SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
  cfs_rq->nr_spread_over);
  SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
-@@ -863,10 +862,7 @@ static void sched_debug_header(struct seq_file *m)
+@@ -864,10 +863,7 @@ static void sched_debug_header(struct seq_file *m)
  SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
  #define PN(x) \
  SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
@@ -453,7 +453,7 @@ index 066ff1c8ae4e..e7e83181fbb6 100644
  P(sysctl_sched_child_runs_first);
  P(sysctl_sched_features);
  #undef PN
-@@ -1089,6 +1085,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+@@ -1090,6 +1086,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
  #endif
  P(policy);
  P(prio);
@@ -462,7 +462,7 @@ index 066ff1c8ae4e..e7e83181fbb6 100644
  P(dl.runtime);
  P(dl.deadline);
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 9fe8288b1b1f..97678b9b4023 100644
+index 64cbea29b007..36dcf4770830 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -47,6 +47,7 @@
@@ -908,8 +908,7 @@ index 9fe8288b1b1f..97678b9b4023 100644
 +
 + while (node) {
 + struct sched_entity *se = __node_2_se(node);
 +
-- return __node_2_se(next);
 + /*
 + * If this entity is not eligible, try the left subtree.
 + */
@@ -928,7 +927,8 @@ index 9fe8288b1b1f..97678b9b4023 100644
 + if (best->deadline == best->min_deadline)
 + break;
 + }
 +
+- return __node_2_se(next);
 + /*
 + * If the earlest deadline in this subtree is in the fully
 + * eligible left half of our space, go there.
@@ -984,12 +984,12 @@ index 9fe8288b1b1f..97678b9b4023 100644
 {
 - if (unlikely(se->load.weight != NICE_0_LOAD))
 - delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
--
-- return delta;
--}
 + u32 weight = sched_prio_to_weight[prio];
 + u64 base = sysctl_sched_base_slice;
+- return delta;
+-}
+-
 -/*
 - * The idea is to set a period in which each task runs once.
 - *
@@ -1149,7 +1149,7 @@ index 9fe8288b1b1f..97678b9b4023 100644
 }
 void reweight_task(struct task_struct *p, int prio)
-@@ -4710,158 +4918,151 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
+@@ -4710,98 +4918,140 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 #endif /* CONFIG_SMP */
 -
-static inline bool entity_is_long_sleeper(struct sched_entity *se)
 +static inline bool
-+entity_has_slept(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
++entity_has_slept(struct cfs_rq *cfs_rq, struct sched_entity *se, u64 vslice, int flags)
 {
 - struct cfs_rq *cfs_rq;
 - u64 sleep_time;
-+ u64 now;
++ u64 now, vdelta;
++ s64 delta;
 - if (se->exec_start == 0)
 + if (!(flags & ENQUEUE_WAKEUP))
 return false;
 - cfs_rq = cfs_rq_of(se);
 -
 - sleep_time = rq_clock_task(rq_of(cfs_rq));
 + if (flags & ENQUEUE_MIGRATED)
 + return true;
 - /* Happen while migrating because of clock task divergence */
 - if (sleep_time <= se->exec_start)
 - return false;
 + now = rq_clock_task(rq_of(cfs_rq));
++ delta = now - se->exec_start;
++ if (delta < 0)
+ return false;
 - sleep_time -= se->exec_start;
 - if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
-- return true;
++ vdelta = __calc_delta(delta, NICE_0_LOAD, &cfs_rq->load);
++ if (vdelta < vslice)
++ return false;
 - return false;
-+ return (s64)(se->exec_start - now) >= se->slice;
++ return true;
 }
 static void
 + */
 + if (sched_feat(PLACE_FUDGE) &&
 + (cfs_rq->avg_slice > se->slice * cfs_rq->avg_load) &&
-+ entity_has_slept(cfs_rq, se, flags)) {
++ entity_has_slept(cfs_rq, se, vslice, flags)) {
 + lag += vslice;
 + if (lag > 0)
 + lag = 0;
 }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
 @@ -4809,60 +5059,20 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
 static inline bool cfs_bandwidth_used(void);
 /*
 * When enqueuing a sched_entity, we must:
 * - Update loads to have both entity and cfs_rq synced with now.
-@@ -4873,18 +5074,28 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4874,18 +5084,28 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 */
 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
 se_update_runnable(se);
 if (!curr)
 __enqueue_entity(cfs_rq, se);
 se->on_rq = 1;
-@@ -4896,17 +5107,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4907,17 +5127,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 }
 }
 static void __clear_buddies_next(struct sched_entity *se)
 {
 for_each_sched_entity(se) {
-@@ -4918,27 +5118,10 @@ static void __clear_buddies_next(struct sched_entity *se)
+@@ -4929,27 +5138,10 @@ static void __clear_buddies_next(struct sched_entity *se)
 }
 }
 }
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -4972,20 +5155,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -4983,20 +5175,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 clear_buddies(cfs_rq, se);
 /* return excess runtime on last dequeue */
 return_cfs_rq_runtime(cfs_rq);
-@@ -5004,52 +5179,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5015,52 +5199,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 update_idle_cfs_rq_clock_pelt(cfs_rq);
 }
 static void
 set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-@@ -5088,9 +5217,6 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -5099,9 +5237,6 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 /*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
-@@ -5101,50 +5227,14 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+@@ -5112,50 +5247,14 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *
 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 - if (!second || (curr && entity_before(curr, second)))
 - second = curr;
 - }
++ if (sched_feat(NEXT_BUDDY) &&
++ cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
++ return cfs_rq->next;
 - if (second && wakeup_preempt_entity(second, left) < 1)
 - se = second;
 - }
 - */
 - se = cfs_rq->last;
 - }
-+ if (sched_feat(NEXT_BUDDY) &&
-+ cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
-+ return cfs_rq->next;
 - return se;
 + return pick_eevdf(cfs_rq);
 }
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -5161,8 +5251,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
+@@ -5172,8 +5271,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 /* throttle cfs_rqs exceeding runtime */
 check_cfs_rq_runtime(cfs_rq);
 if (prev->on_rq) {
 update_stats_wait_start_fair(cfs_rq, prev);
 /* Put 'current' back into the tree. */
-@@ -5203,9 +5291,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+@@ -5214,9 +5311,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
 return;
 #endif
 }
-@@ -6210,13 +6295,12 @@ static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+@@ -6241,13 +6335,12 @@ static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 {
 struct sched_entity *se = &p->se;
 s64 delta = slice - ran;
 if (delta < 0) {
-@@ -6240,8 +6324,7 @@ static void hrtick_update(struct rq *rq)
+@@ -6271,8 +6364,7 @@ static void hrtick_update(struct rq *rq)
 if (!hrtick_enabled_fair(rq) || curr->sched_class != &fair_sched_class)
 return;
 }
 #else /* !CONFIG_SCHED_HRTICK */
 static inline void
-@@ -6282,17 +6365,6 @@ static int sched_idle_rq(struct rq *rq)
+@@ -6313,17 +6405,6 @@ static int sched_idle_rq(struct rq *rq)
 rq->nr_running);
 }
 #ifdef CONFIG_SMP
 static int sched_idle_cpu(int cpu)
 {
-@@ -7778,18 +7850,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
+@@ -7809,18 +7890,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 {
 struct sched_entity *se = &p->se;
 if (!task_on_rq_migrating(p)) {
 remove_entity_load_avg(se);
-@@ -7827,66 +7887,6 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+@@ -7858,66 +7927,6 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 }
 #endif /* CONFIG_SMP */
 static void set_next_buddy(struct sched_entity *se)
 {
 for_each_sched_entity(se) {
-@@ -7898,12 +7898,6 @@ static void set_next_buddy(struct sched_entity *se)
+@@ -7929,12 +7938,6 @@ static void set_next_buddy(struct sched_entity *se)
 }
 }
 /*
 * Preempt the current task with a newly woken task if needed:
 */
-@@ -7912,7 +7906,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7943,7 +7946,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 struct task_struct *curr = rq->curr;
 struct sched_entity *se = &curr->se, *pse = &p->se;
 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 int next_buddy_marked = 0;
 int cse_is_idle, pse_is_idle;
-@@ -7928,7 +7921,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7959,7 +7961,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
 return;
 set_next_buddy(pse);
 next_buddy_marked = 1;
 }
-@@ -7973,35 +7966,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -8004,35 +8006,19 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 if (cse_is_idle != pse_is_idle)
 return;
 }
 #ifdef CONFIG_SMP
-@@ -8202,8 +8179,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
+@@ -8233,8 +8219,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 /*
 * sched_yield() is very simple
 */
 static void yield_task_fair(struct rq *rq)
 {
-@@ -8219,21 +8194,19 @@ static void yield_task_fair(struct rq *rq)
+@@ -8250,21 +8234,19 @@ static void yield_task_fair(struct rq *rq)
 clear_buddies(cfs_rq, se);
 }
 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
-@@ -8476,8 +8449,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+@@ -8512,8 +8494,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 * Buddy candidates are cache hot:
 */
 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
 return 1;
 if (sysctl_sched_migration_cost == -1)
-@@ -11987,8 +11959,8 @@ static void rq_offline_fair(struct rq *rq)
+@@ -12139,8 +12120,8 @@ static void rq_offline_fair(struct rq *rq)
 static inline bool
 __entity_slice_used(struct sched_entity *se, int min_nr_tasks)
 {
 return (rtime * min_nr_tasks > slice);
 }
-@@ -12144,8 +12116,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
+@@ -12296,8 +12277,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 */
 static void task_fork_fair(struct task_struct *p)
 {
 struct rq *rq = this_rq();
 struct rq_flags rf;
-@@ -12154,22 +12126,9 @@ static void task_fork_fair(struct task_struct *p)
+@@ -12306,22 +12287,9 @@ static void task_fork_fair(struct task_struct *p)
 cfs_rq = task_cfs_rq(current);
 curr = cfs_rq->curr;
 rq_unlock(rq, &rf);
 }
-@@ -12198,34 +12157,6 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+@@ -12350,34 +12318,6 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 check_preempt_curr(rq, p, 0);
 }
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
 * Propagate the changes of the sched_entity across the tg tree to make it
-@@ -12296,16 +12227,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
+@@ -12448,16 +12388,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 static void detach_task_cfs_rq(struct task_struct *p)
 {
 struct sched_entity *se = &p->se;
 detach_entity_cfs_rq(se);
 }
-@@ -12313,12 +12234,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
+@@ -12465,12 +12395,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
 static void attach_task_cfs_rq(struct task_struct *p)
 {
 struct sched_entity *se = &p->se;
 }
 static void switched_from_fair(struct rq *rq, struct task_struct *p)
-@@ -12429,6 +12346,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
+@@ -12581,6 +12507,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 goto err;
 tg->shares = NICE_0_LOAD;
 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
-@@ -12527,6 +12445,9 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
+@@ -12679,6 +12606,9 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 }
 se->my_q = cfs_rq;
 /* guarantee group entities always have weight */
 update_load_set(&se->load, NICE_0_LOAD);
 se->parent = parent;
-@@ -12657,6 +12578,29 @@ int sched_group_set_idle(struct task_group *tg, long idle)
+@@ -12809,6 +12739,29 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 return 0;
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
 void free_fair_sched_group(struct task_group *tg) { }
-@@ -12683,7 +12627,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
+@@ -12835,7 +12788,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
 * idle runqueue:
 */
 if (rq->cfs.load.weight)
 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
 index 9e390eb82e38..ca95044a7479 100644
-SCHED_FEAT(ALT_PERIOD, true)
-SCHED_FEAT(BASE_SLICE, true)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index d8ba81c66579..0ea13cfac95b 100644
+index 0605fb53816d..96b1ae519f20 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -372,6 +372,8 @@ struct task_group {
@@ -2166,7 +2174,7 @@ index d8ba81c66579..0ea13cfac95b 100644
 #ifdef CONFIG_SCHED_DEBUG
 unsigned int nr_spread_over;
-@@ -2167,6 +2173,7 @@ extern const u32 sched_prio_to_wmult[40];
+@@ -2170,6 +2176,7 @@ extern const u32 sched_prio_to_wmult[40];
 #else
 #define ENQUEUE_MIGRATED 0x00
 #endif
 +
 #define RETRY_TASK ((void *)-1UL)
-@@ -2471,11 +2478,9 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
+@@ -2474,11 +2481,9 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
@@ -2188,7 +2196,7 @@ index d8ba81c66579..0ea13cfac95b 100644
 extern int sysctl_resched_latency_warn_ms;
 extern int sysctl_resched_latency_warn_once;
-@@ -2488,6 +2493,8 @@ extern unsigned int sysctl_numa_balancing_scan_size;
+@@ -2491,6 +2496,8 @@ extern unsigned int sysctl_numa_balancing_scan_size;
 extern unsigned int sysctl_numa_balancing_hot_threshold;
 #endif
@@ -2197,7 +2205,7 @@ index d8ba81c66579..0ea13cfac95b 100644
 #ifdef CONFIG_SCHED_HRTICK
 /*
-@@ -3496,4 +3503,7 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
+@@ -3499,4 +3506,7 @@ static inline void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) { }
 static inline void init_sched_mm_cid(struct task_struct *t) { }
 #endif
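For reference, the selection rule this EEVDF patch implements in pick_eevdf() can be reduced to: pick the eligible entity (one not ahead of the weighted average vruntime) with the earliest virtual deadline. The following is a minimal user-space sketch of that rule over a plain array; it is illustrative only, not kernel code, and the kernel instead walks an augmented rbtree with a cached min_deadline, so the names and values below are invented for the example.

/*
 * Illustrative sketch of EEVDF-style picking (not part of this commit).
 * Build: cc -o eevdf_sketch eevdf_sketch.c
 */
#include <stdio.h>
#include <stdint.h>

struct entity {
	const char *name;
	int64_t vruntime;   /* virtual service received so far */
	int64_t vdeadline;  /* vruntime + slice scaled by weight */
	int64_t weight;
};

/* Eligible: has not run ahead of the weighted average vruntime. */
static int eligible(const struct entity *e, int64_t avg_vruntime)
{
	return e->vruntime <= avg_vruntime;
}

static const struct entity *pick_eevdf_like(const struct entity *v, int n)
{
	int64_t wsum = 0, vsum = 0;
	const struct entity *best = NULL;

	for (int i = 0; i < n; i++) {
		vsum += v[i].vruntime * v[i].weight;
		wsum += v[i].weight;
	}
	int64_t avg = wsum ? vsum / wsum : 0;

	for (int i = 0; i < n; i++) {
		if (!eligible(&v[i], avg))
			continue;            /* lagging entities only */
		if (!best || v[i].vdeadline < best->vdeadline)
			best = &v[i];        /* earliest virtual deadline */
	}
	return best;
}

int main(void)
{
	struct entity rq[] = {
		{ "A", 100, 130, 1024 }, /* eligible, late deadline  */
		{ "B",  90, 105, 1024 }, /* eligible, early deadline */
		{ "C", 140, 150, 1024 }, /* ran ahead: not eligible  */
	};
	const struct entity *e = pick_eevdf_like(rq, 3);
	printf("picked %s\n", e ? e->name : "none");
	return 0;
}

Running this prints "picked B": B is behind its fair share and has the earliest deadline, which is exactly why the rbtree search above prefers the eligible left half of the tree.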
@@ -1,9 +1,9 @@
-From 5e3bbb489086974a823af55f23cc17d2ea032f8b Mon Sep 17 00:00:00 2001
+From 4a346951e2b3c7de65511c95f74fdd7197e3d2e5 Mon Sep 17 00:00:00 2001
-From: Piotr Gorski <lucjan.lucjanov@gmail.com>
+From: Peter Jung <admin@ptr1337.dev>
-Date: Sun, 18 Jun 2023 11:05:43 +0200
+Date: Tue, 11 Jul 2023 19:31:15 +0200
 Subject: [PATCH] bore-eevdf
-Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
+Signed-off-by: Peter Jung <admin@ptr1337.dev>
 ---
 include/linux/sched.h | 10 +++
 init/Kconfig | 20 +++++
@@ -15,7 +15,7 @@ Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
 7 files changed, 286 insertions(+), 8 deletions(-)
 diff --git a/include/linux/sched.h b/include/linux/sched.h
-index 63ac38d66..63a2205a5 100644
+index 88c3e7ba8992..6b4c553aea75 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
 @@ -560,6 +560,12 @@ struct sched_entity {
@@ -43,10 +43,10 @@ index 63ac38d66..63a2205a5 100644
 /*
 * 'ptraced' is the list of tasks this task is using ptrace() on.
 diff --git a/init/Kconfig b/init/Kconfig
-index 0147b4a33..4ab7e154b 100644
+index b6d38eccca10..e90546df3182 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
-@@ -1290,6 +1290,26 @@ config CHECKPOINT_RESTORE
+@@ -1277,6 +1277,26 @@ config CHECKPOINT_RESTORE
 If unsure, say N here.
@@ -74,7 +74,7 @@ index 0147b4a33..4ab7e154b 100644
 bool "Automatic process group scheduling"
 select CGROUPS
 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 3bcb77b00..65469bc43 100644
+index df2f22a9729c..4995243a2ba4 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
 @@ -4490,6 +4490,57 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -118,11 +118,11 @@ index 3bcb77b00..65469bc43 100644
 +static void update_task_initial_burst_time(struct task_struct *task) {
 + struct sched_entity *se = &task->se;
 + struct task_struct *par = task->real_parent;
-+ u64 ktime = ktime_to_ns(ktime_get());
++ u64 now = ktime_get_ns();
 +
 + if (likely(par)) {
-+ if (par->child_burst_last_cached + sched_burst_cache_lifetime < ktime) {
-+ par->child_burst_last_cached = ktime;
++ if (par->child_burst_last_cached + sched_burst_cache_lifetime < now) {
++ par->child_burst_last_cached = now;
 + update_task_child_burst_time_cache(par);
 + }
 + se->prev_burst_time = max(se->prev_burst_time, par->child_burst_cache);
@@ -155,20 +155,20 @@ index 3bcb77b00..65469bc43 100644
 /*
 * We mark the process as NEW here. This guarantees that
 * nobody will actually run it, and a signal or other external
-@@ -9955,6 +10012,11 @@ void __init sched_init(void)
+@@ -9968,6 +10025,11 @@ void __init sched_init(void)
 BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 +#ifdef CONFIG_SCHED_BORE
 + sched_init_bore();
-+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 2.4.1 by Masahito Suzuki");
++ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 2.4.2 by Masahito Suzuki");
 +#endif // CONFIG_SCHED_BORE
 +
 wait_bit_init();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index e7e83181f..c29500314 100644
+index 5c743bcb340d..755ef4c8d34b 100644
 --- a/kernel/sched/debug.c
 +++ b/kernel/sched/debug.c
 @@ -348,6 +348,7 @@ static __init int sched_init_debug(void)
@@ -179,7 +179,7 @@ index e7e83181f..c29500314 100644
 debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
-@@ -594,6 +595,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
+@@ -595,6 +596,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
 SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
@@ -190,7 +190,7 @@ index e7e83181f..c29500314 100644
 SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 97678b9b4..c3d632800 100644
+index 36dcf4770830..30080b227866 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -19,6 +19,9 @@
@@ -247,7 +247,7 @@ index 97678b9b4..c3d632800 100644
 +#ifdef CONFIG_SCHED_BORE
 +unsigned int __read_mostly sched_bore = 1;
 +unsigned int __read_mostly sched_burst_cache_lifetime = 15000000;
-+unsigned int __read_mostly sched_burst_penalty_offset = 12;
++unsigned int __read_mostly sched_burst_penalty_offset = 18;
 +unsigned int __read_mostly sched_burst_penalty_scale = 1292;
 +unsigned int __read_mostly sched_burst_smoothness = 1;
 +static int three = 3;
@@ -414,7 +414,7 @@ index 97678b9b4..c3d632800 100644
 curr->vruntime += calc_delta_fair(delta_exec, curr);
 update_deadline(cfs_rq, curr);
 update_min_vruntime(cfs_rq);
-@@ -5217,6 +5351,9 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -5237,6 +5371,9 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
@@ -424,7 +424,7 @@ index 97678b9b4..c3d632800 100644
 /*
 * Pick the next process, keeping these things in mind, in this order:
 * 1) keep things fair between processes/task groups
-@@ -5227,14 +5364,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -5247,14 +5384,16 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static struct sched_entity *
 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 +
 }
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
-@@ -6464,6 +6603,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6504,6 +6643,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 hrtick_update(rq);
 }
 static void set_next_buddy(struct sched_entity *se);
 /*
-@@ -6482,6 +6653,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6522,6 +6693,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 util_est_dequeue(&rq->cfs, p);
 for_each_sched_entity(se) {
 cfs_rq = cfs_rq_of(se);
 dequeue_entity(cfs_rq, se, flags);
-@@ -7972,7 +8146,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -8012,7 +8186,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 /*
 * XXX pick_eevdf(cfs_rq) != se ?
 */
 goto preempt;
 return;
-@@ -8185,6 +8359,9 @@ static void yield_task_fair(struct rq *rq)
+@@ -8225,6 +8399,9 @@ static void yield_task_fair(struct rq *rq)
 struct task_struct *curr = rq->curr;
 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 struct sched_entity *se = &curr->se;
 /*
 * Are we the only task in the tree?
 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index ca95044a7..a7d34d1b2 100644
+index ca95044a7479..a7d34d1b28c5 100644
 --- a/kernel/sched/features.h
 +++ b/kernel/sched/features.h
 @@ -13,7 +13,11 @@ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
@@ -528,10 +528,10 @@ index ca95044a7..a7d34d1b2 100644
 /*
 * Consider buddies to be cache hot, decreases the likeliness of a
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 0ea13cfac..34cb2fbbb 100644
+index 96b1ae519f20..cc0a17fb23c2 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
-@@ -2479,6 +2479,7 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
+@@ -2482,6 +2482,7 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_base_slice;
File diff suppressed because it is too large
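The bore-eevdf patch above keys everything off an entity's "burst time" (CPU consumed without sleeping), with the sysctl defaults visible in the diff (sched_burst_penalty_offset raised from 12 to 18, scale 1292, smoothness 1, a 15 ms child-burst cache). A rough user-space sketch of that penalty shaping follows; it is an approximation for intuition only, the kernel's fixed-point math and the exact formula in the suppressed hunks may differ.

/*
 * Illustrative sketch of a BORE-style burst penalty (not part of this
 * commit). Constants mirror the sysctl defaults shown in the patch.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int burst_penalty_offset = 18;  /* sched_burst_penalty_offset */
static unsigned int burst_penalty_scale = 1292; /* sched_burst_penalty_scale  */

/* floor(log2(v)), with v forced non-zero */
static unsigned int log2_u64(uint64_t v)
{
	unsigned int r = 0;
	v |= 1;
	while (v >>= 1)
		r++;
	return r;
}

/*
 * Longer uninterrupted bursts yield a larger penalty; bursts shorter
 * than 2^offset ns are ignored. BORE feeds this back into the task's
 * effective priority so bursty batch work yields to interactive tasks.
 */
static unsigned int burst_penalty(uint64_t burst_time_ns)
{
	unsigned int greed = log2_u64(burst_time_ns);

	if (greed <= burst_penalty_offset)
		return 0;
	return ((greed - burst_penalty_offset) * burst_penalty_scale) >> 10;
}

int main(void)
{
	/* a 100 us burst vs a 100 ms burst (hypothetical values) */
	printf("short burst penalty: %u\n", burst_penalty(100000ULL));
	printf("long burst penalty:  %u\n", burst_penalty(100000000ULL));
	return 0;
}

With these defaults the 100 us burst scores 0 and the 100 ms burst scores 10, which matches the patch's intent: only sustained bursts are penalized, and new children inherit a cached parent burst estimate so forked workers cannot reset their history.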


@@ -2,4 +2,4 @@
 echo "Pika Kernel - Building"
-make -j`nproc` bindeb-pkg LOCALVERSION=-pikaos KDEB_PKGVERSION=$(make kernelversion)-2
+make -j`nproc` bindeb-pkg LOCALVERSION=-pikaos KDEB_PKGVERSION=$(make kernelversion)-23


@@ -2,7 +2,7 @@
 echo "Pika Kernel - Getting source"
-wget -nv https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.4.1.tar.gz
-tar -xf ./linux-6.4.1.tar.gz
-cd linux-6.4.1
+wget -nv https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-6.4.3.tar.gz
+tar -xf ./linux-6.4.3.tar.gz
+cd linux-6.4.3