Update patches/0003-bore-cachy-ext.patch

ferreo 2024-11-08 13:08:02 +01:00
parent dce421e243
commit 8a90123252


@@ -1,6 +1,6 @@
-From e91f8d993bc2b1a1424cb2f5a931fe8f31eb97b9 Mon Sep 17 00:00:00 2001
+From 318c40e6ac298c062db3e34a9e94e75b81d3a653 Mon Sep 17 00:00:00 2001
From: Eric Naim <dnaim@cachyos.org>
-Date: Tue, 8 Oct 2024 23:02:55 +0800
+Date: Mon, 28 Oct 2024 10:11:08 +0800
Subject: [PATCH] bore-cachy-ext

Signed-off-by: Eric Naim <dnaim@cachyos.org>
@@ -8,16 +8,16 @@ Signed-off-by: Eric Naim <dnaim@cachyos.org>
 include/linux/sched.h      |  20 +-
 include/linux/sched/bore.h |  37 ++++
 init/Kconfig               |  17 ++
-kernel/Kconfig.hz          |  17 ++
+kernel/Kconfig.hz          |  43 +++++
 kernel/fork.c              |   5 +
 kernel/sched/Makefile      |   1 +
 kernel/sched/bore.c        | 381 +++++++++++++++++++++++++++++++++++++
 kernel/sched/core.c        |   7 +
-kernel/sched/debug.c       |  60 +++++-
+kernel/sched/debug.c       |  67 ++++++-
-kernel/sched/fair.c        | 102 ++++++++--
+kernel/sched/fair.c        | 114 +++++++++--
 kernel/sched/features.h    |   4 +
-kernel/sched/sched.h       |   7 +
+kernel/sched/sched.h       |  16 ++
-12 files changed, 640 insertions(+), 18 deletions(-)
+12 files changed, 694 insertions(+), 18 deletions(-)
 create mode 100644 include/linux/sched/bore.h
 create mode 100644 kernel/sched/bore.c
@@ -136,10 +136,10 @@ index e1a88d48d652..3aea8e43c360 100644
	bool "Automatic process group scheduling"
	select CGROUPS
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
-index 0f78364efd4f..83a6b919ab29 100644
+index 0f78364efd4f..4cf2d88916bd 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
-@@ -79,5 +79,22 @@ config HZ
+@@ -79,5 +79,48 @@ config HZ
	default 750 if HZ_750
	default 1000 if HZ_1000
@@ -159,6 +159,32 @@ index 0f78364efd4f..83a6b919ab29 100644
+	  Setting this value too high can cause the system to boot with
+	  an unnecessarily large base slice, resulting in high scheduling
+	  latency and poor system responsiveness.
++
++config MIGRATION_COST_BASE_NS
++	int "Default value for migration_cost_base_ns"
++	default 300000
++	help
++	  The BORE Scheduler automatically calculates the optimal
++	  migration_cost_ns using the following equation:
++
++	  migration_cost_ns =
++	  migration_cost_base_ns + ilog2(ncpus) * migration_cost_step_ns
++
++	  This option sets the default migration_cost_base_ns
++	  to be used in the automatic calculation.
++
++config MIGRATION_COST_STEP_NS
++	int "Default value for migration_cost_step_ns"
++	default 50000
++	help
++	  The BORE Scheduler automatically calculates the optimal
++	  migration_cost_ns using the following equation:
++
++	  migration_cost_ns =
++	  migration_cost_base_ns + ilog2(ncpus) * migration_cost_step_ns
++
++	  This option sets the default migration_cost_step_ns
++	  to be used in the automatic calculation.
+
config SCHED_HRTICK
	def_bool HIGH_RES_TIMERS
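As a sanity check on the help text above: with the defaults shown (base 300000 ns, step 50000 ns), an 8-CPU machine would get 300000 + ilog2(8) * 50000 = 450000 ns. The same arithmetic as a self-contained user-space C sketch, with ilog2_u() as a local stand-in for the kernel's ilog2() (illustration only, not part of the patch):

#include <stdio.h>

/* integer log2 for n > 0, mirroring the kernel's ilog2() */
static unsigned int ilog2_u(unsigned int n)
{
	unsigned int l = 0;
	while (n >>= 1)
		l++;
	return l;
}

int main(void)
{
	const unsigned int base = 300000, step = 50000;	/* ns, the Kconfig defaults above */
	unsigned int ncpus;

	for (ncpus = 1; ncpus <= 32; ncpus *= 2)
		printf("%2u CPUs -> migration_cost_ns = %u\n",
		       ncpus, base + ilog2_u(ncpus) * step);
	return 0;	/* e.g. 8 CPUs: 300000 + 3 * 50000 = 450000 ns */
}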
@@ -582,7 +608,7 @@ index 000000000000..cd7e8a8d6075
+#endif // CONFIG_SYSCTL
+#endif // CONFIG_SCHED_BORE
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 8ae04bd4a5a4..4aa992f99c36 100644
+index 8ae04bd4a5a4..896db098c4c5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -97,6 +97,8 @@
@@ -599,7 +625,7 @@ index 8ae04bd4a5a4..4aa992f99c36 100644
#endif
+#ifdef CONFIG_SCHED_BORE
-+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.6.1 by Masahito Suzuki");
++	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.7.1 by Masahito Suzuki");
+	init_task_bore(&init_task);
+#endif // CONFIG_SCHED_BORE
+
@@ -607,63 +633,66 @@ index 8ae04bd4a5a4..4aa992f99c36 100644
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
-index c057ef46c5f8..3cab39e34824 100644
+index c057ef46c5f8..b71ce5182500 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
+@@ -167,7 +167,55 @@ static const struct file_operations sched_feat_fops = {
};
#ifdef CONFIG_SMP
+#ifdef CONFIG_SCHED_BORE
-+static ssize_t sched_min_base_slice_write(struct file *filp, const char __user *ubuf,
-+				   size_t cnt, loff_t *ppos)
-+{
-+	char buf[16];
-+	unsigned int value;
-+
-+	if (cnt > 15)
-+		cnt = 15;
-+
-+	if (copy_from_user(&buf, ubuf, cnt))
-+		return -EFAULT;
-+	buf[cnt] = '\0';
-+
-+	if (kstrtouint(buf, 10, &value))
-+		return -EINVAL;
-+
-+	if (!value)
-+		return -EINVAL;
-+
-+	sysctl_sched_min_base_slice = value;
-+	sched_update_min_base_slice();
-+
-+	*ppos += cnt;
-+	return cnt;
-+}
-+
-+static int sched_min_base_slice_show(struct seq_file *m, void *v)
-+{
-+	seq_printf(m, "%d\n", sysctl_sched_min_base_slice);
-+	return 0;
-+}
-+
-+static int sched_min_base_slice_open(struct inode *inode, struct file *filp)
-+{
-+	return single_open(filp, sched_min_base_slice_show, NULL);
-+}
-+
-+static const struct file_operations sched_min_base_slice_fops = {
-+	.open		= sched_min_base_slice_open,
-+	.write		= sched_min_base_slice_write,
-+	.read		= seq_read,
-+	.llseek		= seq_lseek,
-+	.release	= single_release,
-+};
++#define DEFINE_SYSCTL_SCHED_FUNC(name, update_func) \
++static ssize_t sched_##name##_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) \
++{ \
++	char buf[16]; \
++	unsigned int value; \
++\
++	if (cnt > 15) \
++		cnt = 15; \
++\
++	if (copy_from_user(&buf, ubuf, cnt)) \
++		return -EFAULT; \
++	buf[cnt] = '\0'; \
++\
++	if (kstrtouint(buf, 10, &value)) \
++		return -EINVAL; \
++\
++	sysctl_sched_##name = value; \
++	sched_update_##update_func(); \
++\
++	*ppos += cnt; \
++	return cnt; \
++} \
++\
++static int sched_##name##_show(struct seq_file *m, void *v) \
++{ \
++	seq_printf(m, "%d\n", sysctl_sched_##name); \
++	return 0; \
++} \
++\
++static int sched_##name##_open(struct inode *inode, struct file *filp) \
++{ \
++	return single_open(filp, sched_##name##_show, NULL); \
++} \
++\
++static const struct file_operations sched_##name##_fops = { \
++	.open		= sched_##name##_open, \
++	.write		= sched_##name##_write, \
++	.read		= seq_read, \
++	.llseek		= seq_lseek, \
++	.release	= single_release, \
++};
++
++DEFINE_SYSCTL_SCHED_FUNC(min_base_slice, min_base_slice)
++DEFINE_SYSCTL_SCHED_FUNC(migration_cost_base, migration_cost)
++DEFINE_SYSCTL_SCHED_FUNC(migration_cost_step, migration_cost)
++#undef DEFINE_SYSCTL_SCHED_FUNC
+#else // !CONFIG_SCHED_BORE
static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
-@@ -213,7 +258,7 @@ static const struct file_operations sched_scaling_fops = {
+@@ -213,7 +261,7 @@ static const struct file_operations sched_scaling_fops = {
	.llseek		= seq_lseek,
	.release	= single_release,
};
@@ -672,13 +701,13 @@ index c057ef46c5f8..3cab39e34824 100644
#endif /* SMP */
#ifdef CONFIG_PREEMPT_DYNAMIC
-@@ -347,13 +392,20 @@ static __init int sched_init_debug(void)
+@@ -347,14 +395,25 @@ static __init int sched_init_debug(void)
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
+#ifdef CONFIG_SCHED_BORE
+	debugfs_create_file("min_base_slice_ns", 0644, debugfs_sched, NULL, &sched_min_base_slice_fops);
-+	debugfs_create_u32("base_slice_ns", 0400, debugfs_sched, &sysctl_sched_base_slice);
++	debugfs_create_u32("base_slice_ns", 0444, debugfs_sched, &sysctl_sched_base_slice);
+#else // !CONFIG_SCHED_BORE
	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+#endif // CONFIG_SCHED_BORE
@@ -687,13 +716,18 @@ index c057ef46c5f8..3cab39e34824 100644
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once); debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
+#if !defined(CONFIG_SCHED_BORE) +#ifdef CONFIG_SCHED_BORE
+ debugfs_create_file("migration_cost_base_ns", 0644, debugfs_sched, NULL, &sched_migration_cost_base_fops);
+ debugfs_create_file("migration_cost_step_ns", 0644, debugfs_sched, NULL, &sched_migration_cost_step_fops);
+ debugfs_create_u32("migration_cost_ns", 0444, debugfs_sched, &sysctl_sched_migration_cost);
+#else // !CONFIG_SCHED_BORE
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops); debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
+#endif // CONFIG_SCHED_BORE
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost); debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
+#endif // CONFIG_SCHED_BORE
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate); debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -596,6 +648,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) mutex_lock(&sched_domains_mutex);
@@ -596,6 +655,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
@@ -703,7 +737,7 @@ index c057ef46c5f8..3cab39e34824 100644
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
-@@ -1069,6 +1124,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+@@ -1069,6 +1131,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
	P(se.load.weight);
#ifdef CONFIG_SMP
@ -714,7 +748,7 @@ index c057ef46c5f8..3cab39e34824 100644
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
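For context, the files created above land in debugfs, so the new knobs can be inspected from user space once a patched kernel is booted. A minimal read-only sketch (assumes debugfs mounted at /sys/kernel/debug and root privileges; the paths come from the debugfs_create_* calls in this hunk):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* print one knob created by DEFINE_SYSCTL_SCHED_FUNC or debugfs_create_u32() */
static void show(const char *path)
{
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%-55s = %s", path, buf);
	}
	close(fd);
}

int main(void)
{
	show("/sys/kernel/debug/sched/min_base_slice_ns");      /* writable, 0644 */
	show("/sys/kernel/debug/sched/migration_cost_base_ns"); /* writable, 0644 */
	show("/sys/kernel/debug/sched/migration_cost_step_ns"); /* writable, 0644 */
	show("/sys/kernel/debug/sched/base_slice_ns");          /* derived, 0444 */
	show("/sys/kernel/debug/sched/migration_cost_ns");      /* derived, 0444 */
	return 0;
}

Writing a decimal value into one of the 0644 files goes through the macro-generated sched_*_write() handler, which stores the value and calls the matching sched_update_*() helper, so the derived 0444 values change immediately.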
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index a36e37a674e8..603d72b9e6e8 100644
+index a36e37a674e8..bdd7366db711 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -55,6 +55,8 @@
@ -726,13 +760,13 @@ index a36e37a674e8..603d72b9e6e8 100644
/*
* The initial- and re-scaling of tunables is configurable
*
-@@ -64,28 +66,31 @@
+@@ -64,28 +66,38 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
-+ * (BORE default SCHED_TUNABLESCALING_NONE = *1 constant)
-+ * (EEVDF default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
++ * BORE : default SCHED_TUNABLESCALING_NONE = *1 constant
++ * EEVDF: default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
*/
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
@@ -744,17 +778,18 @@ index a36e37a674e8..603d72b9e6e8 100644
* Minimal preemption granularity for CPU-bound tasks:
*
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
-+ * (BORE default: max(1 sec / HZ, min_base_slice) constant, units: nanoseconds)
-+ * (EEVDF default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
++ * BORE : base_slice = minimum multiple of nsecs_per_tick >= min_base_slice
++ * (default min_base_slice = 2000000 constant, units: nanoseconds)
++ * EEVDF: default 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds
*/
-#ifdef CONFIG_CACHY
-unsigned int sysctl_sched_base_slice = 350000ULL;
-static unsigned int normalized_sysctl_sched_base_slice = 350000ULL;
-#else
+#ifdef CONFIG_SCHED_BORE
-+unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ;
-+static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
-+unsigned int sysctl_sched_min_base_slice = CONFIG_MIN_BASE_SLICE_NS;
++const static uint nsecs_per_tick = 1000000000ULL / HZ;
++const_debug uint sysctl_sched_min_base_slice = CONFIG_MIN_BASE_SLICE_NS;
++__read_mostly uint sysctl_sched_base_slice = nsecs_per_tick;
+#else // !CONFIG_SCHED_BORE
unsigned int sysctl_sched_base_slice = 750000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
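The new BORE comment above pins base_slice to whole ticks: the smallest multiple of nsecs_per_tick that is at least min_base_slice. A stand-alone C sketch of that rounding (DIV_ROUND_UP open-coded; values are illustrative, with min_base_slice at its 2000000 ns default):

#include <stdio.h>

int main(void)
{
	const unsigned int hz_values[] = { 100, 250, 300, 1000 };
	const unsigned int min_base_slice = 2000000;	/* CONFIG_MIN_BASE_SLICE_NS default */
	unsigned int i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		unsigned int nsecs_per_tick = 1000000000u / hz_values[i];
		/* DIV_ROUND_UP(min_base_slice, nsecs_per_tick), clamped to >= 1 */
		unsigned int mult = (min_base_slice + nsecs_per_tick - 1) / nsecs_per_tick;
		if (mult < 1)
			mult = 1;
		printf("HZ=%4u: tick = %7u ns -> base_slice = %7u ns\n",
		       hz_values[i], nsecs_per_tick, nsecs_per_tick * mult);
	}
	return 0;	/* HZ=250 -> 4000000 ns; HZ=1000 -> 2000000 ns */
}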
@@ -764,12 +799,18 @@ index a36e37a674e8..603d72b9e6e8 100644
-#ifdef CONFIG_CACHY
-const_debug unsigned int sysctl_sched_migration_cost = 300000UL;
-#else
++#ifdef CONFIG_SCHED_BORE
++const_debug uint sysctl_sched_migration_cost_base = CONFIG_MIGRATION_COST_BASE_NS;
++const_debug uint sysctl_sched_migration_cost_step = CONFIG_MIGRATION_COST_STEP_NS;
++__read_mostly uint sysctl_sched_migration_cost = CONFIG_MIGRATION_COST_BASE_NS;
++#else // !CONFIG_SCHED_BORE
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-#endif
++#endif // CONFIG_SCHED_BORE
static int __init setup_sched_thermal_decay_shift(char *str)
{
-@@ -130,12 +135,8 @@ int __weak arch_asym_cpu_priority(int cpu)
+@@ -130,12 +142,8 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
@@ -782,34 +823,48 @@ index a36e37a674e8..603d72b9e6e8 100644
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
-@@ -201,6 +202,18 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
+@@ -201,6 +209,18 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
+#ifdef CONFIG_SCHED_BORE
-+static void update_sysctl(void) {
-+	unsigned int base_slice = configured_sched_base_slice;
-+	unsigned int min_base_slice = sysctl_sched_min_base_slice;
-+
-+	if (min_base_slice)
-+		base_slice *= DIV_ROUND_UP(min_base_slice, base_slice);
-+
-+	sysctl_sched_base_slice = base_slice;
-+}
-+void sched_update_min_base_slice(void) { update_sysctl(); }
++static void auto_calculate_base_slice(void) {
++	sysctl_sched_base_slice = nsecs_per_tick *
++		max(1UL, DIV_ROUND_UP(sysctl_sched_min_base_slice, nsecs_per_tick));
++}
++static void auto_calculate_migration_cost(void) {
++	sysctl_sched_migration_cost = sysctl_sched_migration_cost_base +
++		ilog2(num_online_cpus()) * sysctl_sched_migration_cost_step;
++}
++void sched_update_min_base_slice(void) { auto_calculate_base_slice(); }
++void sched_update_migration_cost(void) { auto_calculate_migration_cost(); }
+#else // !CONFIG_SCHED_BORE
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
-@@ -231,6 +244,7 @@ static void update_sysctl(void)
-	SET_SYSCTL(sched_base_slice);
-#undef SET_SYSCTL
-}
-+#endif // CONFIG_SCHED_BORE
+@@ -221,15 +241,21 @@ static unsigned int get_update_sysctl_factor(void)
+	return factor;
+}
++#endif // CONFIG_SCHED_BORE
+
+static void update_sysctl(void)
+{
++#ifdef CONFIG_SCHED_BORE
++	auto_calculate_base_slice();
++	auto_calculate_migration_cost();
++#else // !CONFIG_SCHED_BORE
+	unsigned int factor = get_update_sysctl_factor();
+#define SET_SYSCTL(name) \
+	(sysctl_##name = (factor) * normalized_sysctl_##name)
+	SET_SYSCTL(sched_base_slice);
+#undef SET_SYSCTL
++#endif // CONFIG_SCHED_BORE
+}

void __init sched_init_granularity(void)
{
@@ -708,6 +734,9 @@ static s64 entity_lag(u64 avruntime, struct sched_entity *se)
	vlag = avruntime - se->vruntime;
	limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
@ -819,7 +874,7 @@ index a36e37a674e8..603d72b9e6e8 100644
	return clamp(vlag, -limit, limit);
}
-@@ -909,6 +926,10 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+@@ -909,6 +938,10 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
* until it gets a new slice. See the HACK in set_next_entity().
*/
	if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
@ -830,7 +885,7 @@ index a36e37a674e8..603d72b9e6e8 100644
		return curr;
	/* Pick the leftmost entity if it's eligible */
-@@ -967,6 +988,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+@@ -967,6 +1000,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
@ -838,7 +893,7 @@ index a36e37a674e8..603d72b9e6e8 100644
int sched_update_scaling(void)
{
	unsigned int factor = get_update_sysctl_factor();
-@@ -978,6 +1000,7 @@ int sched_update_scaling(void)
+@@ -978,6 +1012,7 @@ int sched_update_scaling(void)
	return 0;
}
@ -846,7 +901,7 @@ index a36e37a674e8..603d72b9e6e8 100644
#endif
#endif
-@@ -1178,6 +1201,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
+@@ -1178,6 +1213,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
	if (unlikely(delta_exec <= 0))
		return;
@ -857,7 +912,7 @@ index a36e37a674e8..603d72b9e6e8 100644
	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_deadline(cfs_rq, curr);
	update_min_vruntime(cfs_rq);
-@@ -3804,7 +3831,7 @@ static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
+@@ -3804,7 +3843,7 @@ static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
	se->deadline = avruntime + vslice;
}
@ -866,7 +921,7 @@ index a36e37a674e8..603d72b9e6e8 100644
			  unsigned long weight)
{
	bool curr = cfs_rq->curr == se;
-@@ -5212,6 +5239,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5212,6 +5251,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*
* EEVDF: placement strategy #1 / #2
*/
@ -876,7 +931,7 @@ index a36e37a674e8..603d72b9e6e8 100644
	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
		struct sched_entity *curr = cfs_rq->curr;
		unsigned long load;
-@@ -5282,6 +5312,16 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5282,6 +5324,16 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
se->vruntime = vruntime - lag; se->vruntime = vruntime - lag;
@ -893,7 +948,7 @@ index a36e37a674e8..603d72b9e6e8 100644
/* /*
* When joining the competition; the existing tasks will be, * When joining the competition; the existing tasks will be,
* on average, halfway through their slice, as such start tasks * on average, halfway through their slice, as such start tasks
@@ -5391,6 +5431,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); @@ -5391,6 +5443,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{ {
@ -901,7 +956,7 @@ index a36e37a674e8..603d72b9e6e8 100644
int action = UPDATE_TG; int action = UPDATE_TG;
if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) if (entity_is_task(se) && task_on_rq_migrating(task_of(se)))
-@@ -5418,6 +5459,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+@@ -5418,6 +5471,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	clear_buddies(cfs_rq, se);
	update_entity_lag(cfs_rq, se);
@ -913,7 +968,7 @@ index a36e37a674e8..603d72b9e6e8 100644
	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	se->on_rq = 0;
-@@ -6869,6 +6915,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6869,6 +6927,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	bool was_sched_idle = sched_idle_rq(rq);
	util_est_dequeue(&rq->cfs, p);
@ -928,7 +983,7 @@ index a36e37a674e8..603d72b9e6e8 100644
	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
-@@ -8651,16 +8705,25 @@ static void yield_task_fair(struct rq *rq)
+@@ -8651,16 +8717,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@ -954,7 +1009,7 @@ index a36e37a674e8..603d72b9e6e8 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
-@@ -12725,6 +12788,9 @@ static void task_fork_fair(struct task_struct *p)
+@@ -12725,6 +12800,9 @@ static void task_fork_fair(struct task_struct *p)
	curr = cfs_rq->curr;
	if (curr)
		update_curr(cfs_rq);
@ -964,7 +1019,7 @@ index a36e37a674e8..603d72b9e6e8 100644
	place_entity(cfs_rq, se, ENQUEUE_INITIAL);
	rq_unlock(rq, &rf);
}
-@@ -12837,6 +12903,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
+@@ -12837,6 +12915,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
@ -991,30 +1046,43 @@ index 143f55df890b..e97b7b68bdd3 100644
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 48d893de632b..62e7e9e5fd9c 100644
+index 48d893de632b..8c3fa2ffa177 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2065,7 +2065,11 @@ static inline void update_sched_domain_debugfs(void) { }
+@@ -2065,7 +2065,12 @@ static inline void update_sched_domain_debugfs(void) { }
static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif
+#ifdef CONFIG_SCHED_BORE
+extern void sched_update_min_base_slice(void);
++extern void sched_update_migration_cost(void);
+#else // !CONFIG_SCHED_BORE
extern int sched_update_scaling(void);
+#endif // CONFIG_SCHED_BORE
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
-@@ -2738,6 +2742,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
-extern const_debug unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_base_slice;
+@@ -2735,9 +2740,20 @@ extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags);
+#endif
+extern const_debug unsigned int sysctl_sched_nr_migrate;
+#ifdef CONFIG_SCHED_BORE
-+extern unsigned int sysctl_sched_min_base_slice;
++extern const_debug unsigned int sysctl_sched_migration_cost_base;
++extern const_debug unsigned int sysctl_sched_migration_cost_step;
++extern __read_mostly unsigned int sysctl_sched_migration_cost;
++#else // !CONFIG_SCHED_BORE
+extern const_debug unsigned int sysctl_sched_migration_cost;
++#endif // CONFIG_SCHED_BORE
++#ifdef CONFIG_SCHED_BORE
++extern const_debug unsigned int sysctl_sched_min_base_slice;
++extern __read_mostly unsigned int sysctl_sched_base_slice;
++#else // !CONFIG_SCHED_BORE
+extern unsigned int sysctl_sched_base_slice;
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
--
2.47.0