From 06140f2f7a609e07d9fc7d1c79343772ead98dbd Mon Sep 17 00:00:00 2001
From: Piotr Gorski
Date: Sun, 23 Jul 2023 09:44:46 +0200
Subject: [PATCH] bore-eevdf

Signed-off-by: Piotr Gorski
---
 include/linux/sched.h   |  10 ++
 init/Kconfig            |  20 ++++
 kernel/sched/core.c     | 117 +++++++++++++++++++++++
 kernel/sched/debug.c    |   4 +
 kernel/sched/fair.c     | 203 ++++++++++++++++++++++++++++++++++++++--
 kernel/sched/features.h |   4 +
 kernel/sched/sched.h    |   1 +
 7 files changed, 351 insertions(+), 8 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 88c3e7ba8..6b4c553ae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -560,6 +560,12 @@ struct sched_entity {
 	u64				sum_exec_runtime;
 	u64				prev_sum_exec_runtime;
 	u64				vruntime;
+#ifdef CONFIG_SCHED_BORE
+	u64				prev_burst_time;
+	u64				burst_time;
+	u64				max_burst_time;
+	u8				penalty_score;
+#endif // CONFIG_SCHED_BORE
 	s64				vlag;
 	u64				slice;
 
@@ -991,6 +997,10 @@ struct task_struct {
 	struct list_head		children;
 	struct list_head		sibling;
 	struct task_struct		*group_leader;
+#ifdef CONFIG_SCHED_BORE
+	u64				child_burst_cache;
+	u64				child_burst_last_cached;
+#endif // CONFIG_SCHED_BORE
 
 	/*
 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
diff --git a/init/Kconfig b/init/Kconfig
index b6d38eccc..e90546df3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1277,6 +1277,26 @@ config CHECKPOINT_RESTORE
 
 	  If unsure, say N here.
 
+config SCHED_BORE
+	bool "Burst-Oriented Response Enhancer"
+	default y
+	help
+	  In desktop and mobile computing, one may prefer that interactive
+	  tasks stay responsive no matter what runs in the background.
+
+	  Enabling this kernel feature modifies the scheduler to discriminate
+	  between tasks by their burst time (runtime since they last went to
+	  sleep or yielded) and to prioritize the less bursty ones.
+	  Such tasks typically include the window compositor, widget backends,
+	  terminal emulators, video playback and games.
+	  At a small cost to scheduling fairness, this may improve
+	  responsiveness, especially under heavy background workloads.
+
+	  You can turn it off by setting the sysctl kernel.sched_bore = 0.
+	  Enabling this feature implies NO_GENTLE_FAIR_SLEEPERS by default.
+
+	  If unsure, say Y here.
+
 config SCHED_AUTOGROUP
 	bool "Automatic process group scheduling"
 	select CGROUPS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df2f22a97..df8b76e2c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4490,6 +4490,112 @@ int wake_up_state(struct task_struct *p, unsigned int state)
 	return try_to_wake_up(p, state, 0);
 }
 
+#ifdef CONFIG_SCHED_BORE
+#define CHILD_BURST_CUTOFF_BITS 9
+extern unsigned int sched_burst_cache_lifetime;
+extern unsigned int sched_burst_fork_atavistic;
+
+void __init sched_init_bore(void) {
+	init_task.child_burst_cache = 0;
+	init_task.child_burst_last_cached = 0;
+	init_task.se.prev_burst_time = 0;
+	init_task.se.burst_time = 0;
+	init_task.se.max_burst_time = 0;
+}
+
+inline void sched_fork_bore(struct task_struct *p) {
+	p->child_burst_cache = 0;
+	p->child_burst_last_cached = 0;
+	p->se.burst_time = 0;
+}
+
+static u32 count_child_tasks(struct task_struct *p) {
+	struct task_struct *child;
+	u32 cnt = 0;
+	list_for_each_entry(child, &p->children, sibling) { cnt++; }
+	return cnt;
+}
+
+static inline bool child_burst_cache_expired(struct task_struct *p, u64 now) {
+	return (p->child_burst_last_cached + sched_burst_cache_lifetime < now);
+}
+
+static void __update_child_burst_cache(
+	struct task_struct *p, u32 cnt, u64 sum, u64 now) {
+	u64 avg = 0;
+	if (cnt) avg = div_u64(sum, cnt) << CHILD_BURST_CUTOFF_BITS;
+	p->child_burst_cache = max(avg, p->se.max_burst_time);
+	p->child_burst_last_cached = now;
+}
+
+static void update_child_burst_cache(struct task_struct *p, u64 now) {
+	struct task_struct *child;
+	u32 cnt = 0;
+	u64 sum = 0;
+
+	list_for_each_entry(child, &p->children, sibling) {
+		cnt++;
+		sum += child->se.max_burst_time >> CHILD_BURST_CUTOFF_BITS;
+	}
+
+	__update_child_burst_cache(p, cnt, sum, now);
+}
+
+static void update_child_burst_cache_atavistic(
+	struct task_struct *p, u64 now, u32 depth, u32 *acnt, u64 *asum) {
+	struct task_struct *child, *dec;
+	u32 cnt = 0, dcnt = 0;
+	u64 sum = 0;
+
+	list_for_each_entry(child, &p->children, sibling) {
+		dec = child;
+		while ((dcnt = count_child_tasks(dec)) == 1)
+			dec = list_first_entry(&dec->children, struct task_struct, sibling);
+
+		if (!dcnt || !depth) {
+			cnt++;
+			sum += dec->se.max_burst_time >> CHILD_BURST_CUTOFF_BITS;
+		} else {
+			if (child_burst_cache_expired(dec, now))
+				update_child_burst_cache_atavistic(dec, now, depth - 1, &cnt, &sum);
+			else {
+				cnt += dcnt;
+				sum += (dec->child_burst_cache >> CHILD_BURST_CUTOFF_BITS) * dcnt;
+			}
+		}
+	}
+
+	__update_child_burst_cache(p, cnt, sum, now);
+	*acnt += cnt;
+	*asum += sum;
+}
+
+static void update_task_initial_burst_time(struct task_struct *p) {
+	struct sched_entity *se = &p->se;
+	struct task_struct *anc = p->real_parent;
+	u64 now = ktime_get_ns();
+	u32 cnt = 0;
+	u64 sum = 0;
+
+	read_lock(&tasklist_lock);
+
+	if (sched_burst_fork_atavistic) {
+		while ((anc->real_parent != anc) && (count_child_tasks(anc) == 1))
+			anc = anc->real_parent;
+		if (child_burst_cache_expired(anc, now))
+			update_child_burst_cache_atavistic(
+				anc, now, sched_burst_fork_atavistic - 1, &cnt, &sum);
+	} else
+		if (child_burst_cache_expired(anc, now))
+			update_child_burst_cache(anc, now);
+
+	read_unlock(&tasklist_lock);
+
+	se->max_burst_time = se->prev_burst_time =
+		max(se->prev_burst_time, anc->child_burst_cache);
+}
+#endif // CONFIG_SCHED_BORE
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -4506,6 +4612,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+#ifdef CONFIG_SCHED_BORE
+	sched_fork_bore(p);
+#endif // CONFIG_SCHED_BORE
 	p->se.vlag			= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
@@ -4827,6 +4936,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 
 void sched_post_fork(struct task_struct *p)
 {
+#ifdef CONFIG_SCHED_BORE
+	update_task_initial_burst_time(p);
+#endif // CONFIG_SCHED_BORE
 	uclamp_post_fork(p);
 }
 
@@ -9968,6 +10080,11 @@ void __init sched_init(void)
 	BUG_ON(&dl_sched_class != &stop_sched_class + 1);
 #endif
 
+#ifdef CONFIG_SCHED_BORE
+	sched_init_bore();
+	printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 2.5.3 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
 	wait_bit_init();
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5c743bcb3..755ef4c8d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -348,6 +348,7 @@ static __init int sched_init_debug(void)
 #endif
 
 	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);
+	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);
 
 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
 	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
@@ -595,6 +596,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
 		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
 
+#ifdef CONFIG_SCHED_BORE
+	SEQ_printf(m, " %2d", p->se.penalty_score);
+#endif
 #ifdef CONFIG_NUMA_BALANCING
 	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
 #endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 15167f12b..51f1b7a67 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
  *
  *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ *  Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ *  Copyright (C) 2021-2023 Masahito Suzuki
  */
 #include <linux/energy_model.h>
 #include <linux/mmap_lock.h>
@@ -66,17 +69,17 @@
  * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
  * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
  *
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
+ * (default SCHED_TUNABLESCALING_NONE = *1)
  */
-unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
+unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
 
 /*
  * Minimal preemption granularity for CPU-bound tasks:
  *
- * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
+ * (default: 3 msec * 1, units: nanoseconds)
  */
-unsigned int sysctl_sched_base_slice			= 750000ULL;
-static unsigned int normalized_sysctl_sched_base_slice	= 750000ULL;
+unsigned int sysctl_sched_base_slice			= 3000000ULL;
+static unsigned int normalized_sysctl_sched_base_slice	= 3000000ULL;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
@@ -84,8 +87,76 @@ static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
  */
 unsigned int sysctl_sched_child_runs_first __read_mostly;
 
+/*
+ * SCHED_OTHER wake-up granularity.
+ *
+ * This option delays the preemption effects of decoupled workloads
+ * and reduces their over-scheduling. Synchronous workloads will still
+ * have immediate wakeup/sleep latencies.
+ *
+ * (default: 3.2 msec * 1, units: nanoseconds)
+ */
+unsigned int sysctl_sched_wakeup_granularity			= 3200000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity	= 3200000UL;
+
 const_debug unsigned int sysctl_sched_migration_cost	= 500000UL;
 
+#ifdef CONFIG_SCHED_BORE
+unsigned int __read_mostly sched_bore = 1;
+unsigned int __read_mostly sched_burst_cache_lifetime = 60000000;
+unsigned int __read_mostly sched_burst_penalty_offset = 12;
+unsigned int __read_mostly sched_burst_penalty_scale = 1292;
+unsigned int __read_mostly sched_burst_smoothness = 2;
+unsigned int __read_mostly sched_burst_fork_atavistic = 2;
+static int three          = 3;
+static int sixty_four     = 64;
+static int maxval_12_bits = 4095;
+
+#define FIXED_SHIFT 10
+#define FIXED_ONE (1 << FIXED_SHIFT)
+typedef u32 fixed;
+
+static void update_burst_score(struct sched_entity *se) {
+	u64 burst_time = se->max_burst_time;
+
+	int msb = fls64(burst_time);
+	fixed integer_part = msb << FIXED_SHIFT;
+	fixed fractional_part = burst_time << (64 - msb) << 1 >> (64 - FIXED_SHIFT);
+	fixed greed = integer_part | fractional_part;
+
+	fixed tolerance = sched_burst_penalty_offset << FIXED_SHIFT;
+	fixed penalty = max(0, (s32)greed - (s32)tolerance);
+	fixed scaled_penalty = penalty * sched_burst_penalty_scale >> 10;
+
+	u8 score = min(39U, scaled_penalty >> FIXED_SHIFT);
+	se->penalty_score = score;
+}
+
+static inline u64 penalty_scale(u64 delta, struct sched_entity *se) {
+	return mul_u64_u32_shr(delta, sched_prio_to_wmult[se->penalty_score], 22);
+}
+
+static inline u64 __binary_smooth(u64 new, u64 old, unsigned int smoothness) {
+	return (new <= old) ? new : (new + old * ((1 << smoothness) - 1)) >> smoothness;
+}
+
+void restart_burst(struct sched_entity *se) {
+	se->max_burst_time = se->prev_burst_time = __binary_smooth(
+		se->burst_time, se->prev_burst_time, sched_burst_smoothness);
+	se->burst_time = 0;
+}
+
+#define calc_delta_fair(delta, se) __calc_delta_fair(delta, se, true)
+#define calc_delta_fair_unscaled(delta, se) __calc_delta_fair(delta, se, false)
+static inline u64
+__calc_delta_fair(u64 delta, struct sched_entity *se, bool bscale);
+
+static s64 wakeup_preempt_backstep_delta(u64 rtime, struct sched_entity *se) {
+	u64 delta = calc_delta_fair_unscaled(rtime, se);
+	return delta - penalty_scale(delta, se);
+}
+#endif // CONFIG_SCHED_BORE
+
 int sched_thermal_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
 {
@@ -145,6 +216,60 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table sched_fair_sysctls[] = {
+#ifdef CONFIG_SCHED_BORE
+	{
+		.procname	= "sched_bore",
+		.data		= &sched_bore,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{
+		.procname	= "sched_burst_cache_lifetime",
+		.data		= &sched_burst_cache_lifetime,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "sched_burst_fork_atavistic",
+		.data		= &sched_burst_fork_atavistic,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &three,
+	},
+	{
+		.procname	= "sched_burst_penalty_offset",
+		.data		= &sched_burst_penalty_offset,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &sixty_four,
+	},
+	{
+		.procname	= "sched_burst_penalty_scale",
+		.data		= &sched_burst_penalty_scale,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &maxval_12_bits,
+	},
+	{
+		.procname	= "sched_burst_smoothness",
+		.data		= &sched_burst_smoothness,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= &three,
+	},
+#endif // CONFIG_SCHED_BORE
 	{
 		.procname	= "sched_child_runs_first",
 		.data		= &sysctl_sched_child_runs_first,
@@ -238,6 +363,7 @@ static void update_sysctl(void)
 #define SET_SYSCTL(name) \
 	(sysctl_##name = (factor) * normalized_sysctl_##name)
 	SET_SYSCTL(sched_base_slice);
+	SET_SYSCTL(sched_wakeup_granularity);
 #undef SET_SYSCTL
 }
 
@@ -308,11 +434,19 @@ static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight
 /*
  * delta /= w
  */
+#ifdef CONFIG_SCHED_BORE
+static inline u64
+__calc_delta_fair(u64 delta, struct sched_entity *se, bool bscale)
+#else // CONFIG_SCHED_BORE
 static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
+#endif // CONFIG_SCHED_BORE
 {
 	if (unlikely(se->load.weight != NICE_0_LOAD))
 		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
+#ifdef CONFIG_SCHED_BORE
+	if (bscale && likely(sched_bore)) delta = penalty_scale(delta, se);
+#endif // CONFIG_SCHED_BORE
 
 	return delta;
 }
@@ -708,7 +842,11 @@ void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	SCHED_WARN_ON(!se->on_rq);
 	lag = avg_vruntime(cfs_rq) - se->vruntime;
 
+#ifdef CONFIG_SCHED_BORE
+	limit = calc_delta_fair_unscaled(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#else // CONFIG_SCHED_BORE
 	limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#endif // CONFIG_SCHED_BORE
 	se->vlag = clamp(lag, -limit, limit);
 }
 
@@ -946,6 +1084,7 @@ int sched_update_scaling(void)
 #define WRT_SYSCTL(name) \
 	(normalized_sysctl_##name = sysctl_##name / (factor))
 	WRT_SYSCTL(sched_base_slice);
+	WRT_SYSCTL(sched_wakeup_granularity);
 #undef WRT_SYSCTL
 
 	return 0;
@@ -1123,6 +1262,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	curr->sum_exec_runtime += delta_exec;
 	schedstat_add(cfs_rq->exec_clock, delta_exec);
 
+#ifdef CONFIG_SCHED_BORE
+	curr->burst_time += delta_exec;
+	curr->max_burst_time = max(curr->max_burst_time, curr->burst_time);
+	update_burst_score(curr);
+#endif // CONFIG_SCHED_BORE
 	curr->vruntime += calc_delta_fair(delta_exec, curr);
 	update_deadline(cfs_rq, curr);
 	update_min_vruntime(cfs_rq);
@@ -5237,6 +5381,9 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	se->prev_sum_exec_runtime = se->sum_exec_runtime;
 }
 
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
+
 /*
  * Pick the next process, keeping these things in mind, in this order:
  * 1) keep things fair between processes/task groups
@@ -5247,14 +5394,16 @@
 static struct sched_entity *
 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
+	struct sched_entity *candidate = pick_eevdf(cfs_rq);
 	/*
 	 * Enabling NEXT_BUDDY will affect latency but not fairness.
 	 */
 	if (sched_feat(NEXT_BUDDY) &&
-	    cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
+	    cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next) &&
+	    wakeup_preempt_entity(cfs_rq->next, candidate) < 1)
 		return cfs_rq->next;
 
-	return pick_eevdf(cfs_rq);
+	return candidate;
 }
 
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -6522,6 +6671,38 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	hrtick_update(rq);
 }
 
+static unsigned long wakeup_gran(struct sched_entity *se)
+{
+	unsigned long gran = sysctl_sched_wakeup_granularity;
+#ifdef CONFIG_SCHED_BORE
+	return calc_delta_fair_unscaled(gran, se);
+#else // CONFIG_SCHED_BORE
+	return calc_delta_fair(gran, se);
+#endif // CONFIG_SCHED_BORE
+}
+
+static int
+wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
+{
+	s64 gran, vdiff = curr->vruntime - se->vruntime;
+#ifdef CONFIG_SCHED_BORE
+	if (likely(sched_bore)) {
+		u64 rtime = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+		vdiff += wakeup_preempt_backstep_delta(rtime, curr)
+		       - wakeup_preempt_backstep_delta(rtime, se);
+	}
+#endif // CONFIG_SCHED_BORE
+
+	if (vdiff <= 0)
+		return -1;
+
+	gran = wakeup_gran(se);
+	if (vdiff > gran)
+		return 1;
+
+	return 0;
+}
+
 static void set_next_buddy(struct sched_entity *se);
 
 /*
@@ -6540,6 +6721,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	util_est_dequeue(&rq->cfs, p);
 
 	for_each_sched_entity(se) {
+#ifdef CONFIG_SCHED_BORE
+		if (task_sleep) restart_burst(se);
+#endif // CONFIG_SCHED_BORE
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
 
@@ -8030,7 +8214,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	/*
 	 * XXX pick_eevdf(cfs_rq) != se ?
 	 */
-	if (pick_eevdf(cfs_rq) == pse)
+	if ((pick_eevdf(cfs_rq) == pse) && (wakeup_preempt_entity(se, pse) == 1))
 		goto preempt;
 
 	return;
@@ -8243,6 +8427,9 @@ static void yield_task_fair(struct rq *rq)
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	struct sched_entity *se = &curr->se;
+#ifdef CONFIG_SCHED_BORE
+	restart_burst(se);
+#endif // CONFIG_SCHED_BORE
 
 	/*
 	 * Are we the only task in the tree?
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index ca95044a7..a7d34d1b2 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -13,7 +13,11 @@ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
 * wakeup-preemption), since its likely going to consume data we
 * touched, increases cache locality.
 */
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(NEXT_BUDDY, true)
+#else // CONFIG_SCHED_BORE
 SCHED_FEAT(NEXT_BUDDY, false)
+#endif // CONFIG_SCHED_BORE
 
 /*
  * Consider buddies to be cache hot, decreases the likeliness of a
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index abf5a48b5..a9f9e80a1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2504,6 +2504,7 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
 extern const_debug unsigned int sysctl_sched_migration_cost;
 
 extern unsigned int sysctl_sched_base_slice;
+extern unsigned int sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHED_DEBUG
 extern int sysctl_resched_latency_warn_ms;
-- 
2.41.0.159.g0bfa463d37
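
Appendix (illustrative, not part of the patch): the burst "penalty score" computed above is, in effect, a saturating fixed-point log2 of the task's longest recent burst. update_burst_score() takes the integer part of the logarithm from fls64() and approximates the fraction with the ten mantissa bits below the MSB, subtracts the tolerance (sched_burst_penalty_offset << 10), scales by sched_burst_penalty_scale / 1024, and clamps the result to 0..39 so it can index sched_prio_to_wmult[] in penalty_scale(). The standalone userspace sketch below mirrors that arithmetic under the patch's default tunables (offset 12, scale 1292); the file name, fls64_model() helper, and sample burst times are mine, not the patch's.

/*
 * bore_score.c - userspace model of BORE's update_burst_score() math.
 * Illustrative only; not kernel code. fls64_model() stands in for the
 * kernel's fls64(). Build: cc -O2 -o bore_score bore_score.c
 */
#include <stdio.h>
#include <stdint.h>

#define FIXED_SHIFT 10	/* 10 fractional bits, as in the patch */

/* 1-based index of the most significant set bit; 0 for x == 0. */
static int fls64_model(uint64_t x)
{
	int msb = 0;
	while (x) {
		msb++;
		x >>= 1;
	}
	return msb;
}

/*
 * Mirrors update_burst_score(): approximate log2(burst) in 10-bit fixed
 * point, minus the (offset << 10) tolerance, scaled by scale/1024 and
 * clamped to the score range 0..39.
 */
static unsigned int burst_score(uint64_t burst_ns,
				unsigned int offset, unsigned int scale)
{
	int msb;
	uint32_t integer_part, fractional_part, greed, scaled;
	int32_t penalty;

	if (!burst_ns)		/* avoid a 64-bit shift by 64 below */
		return 0;

	msb = fls64_model(burst_ns);
	integer_part = (uint32_t)msb << FIXED_SHIFT;
	/* Move the MSB to bit 63, drop it, keep the next 10 bits. */
	fractional_part = (uint32_t)
		(burst_ns << (64 - msb) << 1 >> (64 - FIXED_SHIFT));
	greed = integer_part | fractional_part;

	penalty = (int32_t)greed - (int32_t)(offset << FIXED_SHIFT);
	if (penalty < 0)
		penalty = 0;
	scaled = (uint32_t)penalty * scale >> 10;

	return scaled >> FIXED_SHIFT < 39 ? scaled >> FIXED_SHIFT : 39;
}

int main(void)
{
	/* Patch defaults: sched_burst_penalty_offset=12, _scale=1292. */
	static const uint64_t bursts[] = {
		100000,		/* 0.1 ms */
		1000000,	/* 1 ms   */
		10000000,	/* 10 ms  */
		1000000000,	/* 1 s    */
	};
	unsigned int i;

	for (i = 0; i < sizeof(bursts) / sizeof(bursts[0]); i++)
		printf("burst %10llu ns -> penalty score %u\n",
		       (unsigned long long)bursts[i],
		       burst_score(bursts[i], 12, 1292));
	return 0;
}

With the defaults this yields scores 6, 11, 15 and 23 for the four samples. Since penalty_scale() multiplies the task's runtime delta by sched_prio_to_wmult[score] >> 22, and that table steps by roughly 1.25x per entry, each additional score point inflates vruntime accrual by about 25%, comparable to one nice level, with score 20 the 1:1 point.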