Linux 6.3 release - update cachy patch

ferrreo 2023-04-24 14:51:26 +01:00
parent 7241695ec0
commit c835b111d3


@@ -1,4 +1,4 @@
-From bd9c7beae4e03db956033c99f6640bf4dedbb6f6 Mon Sep 17 00:00:00 2001
+From 9a8d83b362088f3eca0fbde0b2cc1a66ee142103 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Mon, 6 Mar 2023 18:43:03 +0100
 Subject: [PATCH 01/12] bbr2
@@ -3283,7 +3283,7 @@ index cb79127f45c3..70e4de876a7f 100644
 --
 2.40.0
 
-From bd314565dd3b8ca45d413c352eef71d019e52420 Mon Sep 17 00:00:00 2001
+From 44f2b9d76af75aab59d14b879403aa02cecb2b32 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Mon, 17 Apr 2023 18:21:50 +0200
 Subject: [PATCH 02/12] bfq
@@ -3329,7 +3329,7 @@ index d9ed3108c17a..66146bbcd4af 100644
 --
 2.40.0
 
-From d7ea7df27f510a3e4a89f373dad28f4b3fa8f2c5 Mon Sep 17 00:00:00 2001
+From f96cbff0d52f3343956e5fb0f8e481ac33ad47fa Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Sat, 22 Apr 2023 11:43:07 +0200
 Subject: [PATCH 03/12] cachy
@@ -9259,7 +9259,7 @@ index ab0c5bd1a60f..f4989f706d7f 100644
 --
 2.40.0
 
-From fb441def602c6d8b8da77b0d7c9a649d3e412eba Mon Sep 17 00:00:00 2001
+From d31de1cb3de2457a3d287d96b456e1a3732165e4 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Sat, 22 Apr 2023 11:43:21 +0200
 Subject: [PATCH 04/12] fixes
@@ -13492,7 +13492,7 @@ index d8b5b4930412..05048ebc24d8 100644
 --
 2.40.0
 
-From d493dcc3c8eff5edeee5bfdc341fecee93c21649 Mon Sep 17 00:00:00 2001
+From 7fef8f4cdc6f7d630f4d11b805f4a7707b9b5e7b Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Mon, 17 Apr 2023 18:32:06 +0200
 Subject: [PATCH 05/12] Implement amd-pstate guided driver
@@ -14159,7 +14159,7 @@ index f5f22418e64b..c10ebf8c42e6 100644
 --
 2.40.0
 
-From 99e68d2a36fdd134c7a3c1ba47881696b10280cb Mon Sep 17 00:00:00 2001
+From d2c339f6d5b8f4f030e6f4cfcf7fe12277dd5e39 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Mon, 17 Apr 2023 18:28:52 +0200
 Subject: [PATCH 06/12] ksm
@@ -14659,7 +14659,7 @@ index 340125d08c03..36e756355f04 100644
 --
 2.40.0
 
-From eec2ce146a63bd57840056f5a6debb079322ff11 Mon Sep 17 00:00:00 2001
+From f64a9cb164da867b7437208dd63cf58a4faa33f2 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Mon, 24 Apr 2023 12:49:39 +0200
 Subject: [PATCH 07/12] maple-lru
@@ -15329,7 +15329,7 @@ index 4c89ff333f6f..9286d3baa12d 100644
 --
 2.40.0
 
-From 42123c91ae8f93d2778a23995fe406f00ea53aa4 Mon Sep 17 00:00:00 2001
+From 3d3a131234eb5f74bcd6bd84c60aa0c9ccd97eac Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Mon, 24 Apr 2023 12:50:36 +0200
 Subject: [PATCH 08/12] Per-VMA locks
@@ -17459,9 +17459,9 @@ index 7f22844ed704..e030d63c031a 100644
 --
 2.40.0
 
-From 618bb1079e41104cde2777dda19cb34f5336958b Mon Sep 17 00:00:00 2001
+From 888661765419ab8a18ee6597356b0a0b79c2de90 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
-Date: Mon, 24 Apr 2023 14:35:35 +0200
+Date: Mon, 24 Apr 2023 15:36:39 +0200
 Subject: [PATCH 09/12] sched
 
 Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -17474,13 +17474,13 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
 kernel/sched/core.c | 4 +-
 kernel/sched/deadline.c | 1 +
 kernel/sched/debug.c | 1 +
-kernel/sched/fair.c | 275 ++++++++++++++++++++-------------
+kernel/sched/fair.c | 265 ++++++++++++++++++++-------------
 kernel/sched/features.h | 1 +
-kernel/sched/pelt.c | 60 +++++++
-kernel/sched/pelt.h | 42 ++++-
+kernel/sched/pelt.c | 60 ++++++++
+kernel/sched/pelt.h | 42 +++++-
 kernel/sched/rt.c | 4 +
 kernel/sched/sched.h | 23 ++-
-14 files changed, 312 insertions(+), 137 deletions(-)
+14 files changed, 302 insertions(+), 137 deletions(-)
 
 diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
 index 9ff480e94511..6510883c5e81 100644
@@ -17639,7 +17639,7 @@ index 1637b65ba07a..8d64fba16cfe 100644
 	P(se.avg.load_sum);
 	P(se.avg.runnable_sum);
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 96c66b50ee48..59feda52fb4a 100644
+index 96c66b50ee48..0f92281fbed9 100644
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
 @@ -1082,6 +1082,23 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -17717,74 +17717,64 @@ index 96c66b50ee48..59feda52fb4a 100644
 	hrtick_update(rq);
 }
-@@ -6538,6 +6551,46 @@ static int wake_wide(struct task_struct *p)
+@@ -6538,6 +6551,23 @@ static int wake_wide(struct task_struct *p)
 	return 1;
 }
 +/*
-+ * Wake up the task on current CPU, if the following conditions are met:
-+ *
-+ * 1. waker A is the only running task on this_cpu
-+ * 3. A is a short duration task (waker will fall asleep soon)
-+ * 4. wakee B is a short duration task (impact of B on A is minor)
-+ * 5. A and B wake up each other alternately
-+ */
-+static bool
-+wake_on_current(int this_cpu, struct task_struct *p)
-+{
-+	if (!sched_feat(SIS_CURRENT))
-+		return false;
-+
-+	if (cpu_rq(this_cpu)->nr_running > 1)
-+		return false;
-+
-+	/*
-+	 * If a task switches in and then voluntarily relinquishes the
-+	 * CPU quickly, it is regarded as a short duration task. In that
-+	 * way, the short waker is likely to relinquish the CPU soon, which
-+	 * provides room for the wakee. Meanwhile, a short wakee would bring
-+	 * minor impact to the target rq. Put the short waker and wakee together
-+	 * bring benefit to cache-share task pairs and avoid migration overhead.
-+	 */
-+	if (!current->se.dur_avg || ((current->se.dur_avg * 8) >= sysctl_sched_min_granularity))
-+		return false;
-+
-+	if (!p->se.dur_avg || ((p->se.dur_avg * 8) >= sysctl_sched_min_granularity))
-+		return false;
-+
-+	if (current->wakee_flips || p->wakee_flips)
-+		return false;
-+
-+	if (current->last_wakee != p || p->last_wakee != current)
-+		return false;
-+
-+	return true;
-+}
-+
++ * If a task switches in and then voluntarily relinquishes the
++ * CPU quickly, it is regarded as a short duration task.
++ *
++ * SIS_SHORT tries to wake up the short wakee on current CPU. This
++ * aims to avoid race condition among CPUs due to frequent context
++ * switch. Besides, the candidate short task should not be the one
++ * that wakes up more than one tasks, otherwise SIS_SHORT might
++ * stack too many tasks on current CPU.
++ */
++static inline int is_short_task(struct task_struct *p)
++{
++	return sched_feat(SIS_SHORT) && !p->wakee_flips &&
++	       p->se.dur_avg &&
++	       ((p->se.dur_avg * 8) < sysctl_sched_min_granularity);
++}
++
 /*
  * The purpose of wake_affine() is to quickly determine on which CPU we can run
  * soonest. For the purpose of speed we only consider the waking and previous
-@@ -6631,6 +6684,9 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
- 	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
- 		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
-+	if (target == nr_cpumask_bits && wake_on_current(this_cpu, p))
-+		target = this_cpu;
-+
- 	schedstat_inc(p->stats.nr_wakeups_affine_attempts);
- 	if (target == nr_cpumask_bits)
- 		return prev_cpu;
+@@ -6574,6 +6604,11 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
+ 	if (available_idle_cpu(prev_cpu))
+ 		return prev_cpu;
++	/* The only running task is a short duration one. */
++	if (cpu_rq(this_cpu)->nr_running == 1 &&
++	    is_short_task(rcu_dereference(cpu_curr(this_cpu))))
++		return this_cpu;
++
+ 	return nr_cpumask_bits;
+ }
+@@ -6948,6 +6983,20 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
+ 			/* overloaded LLC is unlikely to have idle cpu/core */
+ 			if (nr == 1)
+ 				return -1;
++
++			/*
++			 * If the scan number suggested by SIS_UTIL is smaller
++			 * than 60% of llc_weight, it indicates a util_avg% higher
++			 * than 50%. System busier than this could lower its bar to
++			 * choose a compromised "idle" CPU. This co-exists with
++			 * !has_idle_core to not stack too many tasks on one CPU.
++			 */
++			if (!has_idle_core && this == target &&
++			    (5 * nr < 3 * sd->span_weight) &&
++			    cpu_rq(target)->nr_running <= 1 &&
++			    is_short_task(p) &&
++			    is_short_task(rcu_dereference(cpu_curr(target))))
++				return target;
+ 		}
+ 	}
-@@ -7152,6 +7208,9 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
-+	if (smp_processor_id() == target && wake_on_current(target, p))
-+		return target;
-+
- 	i = select_idle_cpu(p, sd, has_idle_core, target);
- 	if ((unsigned)i < nr_cpumask_bits)
- 		return i;
-@@ -9288,96 +9347,65 @@ group_type group_classify(unsigned int imbalance_pct,
+@@ -9288,96 +9337,65 @@ group_type group_classify(unsigned int imbalance_pct,
 }
 /**
-- * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
-- *
-- * Check the state of the SMT siblings of both @sds::local and @sg and decide
-- * if @dst_cpu can pull tasks.
-- *
-- * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
-- * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
-- * only if @dst_cpu has higher priority.
-- *
-- * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more
-- * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority.
-- * Bigger imbalances in the number of busy CPUs will be dealt with in
-- * update_sd_pick_busiest().
++ * sched_use_asym_prio - Check whether asym_packing priority must be used
++ * @sd: The scheduling domain of the load balancing
++ * @cpu: A CPU
  *
-- * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
-- * of @dst_cpu are idle and @sg has lower priority.
+- * If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
+- * the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
+- * only if @dst_cpu has higher priority.
++ * Always use CPU priority when balancing load between SMT siblings. When
++ * balancing load between cores, it is not sufficient that @cpu is idle. Only
++ * use CPU priority if the whole core is idle.
  *
+- * If both @dst_cpu and @sg have SMT siblings, and @sg has exactly one more
+- * busy CPU than @sds::local, let @dst_cpu pull tasks if it has higher priority.
+- * Bigger imbalances in the number of busy CPUs will be dealt with in
+- * update_sd_pick_busiest().
+- *
+- * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
+- * of @dst_cpu are idle and @sg has lower priority.
+- *
 - * Return: true if @dst_cpu can pull tasks, false otherwise.
++ * Returns: True if the priority of @cpu must be followed. False otherwise.
  */
@@ -17926,7 +17916,7 @@ index 96c66b50ee48..59feda52fb4a 100644
 	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
 }
-@@ -9567,10 +9595,22 @@ static bool update_sd_pick_busiest(struct lb_env *env,
+@@ -9567,10 +9585,22 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 	 * contention when accessing shared HW resources.
 	 *
 	 * XXX for now avg_load is not computed and always 0 so we
 		break;
 	case group_has_spare:

-@@ -10045,7 +10085,6 @@ static void update_idle_cpu_scan(struct lb_env *env,
+@@ -10045,7 +10075,6 @@ static void update_idle_cpu_scan(struct lb_env *env,
 static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 {

 	struct sched_group *sg = env->sd->groups;
 	struct sg_lb_stats *local = &sds->local_stat;
 	struct sg_lb_stats tmp_sgs;
-@@ -10086,8 +10125,13 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
+@@ -10086,8 +10115,13 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 		sg = sg->next;
 	} while (sg != env->sd->groups);

 	if (env->sd->flags & SD_NUMA)
-@@ -10397,7 +10441,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+@@ -10397,7 +10431,10 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 		goto out_balanced;
 	}

 	if (sds.prefer_sibling && local->group_type == group_has_spare &&
 	    busiest->sum_nr_running > local->sum_nr_running + 1)
 		goto force_balance;
-@@ -10499,8 +10546,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+@@ -10499,8 +10536,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		    nr_running == 1)
 			continue;

 		    sched_asym_prefer(i, env->dst_cpu) &&
 		    nr_running == 1)
 			continue;
-@@ -10589,12 +10643,19 @@ static inline bool
+@@ -10589,12 +10633,19 @@ static inline bool
 asym_active_balance(struct lb_env *env)
 {
 	/*

 }

 static inline bool
-@@ -11328,9 +11389,13 @@ static void nohz_balancer_kick(struct rq *rq)
+@@ -11328,9 +11379,13 @@ static void nohz_balancer_kick(struct rq *rq)
 	 * When ASYM_PACKING; see if there's a more preferred CPU
 	 * currently idle; in which case, kick the ILB to move tasks
 	 * around.

 			goto unlock;
 		}
 diff --git a/kernel/sched/features.h b/kernel/sched/features.h
-index ee7f23c76bd3..a3e05827f7e8 100644
+index ee7f23c76bd3..efdc29c42161 100644
 --- a/kernel/sched/features.h
 +++ b/kernel/sched/features.h
 @@ -62,6 +62,7 @@ SCHED_FEAT(TTWU_QUEUE, true)
  */
 SCHED_FEAT(SIS_PROP, false)
 SCHED_FEAT(SIS_UTIL, true)
-+SCHED_FEAT(SIS_CURRENT, true)
++SCHED_FEAT(SIS_SHORT, true)
 /*
  * Issue a WARN when we do multiple update_rq_clock() calls
@@ -18287,7 +18277,7 @@ index 3e8df6d31c1e..7331d436ebc4 100644
 --
 2.40.0
 
-From f03484256980f170e534635f6960a79c3952afca Mon Sep 17 00:00:00 2001
+From 7ea2532fa27ecd8a5b0300c93bfc66cf5b0aadf1 Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Sat, 22 Apr 2023 11:46:19 +0200
 Subject: [PATCH 10/12] Surface
@@ -23978,7 +23968,7 @@ index 6beb00858c33..d82d77387a0a 100644
 --
 2.40.0
 
-From c29ba223d07c151853ba4741378f7fe9e54e44cd Mon Sep 17 00:00:00 2001
+From e21ef74a910e423fa4ecdfa291a5022e8002fbaf Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Sat, 22 Apr 2023 11:46:32 +0200
 Subject: [PATCH 11/12] zram
@@ -24590,7 +24580,7 @@ index c5254626f051..ca7a15bd4845 100644
 --
 2.40.0
 
-From cf36bf6be527caed469342959a843a93999ca49e Mon Sep 17 00:00:00 2001
+From 41a5cc0ad4fbe3706a6829e152b63303684f55fd Mon Sep 17 00:00:00 2001
 From: Peter Jung <admin@ptr1337.dev>
 Date: Sat, 22 Apr 2023 11:46:46 +0200
 Subject: [PATCH 12/12] zstd: import 1.5.5