Update patches/0003-lazy.patch

ferreo 2024-12-15 15:27:30 +01:00
parent 45a6d1dbce
commit cabf8a112e


@@ -1,32 +1,38 @@
From 58a9cf052bf4f9bca16b7a4626b00643d35a2c2a Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 11 Nov 2024 09:25:43 +0100
From 5ddf15cb65a8c14868cdc743474bd0a4fa9b586f Mon Sep 17 00:00:00 2001
From: Eric Naim <dnaim@cachyos.org>
Date: Fri, 13 Dec 2024 23:03:09 +0800
Subject: [PATCH] preempt-lazy
Signed-off-by: Peter Jung <admin@ptr1337.dev>
Signed-off-by: Eric Naim <dnaim@cachyos.org>
---
arch/x86/Kconfig | 1 +
arch/x86/include/asm/thread_info.h | 6 +-
include/linux/entry-common.h | 3 +-
include/linux/entry-kvm.h | 5 +-
include/linux/preempt.h | 8 ++-
include/linux/preempt.h | 8 +-
include/linux/rcupdate.h | 2 +-
include/linux/rcutree.h | 2 +-
include/linux/sched.h | 3 +-
include/linux/thread_info.h | 21 ++++--
include/linux/trace_events.h | 8 +--
kernel/Kconfig.preempt | 25 +++++--
include/linux/srcutiny.h | 2 +-
include/linux/thread_info.h | 21 +++++-
include/linux/trace_events.h | 8 +-
kernel/Kconfig.preempt | 25 ++++++-
kernel/entry/common.c | 2 +-
kernel/entry/kvm.c | 4 +-
kernel/rcu/srcutiny.c | 2 +-
kernel/sched/core.c | 112 +++++++++++++++++++++++++----
kernel/rcu/Kconfig | 4 +-
kernel/rcu/srcutiny.c | 14 ++--
kernel/rcu/tree_plugin.h | 22 ++++--
kernel/sched/core.c | 116 +++++++++++++++++++++++++----
kernel/sched/debug.c | 7 +-
kernel/sched/fair.c | 6 +-
kernel/sched/sched.h | 1 +
kernel/trace/trace.c | 2 +
kernel/trace/trace_output.c | 16 ++++-
18 files changed, 186 insertions(+), 46 deletions(-)
kernel/trace/trace_osnoise.c | 32 ++++----
kernel/trace/trace_output.c | 16 +++-
24 files changed, 232 insertions(+), 80 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 89b8fc452a7c..b7316721ebf2 100644
index f127d0f1024e..4b28c191ae31 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -93,6 +93,7 @@ config X86
@@ -38,7 +44,7 @@ index 89b8fc452a7c..b7316721ebf2 100644
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_HW_PTE_YOUNG
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 12da7dfd5ef1..75bb390f7baf 100644
index 12da7dfd5ef1..a55c214f3ba6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -87,8 +87,9 @@ struct thread_info {
@@ -47,7 +53,7 @@ index 12da7dfd5ef1..75bb390f7baf 100644
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_SSBD 5 /* Speculative store bypass disable */
+#define TIF_NEED_RESCHED_LAZY 4 /* rescheduling necessary */
+#define TIF_NEED_RESCHED_LAZY 4 /* Lazy rescheduling needed */
+#define TIF_SINGLESTEP 5 /* reenable singlestep on user return*/
+#define TIF_SSBD 6 /* Speculative store bypass disable */
#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
@@ -124,6 +130,34 @@ index ce76f1a45722..ca86235ac15c 100644
}
#endif /* __LINUX_PREEMPT_H */
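Note on the two hunks above: TIF_NEED_RESCHED_LAZY slots in directly after TIF_NEED_RESCHED, shifting the later x86 flags up by one, and preempt.h wires the new bit into the preemption checks. A minimal userspace sketch of the intended two-level behaviour, for illustration only: the helper functions and the main() driver are invented, only the flag positions mirror the hunk.

#include <stdbool.h>
#include <stdio.h>

#define TIF_NEED_RESCHED      (1u << 3)  /* eager: preempt at next opportunity */
#define TIF_NEED_RESCHED_LAZY (1u << 4)  /* lazy: wait for tick or user return */

static unsigned int thread_flags;

/* Eager requests are honored at any preemption point (preempt_enable(), IRQ exit). */
static bool should_preempt_now(void)
{
        return thread_flags & TIF_NEED_RESCHED;
}

/* Lazy requests are only honored at the tick or on return to user space. */
static bool should_resched_at_tick(void)
{
        return thread_flags & (TIF_NEED_RESCHED | TIF_NEED_RESCHED_LAZY);
}

int main(void)
{
        thread_flags |= TIF_NEED_RESCHED_LAZY;  /* polite request for a fair task */
        printf("now=%d tick=%d\n", should_preempt_now(), should_resched_at_tick());

        thread_flags |= TIF_NEED_RESCHED;       /* overdue: escalate to eager */
        printf("now=%d tick=%d\n", should_preempt_now(), should_resched_at_tick());
        return 0;
}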
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 48e5c03df1dd..257e9ae34414 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -95,9 +95,9 @@ static inline void __rcu_read_lock(void)
static inline void __rcu_read_unlock(void)
{
- preempt_enable();
if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
rcu_read_unlock_strict();
+ preempt_enable();
}
static inline int rcu_preempt_depth(void)
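The __rcu_read_unlock() reorder above is load-bearing: with CONFIG_RCU_STRICT_GRACE_PERIOD, rcu_read_unlock_strict() reports a quiescent state through this_cpu_ptr(&rcu_data), so it must run before preemption is re-enabled or the task could migrate mid-report (see the matching tree_plugin.h change below). A sketch of the invariant, with stand-ins for the kernel primitives, none of these are real APIs:

#include <assert.h>

static int preempt_depth = 1;           /* taken by __rcu_read_lock() */
static int cpu_stable_reports;

static void report_qs_on_local_cpu(void)
{
        assert(preempt_depth > 0);      /* still pinned to this CPU */
        cpu_stable_reports++;
}

static void rcu_read_unlock_sketch(void)
{
        report_qs_on_local_cpu();       /* new order: report while still pinned */
        preempt_depth--;                /* old order dropped the pin first,
                                           making the per-CPU report racy */
}

int main(void)
{
        rcu_read_unlock_sketch();
        return 0;
}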
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 90a684f94776..ae8b5cb475a3 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -104,7 +104,7 @@ extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
-#ifndef CONFIG_PREEMPTION
+#ifndef CONFIG_PREEMPT_RCU
void rcu_all_qs(void);
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bb343136ddd0..ade641760900 100644
--- a/include/linux/sched.h
@@ -138,6 +172,19 @@ index bb343136ddd0..ade641760900 100644
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index 4d96bbdb45f0..1635c5e2662f 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -64,7 +64,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
preempt_enable();
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 9ea0b28068f4..cf2446c9c30d 100644
--- a/include/linux/thread_info.h
@@ -330,22 +377,141 @@ index 2e0f75bcb7fd..8485f63863af 100644
return 0;
}
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 3e079de0f5b4..9d52f87fac27 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -18,7 +18,7 @@ config TREE_RCU
config PREEMPT_RCU
bool
- default y if PREEMPTION
+ default y if (PREEMPT || PREEMPT_RT || PREEMPT_DYNAMIC)
select TREE_RCU
help
This option selects the RCU implementation that is
@@ -91,7 +91,7 @@ config NEED_TASKS_RCU
config TASKS_RCU
bool
- default NEED_TASKS_RCU && (PREEMPTION || PREEMPT_AUTO)
+ default NEED_TASKS_RCU && PREEMPTION
select IRQ_WORK
config FORCE_TASKS_RUDE_RCU
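The kernel/rcu/Kconfig hunk is easy to skim past but central to the series: PREEMPT_LAZY is deliberately missing from the new PREEMPT_RCU default, so a lazy-preempt kernel is preemptible yet runs non-preemptible RCU, taking quiescent states from the tick (see the rcu_flavor_sched_clock_irq() change below) rather than from preemptible readers. The same logic as a plain C predicate, struct and names invented for illustration:

#include <stdbool.h>

struct config {
        bool preempt, preempt_rt, preempt_dynamic, preempt_lazy;
};

/* Mirrors "default y if (PREEMPT || PREEMPT_RT || PREEMPT_DYNAMIC)". */
static bool preempt_rcu_default(const struct config *c)
{
        /* c->preempt_lazy intentionally unused: lazy builds keep PREEMPT_RCU=n. */
        return c->preempt || c->preempt_rt || c->preempt_dynamic;
}

int main(void)
{
        struct config lazy_only = { .preempt_lazy = true };
        return preempt_rcu_default(&lazy_only); /* 0: lazy keeps PREEMPT_RCU=n */
}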
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 549c03336ee9..4dcbf8aa80ff 100644
index 4dcbf8aa80ff..f688bdad293e 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -122,8 +122,8 @@ void srcu_drive_gp(struct work_struct *wp)
ssp = container_of(wp, struct srcu_struct, srcu_work);
preempt_disable(); // Needed for PREEMPT_AUTO
if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
- return; /* Already running or nothing to do. */
preempt_enable();
+ return; /* Already running or nothing to do. */
}
@@ -98,7 +98,7 @@ void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
int newval;
/* Remove recently arrived callbacks and wait for readers. */
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
preempt_enable();
@@ -120,7 +120,7 @@ void srcu_drive_gp(struct work_struct *wp)
struct srcu_struct *ssp;
ssp = container_of(wp, struct srcu_struct, srcu_work);
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
preempt_enable();
return; /* Already running or nothing to do. */
@@ -138,7 +138,7 @@ void srcu_drive_gp(struct work_struct *wp)
WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
preempt_enable();
swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
preempt_enable();
@@ -159,7 +159,7 @@ void srcu_drive_gp(struct work_struct *wp)
* at interrupt level, but the ->srcu_gp_running checks will
* straighten that out.
*/
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
WRITE_ONCE(ssp->srcu_gp_running, false);
idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max));
preempt_enable();
@@ -172,7 +172,7 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
unsigned long cookie;
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
cookie = get_state_synchronize_srcu(ssp);
if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) {
preempt_enable();
@@ -199,7 +199,7 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rhp->func = func;
rhp->next = NULL;
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
local_irq_save(flags);
*ssp->srcu_cb_tail = rhp;
ssp->srcu_cb_tail = &rhp->next;
@@ -261,7 +261,7 @@ unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
unsigned long ret;
- preempt_disable(); // Needed for PREEMPT_AUTO
+ preempt_disable(); // Needed for PREEMPT_LAZY
ret = get_state_synchronize_srcu(ssp);
srcu_gp_start_if_needed(ssp);
preempt_enable();
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 1c7cbd145d5e..304e3405e6ec 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -832,8 +832,17 @@ void rcu_read_unlock_strict(void)
{
struct rcu_data *rdp;
- if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
+ if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread)
return;
+
+ /*
+ * rcu_report_qs_rdp() can only be invoked with a stable rdp and
+ * from the local CPU.
+ *
+ * The in_atomic_preempt_off() check ensures that we come here holding
+ * the last preempt_count (which will get dropped once we return to
+ * __rcu_read_unlock().
+ */
rdp = this_cpu_ptr(&rcu_data);
rdp->cpu_no_qs.b.norm = false;
rcu_report_qs_rdp(rdp);
@@ -974,13 +983,16 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
*/
static void rcu_flavor_sched_clock_irq(int user)
{
- if (user || rcu_is_cpu_rrupt_from_idle()) {
+ if (user || rcu_is_cpu_rrupt_from_idle() ||
+ (IS_ENABLED(CONFIG_PREEMPT_COUNT) &&
+ (preempt_count() == HARDIRQ_OFFSET))) {
/*
* Get here if this CPU took its interrupt from user
- * mode or from the idle loop, and if this is not a
- * nested interrupt. In this case, the CPU is in
- * a quiescent state, so note it.
+ * mode, from the idle loop without this being a nested
+ * interrupt, or while not holding the task preempt count
+ * (with PREEMPT_COUNT=y). In this case, the CPU is in a
+ * quiescent state, so note it.
*
* No memory barrier is required here because rcu_qs()
* references only CPU-local variables that other CPUs
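Both tree_plugin.h hunks above turn on the anatomy of preempt_count. With the rcupdate.h reorder, rcu_read_unlock_strict() now always runs with the reader's preempt_disable() still held, so the old bail-out test (preempt_count() nonzero) would always fire; in_atomic_preempt_off() instead bails only when something beyond that last reference is held. Likewise, from a tick interrupt, a count of exactly HARDIRQ_OFFSET means the IRQ landed on a fully preemptible task, which is a quiescent state. A userspace model of the two predicates, offsets mirroring include/linux/preempt.h, everything else invented:

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_OFFSET  1u          /* one preempt_disable() */
#define SOFTIRQ_OFFSET  (1u << 8)
#define HARDIRQ_OFFSET  (1u << 16)

/* rcu_flavor_sched_clock_irq(): quiescent if the tick IRQ is the only
 * atomic context, i.e. no preempt_disable() or softirq underneath. */
static bool irq_hit_preemptible_task(unsigned int count)
{
        return count == HARDIRQ_OFFSET;
}

/* rcu_read_unlock_strict(): proceed only when the sole remaining atomic
 * context is the final preempt_disable(), i.e. !in_atomic_preempt_off(). */
static bool only_last_preempt_disable(unsigned int count)
{
        return count == PREEMPT_OFFSET;
}

int main(void)
{
        printf("%d %d\n",
               irq_hit_preemptible_task(HARDIRQ_OFFSET),
               only_last_preempt_disable(PREEMPT_OFFSET));            /* 1 1 */
        printf("%d %d\n",
               irq_hit_preemptible_task(HARDIRQ_OFFSET + PREEMPT_OFFSET),
               only_last_preempt_disable(SOFTIRQ_OFFSET + PREEMPT_OFFSET)); /* 0 0 */
        return 0;
}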
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b35752fdbcc0..a1b01cd25e6d 100644
index 76b27b2a9c56..e82948e247c1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -941,10 +941,9 @@ static inline void hrtick_rq_init(struct rq *rq)
@@ -429,7 +595,7 @@ index b35752fdbcc0..a1b01cd25e6d 100644
+#else
+static __always_inline bool dynamic_preempt_lazy(void)
+{
+ return IS_ENABLED(PREEMPT_LAZY);
+ return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+#endif
+
@@ -456,7 +622,7 @@ index b35752fdbcc0..a1b01cd25e6d 100644
smp_send_reschedule(cpu);
else
trace_sched_wake_idle_without_ipi(cpu);
@@ -5613,6 +5650,10 @@ void sched_tick(void)
@@ -5604,6 +5641,10 @@ void sched_tick(void)
update_rq_clock(rq);
hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
@@ -467,7 +633,25 @@ index b35752fdbcc0..a1b01cd25e6d 100644
curr->sched_class->task_tick(rq, curr, 0);
if (sched_feat(LATENCY_WARN))
resched_latency = cpu_resched_latency(rq);
@@ -7358,6 +7399,7 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
@@ -7219,7 +7260,7 @@ int __sched __cond_resched(void)
return 1;
}
/*
- * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
+ * In PREEMPT_RCU kernels, ->rcu_read_lock_nesting tells the tick
* whether the current CPU is in an RCU read-side critical section,
* so the tick can report quiescent states even for CPUs looping
* in kernel context. In contrast, in non-preemptible kernels,
@@ -7228,6 +7269,8 @@ int __sched __cond_resched(void)
* RCU quiescent state. Therefore, the following code causes
* cond_resched() to report a quiescent state, but only when RCU
* is in urgent need of one.
+ * A third case, preemptible, but non-PREEMPT_RCU provides for
+ * urgently needed quiescent states via rcu_flavor_sched_clock_irq().
*/
#ifndef CONFIG_PREEMPT_RCU
rcu_all_qs();
@@ -7352,6 +7395,7 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
* preempt_schedule <- NOP
* preempt_schedule_notrace <- NOP
* irqentry_exit_cond_resched <- NOP
@@ -475,7 +659,7 @@
*
* VOLUNTARY:
* cond_resched <- __cond_resched
@@ -7365,6 +7407,7 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
@@ -7359,6 +7403,7 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
* preempt_schedule <- NOP
* preempt_schedule_notrace <- NOP
* irqentry_exit_cond_resched <- NOP
@@ -483,7 +667,7 @@
*
* FULL:
* cond_resched <- RET0
@@ -7372,6 +7415,15 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
@@ -7366,6 +7411,15 @@ EXPORT_SYMBOL(__cond_resched_rwlock_write);
* preempt_schedule <- preempt_schedule
* preempt_schedule_notrace <- preempt_schedule_notrace
* irqentry_exit_cond_resched <- irqentry_exit_cond_resched
@@ -499,7 +683,7 @@
*/
enum {
@@ -7379,30 +7431,41 @@ enum {
@@ -7373,30 +7427,41 @@ enum {
preempt_dynamic_none,
preempt_dynamic_voluntary,
preempt_dynamic_full,
@@ -543,7 +727,7 @@
#else
#error "Unsupported PREEMPT_DYNAMIC mechanism"
#endif
@@ -7422,6 +7485,7 @@ static void __sched_dynamic_update(int mode)
@@ -7416,6 +7481,7 @@ static void __sched_dynamic_update(int mode)
preempt_dynamic_enable(preempt_schedule);
preempt_dynamic_enable(preempt_schedule_notrace);
preempt_dynamic_enable(irqentry_exit_cond_resched);
@@ -551,7 +735,7 @@
switch (mode) {
case preempt_dynamic_none:
@@ -7431,6 +7495,7 @@ static void __sched_dynamic_update(int mode)
@@ -7425,6 +7491,7 @@ static void __sched_dynamic_update(int mode)
preempt_dynamic_disable(preempt_schedule);
preempt_dynamic_disable(preempt_schedule_notrace);
preempt_dynamic_disable(irqentry_exit_cond_resched);
@@ -559,7 +743,7 @@
if (mode != preempt_dynamic_mode)
pr_info("Dynamic Preempt: none\n");
break;
@@ -7442,6 +7507,7 @@ static void __sched_dynamic_update(int mode)
@@ -7436,6 +7503,7 @@ static void __sched_dynamic_update(int mode)
preempt_dynamic_disable(preempt_schedule);
preempt_dynamic_disable(preempt_schedule_notrace);
preempt_dynamic_disable(irqentry_exit_cond_resched);
@@ -567,7 +751,7 @@
if (mode != preempt_dynamic_mode)
pr_info("Dynamic Preempt: voluntary\n");
break;
@@ -7453,9 +7519,22 @@ static void __sched_dynamic_update(int mode)
@@ -7447,9 +7515,22 @@ static void __sched_dynamic_update(int mode)
preempt_dynamic_enable(preempt_schedule);
preempt_dynamic_enable(preempt_schedule_notrace);
preempt_dynamic_enable(irqentry_exit_cond_resched);
@@ -590,7 +774,7 @@
}
preempt_dynamic_mode = mode;
@@ -7518,6 +7597,8 @@ static void __init preempt_dynamic_init(void)
@@ -7512,6 +7593,8 @@ static void __init preempt_dynamic_init(void)
sched_dynamic_update(preempt_dynamic_none);
} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
sched_dynamic_update(preempt_dynamic_voluntary);
@@ -599,7 +783,7 @@
} else {
/* Default static call setting, nothing to do */
WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
@@ -7538,6 +7619,7 @@ static void __init preempt_dynamic_init(void)
@@ -7532,6 +7615,7 @@ static void __init preempt_dynamic_init(void)
PREEMPT_MODEL_ACCESSOR(none);
PREEMPT_MODEL_ACCESSOR(voluntary);
PREEMPT_MODEL_ACCESSOR(full);
@@ -659,10 +843,10 @@ index 54e7c4c3e2c5..10e9484d1d43 100644
static struct task_struct *pick_task_fair(struct rq *rq)
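The sched/core.c hunks above add a fourth PREEMPT_DYNAMIC mode: under preempt=lazy the scheduler arms TIF_NEED_RESCHED_LAZY instead of TIF_NEED_RESCHED for ordinary tick-driven rescheduling, and dynamic_preempt_lazy() is backed by a static key (sk_dynamic_preempt_lazy) where the architecture supports it, falling back to IS_ENABLED(CONFIG_PREEMPT_LAZY). A userspace model of the mode switch, with a plain bool standing in for the static key and invented helper names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool preempt_lazy_enabled;       /* stand-in for sk_dynamic_preempt_lazy */

/* Stand-in for sched_dynamic_update(): "lazy" flips the key, others clear it. */
static void sched_dynamic_update_sketch(const char *mode)
{
        preempt_lazy_enabled = (strcmp(mode, "lazy") == 0);
}

/* The patch routes fair-class resched requests through a helper along these
 * lines, which picks the TIF bit according to the current mode. */
static const char *resched_flag(void)
{
        return preempt_lazy_enabled ? "TIF_NEED_RESCHED_LAZY"
                                    : "TIF_NEED_RESCHED";
}

int main(void)
{
        sched_dynamic_update_sketch("full");
        printf("preempt=full -> %s\n", resched_flag());
        sched_dynamic_update_sketch("lazy");
        printf("preempt=lazy -> %s\n", resched_flag());
        return 0;
}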
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e7fbb1d0f316..1f2b6a50c344 100644
index c5d6012794de..b5f3890f3050 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2700,6 +2700,7 @@ extern void init_sched_rt_class(void);
@@ -2696,6 +2696,7 @@ extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
@@ -671,10 +855,10 @@ index e7fbb1d0f316..1f2b6a50c344 100644
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2b64b3ec67d9..7466b8713076 100644
index 6a891e00aa7f..acbed0ffe083 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2544,6 +2544,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
@@ -2563,6 +2563,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -683,6 +867,53 @@ index 2b64b3ec67d9..7466b8713076 100644
return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
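The tracing_gen_ctx_irq_test() change records the lazy bit alongside the existing resched flags. Its return statement, visible above, packs everything into one word: trace flags in the high 16 bits, the preempt count capped to a nibble in bits 0-3, and the migration-disable depth in bits 4-7. A sketch of that packing; TRACE_FLAG_NEED_RESCHED_LAZY's value is an assumption here, the other two match include/linux/trace_events.h:

#include <stdio.h>

#define TRACE_FLAG_NEED_RESCHED       0x04
#define TRACE_FLAG_PREEMPT_RESCHED    0x20
#define TRACE_FLAG_NEED_RESCHED_LAZY  0x02      /* assumed value */

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static unsigned int pack_ctx(unsigned int trace_flags, unsigned int pc,
                             unsigned int migration_disabled)
{
        return (trace_flags << 16) | min_u(pc & 0xff, 0xf) |
               (min_u(migration_disabled, 0xf) << 4);
}

int main(void)
{
        unsigned int ctx = pack_ctx(TRACE_FLAG_NEED_RESCHED_LAZY, 1, 0);

        printf("ctx=%#x lazy=%u preempt_count=%u\n", ctx,
               !!((ctx >> 16) & TRACE_FLAG_NEED_RESCHED_LAZY), ctx & 0xf);
        return 0;
}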
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index a50ed23bee77..4a9087112526 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1537,27 +1537,25 @@ static int run_osnoise(void)
/*
* In some cases, notably when running on a nohz_full CPU with
- * a stopped tick PREEMPT_RCU has no way to account for QSs.
- * This will eventually cause unwarranted noise as PREEMPT_RCU
- * will force preemption as the means of ending the current
- * grace period. We avoid this problem by calling
- * rcu_momentary_eqs(), which performs a zero duration
- * EQS allowing PREEMPT_RCU to end the current grace period.
- * This call shouldn't be wrapped inside an RCU critical
- * section.
+ * a stopped tick PREEMPT_RCU or PREEMPT_LAZY have no way to
+ * account for QSs. This will eventually cause unwarranted
+ * noise as RCU forces preemption as the means of ending the
+ * current grace period. We avoid this by calling
+ * rcu_momentary_eqs(), which performs a zero duration EQS
+ * allowing RCU to end the current grace period. This call
+ * shouldn't be wrapped inside an RCU critical section.
*
- * Note that in non PREEMPT_RCU kernels QSs are handled through
- * cond_resched()
+ * Normally QSs for other cases are handled through cond_resched().
+ * For simplicity, however, we call rcu_momentary_eqs() for all
+ * configurations here.
*/
- if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
- if (!disable_irq)
- local_irq_disable();
+ if (!disable_irq)
+ local_irq_disable();
- rcu_momentary_eqs();
+ rcu_momentary_eqs();
- if (!disable_irq)
- local_irq_enable();
- }
+ if (!disable_irq)
+ local_irq_enable();
/*
* For the non-preemptive kernel config: let threads runs, if
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 868f2f912f28..23ca2155306b 100644
--- a/kernel/trace/trace_output.c
@@ -723,5 +954,5 @@ index 868f2f912f28..23ca2155306b 100644
need_resched = '.';
break;
--
2.47.0
2.47.1
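For reference, the trace_output.c hunk, shown above only down to its default case, extends the latency-format need-resched column so lazy requests can be told apart from eager ones. A sketch of the resulting decode; the letter assignments follow the upstream lazy-preempt series and should be verified against the full hunk:

#include <stdio.h>

#define TRACE_FLAG_NEED_RESCHED       0x04
#define TRACE_FLAG_NEED_RESCHED_LAZY  0x02      /* assumed value */
#define TRACE_FLAG_PREEMPT_RESCHED    0x20

static char need_resched_char(unsigned int flags)
{
        unsigned int n = !!(flags & TRACE_FLAG_NEED_RESCHED);
        unsigned int l = !!(flags & TRACE_FLAG_NEED_RESCHED_LAZY);
        unsigned int p = !!(flags & TRACE_FLAG_PREEMPT_RESCHED);

        if (n && l && p) return 'B';
        if (n && p)      return 'N';
        if (l && p)      return 'L';
        if (n && l)      return 'b';
        if (n)           return 'n';
        if (l)           return 'l';
        if (p)           return 'p';
        return '.';     /* the default case visible in the hunk above */
}

int main(void)
{
        printf("%c %c %c\n",
               need_resched_char(TRACE_FLAG_NEED_RESCHED_LAZY),   /* l */
               need_resched_char(TRACE_FLAG_NEED_RESCHED |
                                 TRACE_FLAG_PREEMPT_RESCHED),     /* N */
               need_resched_char(0));                             /* . */
        return 0;
}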