Update patches/0001-cachyos-base-all.patch

ferreo 2025-02-23 16:24:51 +01:00
parent df18295267
commit bb222b7226


@@ -1,15 +1,15 @@
From 7ab1776e0655d3eee1b58834caa1a26994a08d67 Mon Sep 17 00:00:00 2001 From 521dea9a496313ed35debb9af0dcf2c0faeef35d Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:28:48 +0100 Date: Fri, 21 Feb 2025 14:37:58 +0100
Subject: [PATCH 01/12] amd-pstate Subject: [PATCH 01/12] amd-pstate
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
--- ---
drivers/cpufreq/amd-pstate-trace.h | 52 ++- drivers/cpufreq/amd-pstate-trace.h | 52 +++-
drivers/cpufreq/amd-pstate-ut.c | 12 +- drivers/cpufreq/amd-pstate-ut.c | 12 +-
drivers/cpufreq/amd-pstate.c | 490 ++++++++++++++--------------- drivers/cpufreq/amd-pstate.c | 397 +++++++++++++++--------------
drivers/cpufreq/amd-pstate.h | 3 - drivers/cpufreq/amd-pstate.h | 3 -
4 files changed, 284 insertions(+), 273 deletions(-) 4 files changed, 259 insertions(+), 205 deletions(-)
diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h diff --git a/drivers/cpufreq/amd-pstate-trace.h b/drivers/cpufreq/amd-pstate-trace.h
index 35f38ae67fb1..8d692415d905 100644 index 35f38ae67fb1..8d692415d905 100644
@@ -156,7 +156,7 @@ index a261d7300951..3a0a380c3590 100644
} }
} else { } else {
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index f6d04eb40af9..6a1e02389831 100644 index f71057c2cf90..6a1e02389831 100644
--- a/drivers/cpufreq/amd-pstate.c --- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c
@@ -22,6 +22,7 @@ @@ -22,6 +22,7 @@
@@ -543,21 +543,15 @@ index f6d04eb40af9..6a1e02389831 100644
policy->max = policy->cpuinfo.max_freq; policy->max = policy->cpuinfo.max_freq;
@@ -727,12 +744,10 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state) @@ -730,8 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
pr_err("Boost mode is not supported by this processor or SBIOS\n"); guard(mutex)(&amd_pstate_driver_lock);
return -EOPNOTSUPP;
}
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
+
ret = amd_pstate_cpu_boost_update(policy, state); ret = amd_pstate_cpu_boost_update(policy, state);
- WRITE_ONCE(cpudata->boost_state, !ret ? state : false); - WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
- policy->boost_enabled = !ret ? state : false; - policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy); refresh_frequency_limits(policy);
- mutex_unlock(&amd_pstate_driver_lock);
return ret; return ret;
}
@@ -752,9 +767,6 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata) @@ -752,9 +767,6 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
goto exit_err; goto exit_err;
} }
@@ -568,57 +562,7 @@ index f6d04eb40af9..6a1e02389831 100644
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val); ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
if (ret) { if (ret) {
pr_err_once("failed to read initial CPU boost state!\n"); pr_err_once("failed to read initial CPU boost state!\n");
@@ -809,24 +821,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata) @@ -906,29 +918,29 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
+ if (!amd_pstate_prefcore)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
cpudata = policy->driver_data;
- if (!amd_pstate_prefcore)
- return;
+ guard(mutex)(&amd_pstate_driver_lock);
- mutex_lock(&amd_pstate_driver_lock);
ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -836,14 +852,11 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
-
-free_cpufreq_put:
cpufreq_cpu_put(policy);
if (!highest_perf_changed)
cpufreq_update_policy(cpu);
- mutex_unlock(&amd_pstate_driver_lock);
}
/*
@@ -905,29 +918,29 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
return ret; return ret;
if (quirks && quirks->lowest_freq) if (quirks && quirks->lowest_freq)
@@ -657,7 +601,7 @@ index f6d04eb40af9..6a1e02389831 100644
/** /**
* Below values need to be initialized correctly, otherwise driver will fail to load * Below values need to be initialized correctly, otherwise driver will fail to load
@@ -937,13 +950,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) @@ -938,13 +950,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
*/ */
if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) { if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n", pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
@@ -674,7 +618,7 @@ index f6d04eb40af9..6a1e02389831 100644
return -EINVAL; return -EINVAL;
} }
@@ -1160,7 +1173,6 @@ static ssize_t show_energy_performance_available_preferences( @@ -1161,7 +1173,6 @@ static ssize_t show_energy_performance_available_preferences(
static ssize_t store_energy_performance_preference( static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count) struct cpufreq_policy *policy, const char *buf, size_t count)
{ {
@@ -682,23 +626,16 @@ index f6d04eb40af9..6a1e02389831 100644
char str_preference[21]; char str_preference[21];
ssize_t ret; ssize_t ret;
@@ -1172,11 +1184,11 @@ static ssize_t store_energy_performance_preference( @@ -1175,7 +1186,7 @@ static ssize_t store_energy_performance_preference(
if (ret < 0)
return -EINVAL; guard(mutex)(&amd_pstate_limits_lock);
- mutex_lock(&amd_pstate_limits_lock);
- ret = amd_pstate_set_energy_pref_index(cpudata, ret); - ret = amd_pstate_set_energy_pref_index(cpudata, ret);
- mutex_unlock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
- return ret ?: count;
+ ret = amd_pstate_set_energy_pref_index(policy, ret); + ret = amd_pstate_set_energy_pref_index(policy, ret);
+
+ return ret ? ret : count;
}
static ssize_t show_energy_performance_preference( return ret ? ret : count;
@@ -1185,9 +1197,22 @@ static ssize_t show_energy_performance_preference( }
@@ -1186,9 +1197,22 @@ static ssize_t show_energy_performance_preference(
struct amd_cpudata *cpudata = policy->driver_data; struct amd_cpudata *cpudata = policy->driver_data;
int preference; int preference;
@@ -724,7 +661,7 @@ index f6d04eb40af9..6a1e02389831 100644
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]); return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
} }
@@ -1236,6 +1261,9 @@ static int amd_pstate_register_driver(int mode) @@ -1237,6 +1261,9 @@ static int amd_pstate_register_driver(int mode)
return ret; return ret;
} }
@@ -734,34 +671,7 @@ index f6d04eb40af9..6a1e02389831 100644
ret = cpufreq_register_driver(current_pstate_driver); ret = cpufreq_register_driver(current_pstate_driver);
if (ret) { if (ret) {
amd_pstate_driver_cleanup(); amd_pstate_driver_cleanup();
@@ -1340,13 +1368,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status); @@ -1448,7 +1475,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
static ssize_t status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- ssize_t ret;
- mutex_lock(&amd_pstate_driver_lock);
- ret = amd_pstate_show_status(buf);
- mutex_unlock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
- return ret;
+ return amd_pstate_show_status(buf);
}
static ssize_t status_store(struct device *a, struct device_attribute *b,
@@ -1355,9 +1380,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&amd_pstate_driver_lock);
+ guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&amd_pstate_driver_lock);
return ret < 0 ? ret : count;
}
@@ -1451,7 +1475,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return -ENOMEM; return -ENOMEM;
cpudata->cpu = policy->cpu; cpudata->cpu = policy->cpu;
@ -769,7 +679,7 @@ index f6d04eb40af9..6a1e02389831 100644
ret = amd_pstate_init_perf(cpudata); ret = amd_pstate_init_perf(cpudata);
if (ret) if (ret)
@@ -1477,8 +1500,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) @@ -1474,8 +1500,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata; policy->driver_data = cpudata;
@ -778,7 +688,7 @@ index f6d04eb40af9..6a1e02389831 100644
policy->min = policy->cpuinfo.min_freq; policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq; policy->max = policy->cpuinfo.max_freq;
@@ -1489,10 +1510,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) @@ -1486,10 +1510,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
* the default cpufreq governor is neither powersave nor performance. * the default cpufreq governor is neither powersave nor performance.
*/ */
if (amd_pstate_acpi_pm_profile_server() || if (amd_pstate_acpi_pm_profile_server() ||
@ -794,7 +704,7 @@ index f6d04eb40af9..6a1e02389831 100644
if (cpu_feature_enabled(X86_FEATURE_CPPC)) { if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value); ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
@@ -1505,6 +1529,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy) @@ -1502,6 +1529,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
return ret; return ret;
WRITE_ONCE(cpudata->cppc_cap1_cached, value); WRITE_ONCE(cpudata->cppc_cap1_cached, value);
} }
@ -804,7 +714,7 @@ index f6d04eb40af9..6a1e02389831 100644
current_pstate_driver->adjust_perf = NULL; current_pstate_driver->adjust_perf = NULL;
@@ -1530,51 +1557,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy) @@ -1527,51 +1557,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy) static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{ {
struct amd_cpudata *cpudata = policy->driver_data; struct amd_cpudata *cpudata = policy->driver_data;
@ -867,39 +777,29 @@ index f6d04eb40af9..6a1e02389831 100644
} }
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1603,87 +1603,63 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy) @@ -1600,8 +1603,9 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
return 0; return 0;
} }
-static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata) -static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+static int amd_pstate_epp_reenable(struct cpufreq_policy *policy) +static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
{ {
- struct cppc_perf_ctrls perf_ctrls;
- u64 value, max_perf;
+ struct amd_cpudata *cpudata = policy->driver_data; + struct amd_cpudata *cpudata = policy->driver_data;
+ u64 max_perf; u64 max_perf;
int ret; int ret;
ret = amd_pstate_cppc_enable(true); @@ -1611,17 +1615,26 @@ static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
if (ret)
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
- value = READ_ONCE(cpudata->cppc_req_cached);
max_perf = READ_ONCE(cpudata->highest_perf); max_perf = READ_ONCE(cpudata->highest_perf);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) { - amd_pstate_update_perf(cpudata, 0, 0, max_perf, false);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value); - amd_pstate_set_epp(cpudata, cpudata->epp_cached);
- } else {
- perf_ctrls.max_perf = max_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (trace_amd_pstate_epp_perf_enabled()) { + if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, + trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ cpudata->epp_cached, + cpudata->epp_cached,
+ FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached), + FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
+ max_perf, policy->boost_enabled); + max_perf, policy->boost_enabled);
} + }
+ +
+ return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false); + return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
} }
@ -911,93 +811,41 @@ index f6d04eb40af9..6a1e02389831 100644
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu); pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
- if (cppc_state == AMD_PSTATE_ACTIVE) {
- amd_pstate_epp_reenable(cpudata); - amd_pstate_epp_reenable(cpudata);
- cpudata->suspended = false;
- }
+ ret = amd_pstate_epp_reenable(policy); + ret = amd_pstate_epp_reenable(policy);
+ if (ret) + if (ret)
+ return ret; + return ret;
+ cpudata->suspended = false; cpudata->suspended = false;
return 0; return 0;
} @@ -1639,10 +1652,14 @@ static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
-static void amd_pstate_epp_offline(struct cpufreq_policy *policy) guard(mutex)(&amd_pstate_limits_lock);
-{
- struct amd_cpudata *cpudata = policy->driver_data;
- struct cppc_perf_ctrls perf_ctrls;
- int min_perf;
- u64 value;
-
- min_perf = READ_ONCE(cpudata->lowest_perf);
- value = READ_ONCE(cpudata->cppc_req_cached);
-
- mutex_lock(&amd_pstate_limits_lock);
- if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
- cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
-
- /* Set max perf same as min perf */
- value &= ~AMD_CPPC_MAX_PERF(~0L);
- value |= AMD_CPPC_MAX_PERF(min_perf);
- value &= ~AMD_CPPC_MIN_PERF(~0L);
- value |= AMD_CPPC_MIN_PERF(min_perf);
- wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
- } else {
- perf_ctrls.desired_perf = 0;
- perf_ctrls.min_perf = min_perf;
- perf_ctrls.max_perf = min_perf;
- cppc_set_perf(cpudata->cpu, &perf_ctrls);
- perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
- cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
- }
- mutex_unlock(&amd_pstate_limits_lock);
-}
-
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
{
struct amd_cpudata *cpudata = policy->driver_data;
-
- pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+ int min_perf;
if (cpudata->suspended) - amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, false);
return 0; - amd_pstate_set_epp(cpudata, AMD_CPPC_EPP_BALANCE_POWERSAVE);
- if (cppc_state == AMD_PSTATE_ACTIVE)
- amd_pstate_epp_offline(policy);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
- return 0;
+ guard(mutex)(&amd_pstate_limits_lock);
+
+ if (trace_amd_pstate_epp_perf_enabled()) { + if (trace_amd_pstate_epp_perf_enabled()) {
+ trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, + trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE, + AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ min_perf, min_perf, policy->boost_enabled); + min_perf, min_perf, policy->boost_enabled);
+ } + }
+
- return 0;
+ return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf, + return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
+ AMD_CPPC_EPP_BALANCE_POWERSAVE, false); + AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
} }
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy) static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
@@ -1711,12 +1687,10 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy) @@ -1673,7 +1690,7 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
struct amd_cpudata *cpudata = policy->driver_data; guard(mutex)(&amd_pstate_limits_lock);
if (cpudata->suspended) {
- mutex_lock(&amd_pstate_limits_lock);
+ guard(mutex)(&amd_pstate_limits_lock);
/* enable amd pstate from suspend state*/ /* enable amd pstate from suspend state*/
- amd_pstate_epp_reenable(cpudata); - amd_pstate_epp_reenable(cpudata);
-
- mutex_unlock(&amd_pstate_limits_lock);
+ amd_pstate_epp_reenable(policy); + amd_pstate_epp_reenable(policy);
cpudata->suspended = false; cpudata->suspended = false;
} }
@@ -1869,6 +1843,8 @@ static int __init amd_pstate_init(void) @@ -1826,6 +1843,8 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable); static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
static_call_update(amd_pstate_init_perf, shmem_init_perf); static_call_update(amd_pstate_init_perf, shmem_init_perf);
static_call_update(amd_pstate_update_perf, shmem_update_perf); static_call_update(amd_pstate_update_perf, shmem_update_perf);
@ -1035,9 +883,9 @@ index cd573bc6b6db..9747e3be6cee 100644
-- --
2.48.0.rc1 2.48.0.rc1
From c01c10767015f219e780922e3c96f575a12209c1 Mon Sep 17 00:00:00 2001 From 31240ebeb2bb55bec0be0f5a3d1949980a0d5531 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:30:20 +0100 Date: Fri, 21 Feb 2025 14:45:47 +0100
Subject: [PATCH 02/12] amd-tlb-broadcast Subject: [PATCH 02/12] amd-tlb-broadcast
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -1048,18 +896,18 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/include/asm/disabled-features.h | 8 +- arch/x86/include/asm/disabled-features.h | 8 +-
arch/x86/include/asm/invlpgb.h | 107 +++++ arch/x86/include/asm/invlpgb.h | 107 +++++
arch/x86/include/asm/mmu.h | 8 + arch/x86/include/asm/mmu.h | 6 +
arch/x86/include/asm/mmu_context.h | 15 + arch/x86/include/asm/mmu_context.h | 14 +
arch/x86/include/asm/msr-index.h | 2 + arch/x86/include/asm/msr-index.h | 2 +
arch/x86/include/asm/paravirt.h | 5 - arch/x86/include/asm/paravirt.h | 5 -
arch/x86/include/asm/paravirt_types.h | 2 - arch/x86/include/asm/paravirt_types.h | 2 -
arch/x86/include/asm/tlbflush.h | 101 ++++- arch/x86/include/asm/tlbflush.h | 100 ++++-
arch/x86/kernel/alternative.c | 10 +- arch/x86/kernel/alternative.c | 10 +-
arch/x86/kernel/cpu/amd.c | 12 + arch/x86/kernel/cpu/amd.c | 12 +
arch/x86/kernel/kvm.c | 1 - arch/x86/kernel/kvm.c | 1 -
arch/x86/kernel/paravirt.c | 6 - arch/x86/kernel/paravirt.c | 6 -
arch/x86/mm/pgtable.c | 16 +- arch/x86/mm/pgtable.c | 16 +-
arch/x86/mm/tlb.c | 553 +++++++++++++++++++++-- arch/x86/mm/tlb.c | 518 +++++++++++++++++++++--
arch/x86/xen/mmu_pv.c | 1 - arch/x86/xen/mmu_pv.c | 1 -
include/linux/mm_types.h | 1 + include/linux/mm_types.h | 1 +
mm/memory.c | 1 - mm/memory.c | 1 -
@ -1067,11 +915,11 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
mm/swap_state.c | 1 - mm/swap_state.c | 1 -
mm/vma.c | 2 - mm/vma.c | 2 -
tools/arch/x86/include/asm/msr-index.h | 2 + tools/arch/x86/include/asm/msr-index.h | 2 +
25 files changed, 768 insertions(+), 97 deletions(-) 25 files changed, 732 insertions(+), 94 deletions(-)
create mode 100644 arch/x86/include/asm/invlpgb.h create mode 100644 arch/x86/include/asm/invlpgb.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ef6cfea9df73..1f824dcab4dc 100644 index c2fb8fe86a45..2a4653d19299 100644
--- a/arch/x86/Kconfig --- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig +++ b/arch/x86/Kconfig
@@ -273,7 +273,7 @@ config X86 @@ -273,7 +273,7 @@ config X86
@@ -1268,19 +1116,10 @@ index 000000000000..220aba708b72
+ +
+#endif /* _ASM_X86_INVLPGB */ +#endif /* _ASM_X86_INVLPGB */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index ce4677b8b735..d71cd599fec4 100644 index 3b496cdcb74b..d71cd599fec4 100644
--- a/arch/x86/include/asm/mmu.h --- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h
@@ -37,6 +37,8 @@ typedef struct { @@ -69,6 +69,12 @@ typedef struct {
*/
atomic64_t tlb_gen;
+ unsigned long next_trim_cpumask;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct rw_semaphore ldt_usr_sem;
struct ldt_struct *ldt;
@@ -67,6 +69,12 @@ typedef struct {
u16 pkey_allocation_map; u16 pkey_allocation_map;
s16 execute_only_pkey; s16 execute_only_pkey;
#endif #endif
@ -1294,7 +1133,7 @@ index ce4677b8b735..d71cd599fec4 100644
#define INIT_MM_CONTEXT(mm) \ #define INIT_MM_CONTEXT(mm) \
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 2886cb668d7f..d670699d32c2 100644 index 795fdd53bd0a..d670699d32c2 100644
--- a/arch/x86/include/asm/mmu_context.h --- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h
@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm) @@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
@ -1306,15 +1145,7 @@ index 2886cb668d7f..d670699d32c2 100644
/* /*
* Init a new mm. Used on mm copies, like at fork() * Init a new mm. Used on mm copies, like at fork()
* and on mm's that are brand-new, like at execve(). * and on mm's that are brand-new, like at execve().
@@ -151,6 +153,7 @@ static inline int init_new_context(struct task_struct *tsk, @@ -161,6 +163,14 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
atomic64_set(&mm->context.tlb_gen, 0);
+ mm->context.next_trim_cpumask = jiffies + HZ;
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
@@ -160,6 +163,14 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1; mm->context.execute_only_pkey = -1;
} }
#endif #endif
@ -1329,7 +1160,7 @@ index 2886cb668d7f..d670699d32c2 100644
mm_reset_untag_mask(mm); mm_reset_untag_mask(mm);
init_new_context_ldt(mm); init_new_context_ldt(mm);
return 0; return 0;
@@ -169,6 +180,10 @@ static inline int init_new_context(struct task_struct *tsk, @@ -170,6 +180,10 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void destroy_context(struct mm_struct *mm) static inline void destroy_context(struct mm_struct *mm)
{ {
destroy_context_ldt(mm); destroy_context_ldt(mm);
@ -1341,7 +1172,7 @@ index 2886cb668d7f..d670699d32c2 100644
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3ae84c3b8e6d..dc1c1057f26e 100644 index 61e991507353..6844ebeed377 100644
--- a/arch/x86/include/asm/msr-index.h --- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h
@@ -25,6 +25,7 @@ @@ -25,6 +25,7 @@
@@ -1390,7 +1221,7 @@ index 8d4fbe1be489..13405959e4db 100644
void (*exit_mmap)(struct mm_struct *mm); void (*exit_mmap)(struct mm_struct *mm);
void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc); void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 69e79fff41b8..89dddbcd1322 100644 index 02fc2aa06e9e..89dddbcd1322 100644
--- a/arch/x86/include/asm/tlbflush.h --- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h
@@ -6,10 +6,12 @@ @@ -6,10 +6,12 @@
@ -1430,15 +1261,7 @@ index 69e79fff41b8..89dddbcd1322 100644
extern void initialize_tlbstate_and_flush(void); extern void initialize_tlbstate_and_flush(void);
/* /*
@@ -222,6 +234,7 @@ struct flush_tlb_info { @@ -231,6 +243,82 @@ void flush_tlb_one_kernel(unsigned long addr);
unsigned int initiating_cpu;
u8 stride_shift;
u8 freed_tables;
+ u8 trim_cpumask;
};
void flush_tlb_local(void);
@@ -230,6 +243,82 @@ void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask, void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info); const struct flush_tlb_info *info);
@ -1521,7 +1344,7 @@ index 69e79fff41b8..89dddbcd1322 100644
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h> #include <asm/paravirt.h>
#endif #endif
@@ -277,21 +366,15 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) @@ -278,21 +366,15 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
return atomic64_inc_return(&mm->context.tlb_gen); return atomic64_inc_return(&mm->context.tlb_gen);
} }
@@ -1707,7 +1530,7 @@ index 5745a354a241..3dc4af1f7868 100644
#endif /* CONFIG_PGTABLE_LEVELS > 4 */ #endif /* CONFIG_PGTABLE_LEVELS > 4 */
#endif /* CONFIG_PGTABLE_LEVELS > 3 */ #endif /* CONFIG_PGTABLE_LEVELS > 3 */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a2becb85bea7..482b7def3677 100644 index 90a9e4740913..482b7def3677 100644
--- a/arch/x86/mm/tlb.c --- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c
@@ -74,13 +74,15 @@ @@ -74,13 +74,15 @@
@ -2202,46 +2025,7 @@ index a2becb85bea7..482b7def3677 100644
if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID && if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
f->new_tlb_gen <= local_tlb_gen)) { f->new_tlb_gen <= local_tlb_gen)) {
/* /*
@@ -893,9 +1243,36 @@ static void flush_tlb_func(void *info) @@ -953,7 +1303,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
nr_invalidate);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
+static bool should_flush_tlb(int cpu, void *data)
+{
+ struct flush_tlb_info *info = data;
+
+ /* Lazy TLB will get flushed at the next context switch. */
+ if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+ return false;
+
+ /* No mm means kernel memory flush. */
+ if (!info->mm)
+ return true;
+
+ /* The target mm is loaded, and the CPU is not lazy. */
+ if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+ return true;
+
+ /* In cpumask, but not the loaded mm? Periodically remove by flushing. */
+ if (info->trim_cpumask)
+ return true;
+
+ return false;
+}
+
+static bool should_trim_cpumask(struct mm_struct *mm)
{
- return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
+ if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
+ WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
+ return true;
+ }
+ return false;
}
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
@@ -926,10 +1303,10 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
* up on the new contents of what used to be page tables, while * up on the new contents of what used to be page tables, while
* doing a speculative memory access. * doing a speculative memory access.
*/ */
@ -2249,17 +2033,11 @@ index a2becb85bea7..482b7def3677 100644
+ if (info->freed_tables || in_asid_transition(info->mm)) + if (info->freed_tables || in_asid_transition(info->mm))
on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true); on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
else else
- on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func, on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
+ on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func, @@ -1009,6 +1359,15 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
(void *)info, 1, cpumask);
}
@@ -980,6 +1357,16 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
info->freed_tables = freed_tables;
info->new_tlb_gen = new_tlb_gen;
info->initiating_cpu = smp_processor_id(); info->initiating_cpu = smp_processor_id();
+ info->trim_cpumask = 0; info->trim_cpumask = 0;
+
+ /* + /*
+ * If the number of flushes is so large that a full flush + * If the number of flushes is so large that a full flush
+ * would be faster, do a full flush. + * would be faster, do a full flush.
@ -2268,10 +2046,11 @@ index a2becb85bea7..482b7def3677 100644
+ info->start = 0; + info->start = 0;
+ info->end = TLB_FLUSH_ALL; + info->end = TLB_FLUSH_ALL;
+ } + }
+
return info; return info;
} }
@@ -998,17 +1385,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
@@ -1026,17 +1385,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
bool freed_tables) bool freed_tables)
{ {
struct flush_tlb_info *info; struct flush_tlb_info *info;
@ -2290,7 +2069,7 @@ index a2becb85bea7..482b7def3677 100644
/* This is also a barrier that synchronizes with switch_mm(). */ /* This is also a barrier that synchronizes with switch_mm(). */
new_tlb_gen = inc_mm_tlb_gen(mm); new_tlb_gen = inc_mm_tlb_gen(mm);
@@ -1021,8 +1399,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, @@ -1049,9 +1399,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
* a local TLB flush is needed. Optimize this use-case by calling * a local TLB flush is needed. Optimize this use-case by calling
* flush_tlb_func_local() directly in this case. * flush_tlb_func_local() directly in this case.
*/ */
@ -2298,13 +2077,13 @@ index a2becb85bea7..482b7def3677 100644
+ if (mm_global_asid(mm)) { + if (mm_global_asid(mm)) {
+ broadcast_tlb_flush(info); + broadcast_tlb_flush(info);
+ } else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + } else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+ info->trim_cpumask = should_trim_cpumask(mm); info->trim_cpumask = should_trim_cpumask(mm);
flush_tlb_multi(mm_cpumask(mm), info); flush_tlb_multi(mm_cpumask(mm), info);
+ consider_global_asid(mm); + consider_global_asid(mm);
} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
lockdep_assert_irqs_enabled(); lockdep_assert_irqs_enabled();
local_irq_disable(); local_irq_disable();
@@ -1036,6 +1418,16 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, @@ -1065,6 +1418,16 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
} }
@ -2321,7 +2100,7 @@ index a2becb85bea7..482b7def3677 100644
static void do_flush_tlb_all(void *info) static void do_flush_tlb_all(void *info)
{ {
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -1044,10 +1436,34 @@ static void do_flush_tlb_all(void *info) @@ -1073,10 +1436,34 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void) void flush_tlb_all(void)
{ {
@ -2356,7 +2135,7 @@ index a2becb85bea7..482b7def3677 100644
static void do_kernel_range_flush(void *info) static void do_kernel_range_flush(void *info)
{ {
struct flush_tlb_info *f = info; struct flush_tlb_info *f = info;
@@ -1060,22 +1476,21 @@ static void do_kernel_range_flush(void *info) @@ -1089,22 +1476,21 @@ static void do_kernel_range_flush(void *info)
void flush_tlb_kernel_range(unsigned long start, unsigned long end) void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{ {
@ -2367,15 +2146,15 @@ index a2becb85bea7..482b7def3677 100644
- } else { - } else {
- struct flush_tlb_info *info; - struct flush_tlb_info *info;
+ struct flush_tlb_info *info; + struct flush_tlb_info *info;
+
+ guard(preempt)();
- preempt_disable(); - preempt_disable();
- info = get_flush_tlb_info(NULL, start, end, 0, false, - info = get_flush_tlb_info(NULL, start, end, 0, false,
- TLB_GENERATION_INVALID); - TLB_GENERATION_INVALID);
+ guard(preempt)();
+ info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false, + info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false,
+ TLB_GENERATION_INVALID); + TLB_GENERATION_INVALID);
+
+ if (broadcast_kernel_range_flush(info)) + if (broadcast_kernel_range_flush(info))
+ ; /* Fall through. */ + ; /* Fall through. */
+ else if (info->end == TLB_FLUSH_ALL) + else if (info->end == TLB_FLUSH_ALL)
@ -2390,7 +2169,7 @@ index a2becb85bea7..482b7def3677 100644
} }
/* /*
@@ -1263,12 +1678,52 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) @@ -1292,12 +1678,52 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
local_irq_enable(); local_irq_enable();
} }
@ -2444,10 +2223,10 @@ index a2becb85bea7..482b7def3677 100644
* Blindly accessing user memory from NMI context can be dangerous * Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or * if we're in the middle of switching the current user task or
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 55a4996d0c04..041e17282af0 100644 index d078de2c952b..38971c6dcd4b 100644
--- a/arch/x86/xen/mmu_pv.c --- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c
@@ -2137,7 +2137,6 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = { @@ -2189,7 +2189,6 @@ static const typeof(pv_ops) xen_mmu_ops __initconst = {
.flush_tlb_kernel = xen_flush_tlb, .flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_one_user = xen_flush_tlb_one_user, .flush_tlb_one_user = xen_flush_tlb_one_user,
.flush_tlb_multi = xen_flush_tlb_multi, .flush_tlb_multi = xen_flush_tlb_multi,
@@ -2554,9 +2333,9 @@ index 3ae84c3b8e6d..dc1c1057f26e 100644
-- --
2.48.0.rc1 2.48.0.rc1
From 479754290512670400557c6fb91a252676261db8 Mon Sep 17 00:00:00 2001 From 29126d387284698bd160aeea6086ed8bafc53134 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:30:31 +0100 Date: Fri, 21 Feb 2025 14:39:56 +0100
Subject: [PATCH 03/12] bbr3 Subject: [PATCH 03/12] bbr3
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -5940,9 +5719,9 @@ index b412ed88ccd9..d70f8b742b21 100644
-- --
2.48.0.rc1 2.48.0.rc1
From dbc43ac67696e3fbebdc6930aa7f057b255c2d6c Mon Sep 17 00:00:00 2001 From a0e342962d38af24849200d7089cbd54d6576fe8 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:30:44 +0100 Date: Fri, 21 Feb 2025 14:40:07 +0100
Subject: [PATCH 04/12] cachy Subject: [PATCH 04/12] cachy
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -6156,7 +5935,7 @@ index f48eaa98d22d..fc777c14cff6 100644
unprivileged_userfaultfd unprivileged_userfaultfd
======================== ========================
diff --git a/Makefile b/Makefile diff --git a/Makefile b/Makefile
index 423d087afad2..4af7030f914e 100644 index c436a6e64971..c6bd6363ed96 100644
--- a/Makefile --- a/Makefile
+++ b/Makefile +++ b/Makefile
@@ -860,11 +860,19 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks @@ -860,11 +860,19 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks
@@ -7603,10 +7382,10 @@ index e8ae7681bf0a..8a0d873983f3 100644
} }
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 21bd635bcdfc..6f4032038fc7 100644 index c0b98749dde7..3aff1eea82ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2809,7 +2809,10 @@ int smu_get_power_limit(void *handle, @@ -2810,7 +2810,10 @@ int smu_get_power_limit(void *handle,
*limit = smu->max_power_limit; *limit = smu->max_power_limit;
break; break;
case SMU_PPT_LIMIT_MIN: case SMU_PPT_LIMIT_MIN:
@@ -7618,7 +7397,7 @@ index 21bd635bcdfc..6f4032038fc7 100644
break; break;
default: default:
return -EINVAL; return -EINVAL;
@@ -2833,7 +2836,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit) @@ -2834,7 +2837,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (smu->ppt_funcs->set_power_limit) if (smu->ppt_funcs->set_power_limit)
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
@@ -11971,7 +11750,7 @@ index 000000000000..e105e6f5cc91
+MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>"); +MODULE_AUTHOR("Daniel Drake <drake@endlessm.com>");
+MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 76f4df75b08a..49c1a91c611d 100644 index 0a1f668999ce..d4163fa9c27a 100644
--- a/drivers/pci/quirks.c --- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c
@@ -3746,6 +3746,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev) @@ -3746,6 +3746,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
@@ -13552,7 +13331,7 @@ index 7d0a05660e5e..3a3116dca89c 100644
#ifdef CONFIG_NUMA_BALANCING #ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */ /* Restrict the NUMA promotion throughput (MB/s) for each target node. */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c5d67a43fe52..da653eba7884 100644 index 66744d60904d..4b3fffa1d5f5 100644
--- a/kernel/sched/sched.h --- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h +++ b/kernel/sched/sched.h
@@ -2820,7 +2820,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); @@ -2820,7 +2820,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
@@ -14133,9 +13912,9 @@ index 6872b5aff73e..1910fe1b2471 100644
-- --
2.48.0.rc1 2.48.0.rc1
From 65f50bcb3c1ee4ac40786548409e07869bdaece7 Mon Sep 17 00:00:00 2001 From c92d718c7ad95a5eae66eb820b6d7879fa127443 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:32:16 +0100 Date: Fri, 21 Feb 2025 14:40:38 +0100
Subject: [PATCH 05/12] crypto Subject: [PATCH 05/12] crypto
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -14907,9 +14686,9 @@ index fbf43482e1f5..11e95fc62636 100644
-- --
2.48.0.rc1 2.48.0.rc1
From 5a1609eb06ee64b2b55edacaf5e79b521a46bbb3 Mon Sep 17 00:00:00 2001 From 3cb3e2023181b4be0e5a454b75e86ecddca9646a Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:32:27 +0100 Date: Fri, 21 Feb 2025 14:41:10 +0100
Subject: [PATCH 06/12] fixes Subject: [PATCH 06/12] fixes
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -15167,10 +14946,10 @@ index b027a4030976..5cc750200f19 100644
return ret; return ret;
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 76030e54a3f5..83cae9398987 100644 index c1dec2453af4..1a2553498f89 100644
--- a/kernel/sched/ext.c --- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c +++ b/kernel/sched/ext.c
@@ -5264,9 +5264,10 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, @@ -5279,9 +5279,10 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx,
scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK,
p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK,
ops_state >> SCX_OPSS_QSEQ_SHIFT); ops_state >> SCX_OPSS_QSEQ_SHIFT);
@@ -15203,9 +14982,9 @@ index dca706617adc..89d3aef160b7 100644
-- --
2.48.0.rc1 2.48.0.rc1
From c683c53853220a36525db834e14be617dda17d0a Mon Sep 17 00:00:00 2001 From 3ff29239964e763e644499525f6abb1e6b5de3cb Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:32:45 +0100 Date: Fri, 21 Feb 2025 14:41:23 +0100
Subject: [PATCH 07/12] itmt-core-ranking Subject: [PATCH 07/12] itmt-core-ranking
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -15521,7 +15300,7 @@ index 3a3116dca89c..a27896a05103 100644
case group_misfit_task: case group_misfit_task:
/* /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index da653eba7884..dee2797009e3 100644 index 4b3fffa1d5f5..fa4c60eb4043 100644
--- a/kernel/sched/sched.h --- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h +++ b/kernel/sched/sched.h
@@ -2056,7 +2056,6 @@ struct sched_group { @@ -2056,7 +2056,6 @@ struct sched_group {
@@ -15568,9 +15347,9 @@ index 9748a4c8d668..59b8157cb114 100644
-- --
2.48.0.rc1 2.48.0.rc1
From 98dab1891e0b8e8cae7ae4fbddaf87b94040c2a9 Mon Sep 17 00:00:00 2001 From b822a217d4e67f24160f4a0602b9d5bb66c1d5a8 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:32:59 +0100 Date: Fri, 21 Feb 2025 14:42:10 +0100
Subject: [PATCH 08/12] ntsync Subject: [PATCH 08/12] ntsync
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -18618,9 +18397,9 @@ index 000000000000..3aad311574c4
-- --
2.48.0.rc1 2.48.0.rc1
From e959305333470311ce6636ab70e094197ce99351 Mon Sep 17 00:00:00 2001 From 975a79a5278fd9b12781af42cab0ca820ee4598c Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:33:10 +0100 Date: Fri, 21 Feb 2025 14:42:22 +0100
Subject: [PATCH 09/12] perf-per-core Subject: [PATCH 09/12] perf-per-core
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -19516,9 +19295,9 @@ index 8277c64f88db..b5a5e1411469 100644
-- --
2.48.0.rc1 2.48.0.rc1
From cbe148fadf7905c3c9d85f4792427a5286b3d548 Mon Sep 17 00:00:00 2001 From 73a7a93279dfd9eca8703374f7e45340f2cd9f5b Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:33:25 +0100 Date: Fri, 21 Feb 2025 14:42:40 +0100
Subject: [PATCH 10/12] pksm Subject: [PATCH 10/12] pksm
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -19949,9 +19728,9 @@ index e9115b4d8b63..2afc778f2d17 100644
-- --
2.48.0.rc1 2.48.0.rc1
From ee21e489dd618c304f7f31415e389ba57ee5548e Mon Sep 17 00:00:00 2001 From edfb199a3ed4ad6d5524c21801f40a406c3c85df Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:33:34 +0100 Date: Fri, 21 Feb 2025 14:43:09 +0100
Subject: [PATCH 11/12] t2 Subject: [PATCH 11/12] t2
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>
@@ -19978,7 +19757,6 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
drivers/hid/hid-quirks.c | 8 +- drivers/hid/hid-quirks.c | 8 +-
drivers/hwmon/applesmc.c | 1138 ++++++++++++----- drivers/hwmon/applesmc.c | 1138 ++++++++++++-----
drivers/input/mouse/bcm5974.c | 138 ++ drivers/input/mouse/bcm5974.c | 138 ++
.../broadcom/brcm80211/brcmfmac/pcie.c | 4 +-
drivers/pci/vgaarb.c | 1 + drivers/pci/vgaarb.c | 1 +
drivers/platform/x86/apple-gmux.c | 18 + drivers/platform/x86/apple-gmux.c | 18 +
drivers/staging/Kconfig | 2 + drivers/staging/Kconfig | 2 +
@@ -20013,7 +19791,7 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
lib/test_printf.c | 20 +- lib/test_printf.c | 20 +-
lib/vsprintf.c | 36 +- lib/vsprintf.c | 36 +-
scripts/checkpatch.pl | 2 +- scripts/checkpatch.pl | 2 +-
57 files changed, 8350 insertions(+), 338 deletions(-) 56 files changed, 8348 insertions(+), 336 deletions(-)
create mode 100644 Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd create mode 100644 Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
create mode 100644 drivers/gpu/drm/tiny/appletbdrm.c create mode 100644 drivers/gpu/drm/tiny/appletbdrm.c
create mode 100644 drivers/hid/hid-appletb-bl.c create mode 100644 drivers/hid/hid-appletb-bl.c
@@ -21900,7 +21678,7 @@ index 000000000000..fa28a691da6a
+MODULE_DESCRIPTION("MacBookPro Touch Bar Keyboard Mode Driver"); +MODULE_DESCRIPTION("MacBookPro Touch Bar Keyboard Mode Driver");
+MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 82900857bfd8..c16edd3f4a37 100644 index e50887a6d22c..c436340331b4 100644
--- a/drivers/hid/hid-multitouch.c --- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c
@@ -73,6 +73,7 @@ MODULE_LICENSE("GPL"); @@ -73,6 +73,7 @@ MODULE_LICENSE("GPL");
@@ -22034,7 +21812,7 @@ index 82900857bfd8..c16edd3f4a37 100644
if (cls->is_indirect) if (cls->is_indirect)
app->mt_flags |= INPUT_MT_POINTER; app->mt_flags |= INPUT_MT_POINTER;
@@ -1769,6 +1791,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) @@ -1772,6 +1794,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
} }
} }
@ -22050,7 +21828,7 @@ index 82900857bfd8..c16edd3f4a37 100644
td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL); td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) { if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n"); dev_err(&hdev->dev, "cannot allocate multitouch data\n");
@@ -1816,10 +1847,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) @@ -1819,10 +1850,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
timer_setup(&td->release_timer, mt_expired_timeout, 0); timer_setup(&td->release_timer, mt_expired_timeout, 0);
@ -22061,7 +21839,7 @@ index 82900857bfd8..c16edd3f4a37 100644
if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID) if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
mt_fix_const_fields(hdev, HID_DG_CONTACTID); mt_fix_const_fields(hdev, HID_DG_CONTACTID);
@@ -2301,6 +2328,11 @@ static const struct hid_device_id mt_devices[] = { @@ -2304,6 +2331,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU, MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) }, USB_DEVICE_ID_XIROKU_CSR2) },
@ -24248,28 +24026,6 @@ index dfdfb59cc8b5..e0da70576167 100644
{} {}
}; };
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index e4395b1f8c11..d2caa80e9412 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2712,7 +2712,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
@@ -2723,7 +2723,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
index 78748e8d2dba..2b2b558cebe6 100644 index 78748e8d2dba..2b2b558cebe6 100644
--- a/drivers/pci/vgaarb.c --- a/drivers/pci/vgaarb.c
@ -30299,9 +30055,9 @@ index 9eed3683ad76..7ddbf75f4c26 100755
-- --
2.48.0.rc1 2.48.0.rc1
From 3cff5b38b4b38bf4e6c96b0d7c0a9950c42f5673 Mon Sep 17 00:00:00 2001 From 047728cdbfdbf23f914674f8fb9cbae2bce866e0 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev> From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 17 Feb 2025 16:33:45 +0100 Date: Fri, 21 Feb 2025 14:43:30 +0100
Subject: [PATCH 12/12] zstd Subject: [PATCH 12/12] zstd
Signed-off-by: Peter Jung <admin@ptr1337.dev> Signed-off-by: Peter Jung <admin@ptr1337.dev>