6.11.0 release
Some checks failed:
PikaOS Package Build & Release (amd64-v3) / build (push): failing after 20s

ferreo 2024-09-16 13:10:53 +01:00
parent 0a35d494e0
commit 913ddec6ee
5 changed files with 3656 additions and 994 deletions


@ -1 +1 @@
3
4

18
config

@ -1,8 +1,8 @@
#
# Automatically generated file; DO NOT EDIT.
# Linux/x86 6.11.0-rc6 Kernel Configuration
# Linux/x86 6.11.0 Kernel Configuration
#
CONFIG_CC_VERSION_TEXT="gcc (GCC) 14.2.1 20240805"
CONFIG_CC_VERSION_TEXT="gcc (GCC) 14.2.1 20240910"
CONFIG_CC_IS_GCC=y
CONFIG_GCC_VERSION=140201
CONFIG_CLANG_VERSION=0
@ -223,7 +223,7 @@ CONFIG_CGROUPS=y
CONFIG_PAGE_COUNTER=y
# CONFIG_CGROUP_FAVOR_DYNMODS is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_V1=y
# CONFIG_MEMCG_V1 is not set
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_WRITEBACK=y
CONFIG_CGROUP_SCHED=y
@ -2744,7 +2744,7 @@ CONFIG_NVME_TCP=m
CONFIG_NVME_TCP_TLS=y
CONFIG_NVME_HOST_AUTH=y
CONFIG_NVME_TARGET=m
# CONFIG_NVME_TARGET_DEBUGFS is not set
CONFIG_NVME_TARGET_DEBUGFS=y
CONFIG_NVME_TARGET_PASSTHRU=y
CONFIG_NVME_TARGET_LOOP=m
CONFIG_NVME_TARGET_RDMA=m
@ -3470,7 +3470,7 @@ CONFIG_FM10K=m
CONFIG_IGC=m
CONFIG_IGC_LEDS=y
CONFIG_IDPF=m
CONFIG_IDPF_SINGLEQ=y
# CONFIG_IDPF_SINGLEQ is not set
CONFIG_JME=m
CONFIG_NET_VENDOR_ADI=y
CONFIG_ADIN1110=m
@ -4562,7 +4562,7 @@ CONFIG_INPUT_AD714X_SPI=m
CONFIG_INPUT_ARIZONA_HAPTICS=m
CONFIG_INPUT_ATC260X_ONKEY=m
CONFIG_INPUT_BMA150=m
CONFIG_INPUT_CS40L50_VIBRA=m
# CONFIG_INPUT_CS40L50_VIBRA is not set
CONFIG_INPUT_E3X0_BUTTON=m
CONFIG_INPUT_PCSPKR=m
CONFIG_INPUT_MAX77693_HAPTIC=m
@ -5192,7 +5192,7 @@ CONFIG_GPIO_SIM=m
#
# GPIO Debugging utilities
#
CONFIG_GPIO_VIRTUSER=m
# CONFIG_GPIO_VIRTUSER is not set
# end of GPIO Debugging utilities
CONFIG_W1=m
@ -10730,7 +10730,7 @@ CONFIG_OVERLAY_FS_METACOPY=y
#
CONFIG_NETFS_SUPPORT=m
CONFIG_NETFS_STATS=y
# CONFIG_NETFS_DEBUG is not set
CONFIG_NETFS_DEBUG=y
CONFIG_FSCACHE=y
CONFIG_FSCACHE_STATS=y
CONFIG_CACHEFILES=m
@ -11654,7 +11654,7 @@ CONFIG_PAHOLE_HAS_SPLIT_BTF=y
CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y
CONFIG_DEBUG_INFO_BTF_MODULES=y
# CONFIG_MODULE_ALLOW_BTF_MISMATCH is not set
# CONFIG_GDB_SCRIPTS is not set
CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=2048
CONFIG_STRIP_ASM_SYMS=y
# CONFIG_READABLE_ASM is not set
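
The options toggled above can be cross-checked against an installed kernel's config. Below is a minimal C sketch, assuming the uncompressed config file (e.g. /boot/config-6.11.0) is passed on the command line; the option list simply mirrors the ones changed in this commit.

/* Verify the CONFIG_* toggles from the hunks above against a config file.
 * Usage: ./checkcfg /boot/config-<version> (the path is an assumption). */
#include <stdio.h>
#include <string.h>

static const char *opts[] = {
	"CONFIG_MEMCG_V1", "CONFIG_NVME_TARGET_DEBUGFS", "CONFIG_IDPF_SINGLEQ",
	"CONFIG_INPUT_CS40L50_VIBRA", "CONFIG_GPIO_VIRTUSER",
	"CONFIG_NETFS_DEBUG", "CONFIG_GDB_SCRIPTS",
};

int main(int argc, char **argv)
{
	char line[512];
	FILE *f;
	size_t i;

	if (argc < 2 || !(f = fopen(argv[1], "r"))) {
		fprintf(stderr, "usage: %s /boot/config-<version>\n", argv[0]);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* substring match covers both "CONFIG_FOO=y" and
		 * "# CONFIG_FOO is not set" lines */
		for (i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
			if (strstr(line, opts[i]))
				fputs(line, stdout);
	}
	fclose(f);
	return 0;
}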


@ -1,18 +1,53 @@
From 89404bebea127570b279beb4ed0a30ace5403370 Mon Sep 17 00:00:00 2001
From 67efcf30522cda8a81d47d35a9a89c24f5cdd00a Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:38:28 +0200
Date: Sun, 15 Sep 2024 17:28:12 +0200
Subject: [PATCH 01/11] amd-pstate
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
Documentation/admin-guide/pm/amd-pstate.rst | 15 +-
arch/x86/include/asm/processor.h | 3 -
arch/x86/kernel/acpi/cppc.c | 159 ++++++++++++++++++++++++++++++-
arch/x86/kernel/cpu/amd.c | 16 ----
drivers/cpufreq/acpi-cpufreq.c | 12 ++-
drivers/cpufreq/amd-pstate.c | 143 +++++++--------------------
include/acpi/cppc_acpi.h | 10 ++
6 files changed, 208 insertions(+), 135 deletions(-)
arch/x86/kernel/acpi/cppc.c | 172 ++++++++++++++++++--
arch/x86/kernel/cpu/amd.c | 16 --
drivers/acpi/cppc_acpi.c | 10 +-
drivers/cpufreq/acpi-cpufreq.c | 12 +-
drivers/cpufreq/amd-pstate.c | 133 ++++-----------
include/acpi/cppc_acpi.h | 41 +++--
8 files changed, 254 insertions(+), 148 deletions(-)
diff --git a/Documentation/admin-guide/pm/amd-pstate.rst b/Documentation/admin-guide/pm/amd-pstate.rst
index d0324d44f548..210a808b74ec 100644
--- a/Documentation/admin-guide/pm/amd-pstate.rst
+++ b/Documentation/admin-guide/pm/amd-pstate.rst
@@ -251,7 +251,9 @@ performance supported in `AMD CPPC Performance Capability <perf_cap_>`_).
In some ASICs, the highest CPPC performance is not the one in the ``_CPC``
table, so we need to expose it to sysfs. If boost is not active, but
still supported, this maximum frequency will be larger than the one in
-``cpuinfo``.
+``cpuinfo``. On systems that support preferred core, the driver will have
+different values for some cores than others and this will reflect the values
+advertised by the platform at bootup.
This attribute is read-only.
``amd_pstate_lowest_nonlinear_freq``
@@ -262,6 +264,17 @@ lowest non-linear performance in `AMD CPPC Performance Capability
<perf_cap_>`_.)
This attribute is read-only.
+``amd_pstate_hw_prefcore``
+
+Whether the platform supports the preferred core feature and it has been
+enabled. This attribute is read-only.
+
+``amd_pstate_prefcore_ranking``
+
+The performance ranking of the core. This number doesn't have any unit, but
+larger numbers are preferred at the time of reading. This can change at
+runtime based on platform conditions. This attribute is read-only.
+
``energy_performance_available_preferences``
A list of all the supported EPP preferences that could be used for
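
The two new attributes documented above are plain sysfs files. A minimal C sketch for reading them per CPU follows; the path layout (standard per-CPU cpufreq directory) is an assumption, since the hunk itself only describes the attribute semantics.

/* Read the preferred-core attributes for one CPU. The sysfs path layout is
 * assumed (per-CPU cpufreq directory); adjust the CPU number as needed. */
#include <stdio.h>

static void print_attr(int cpu, const char *name)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/cpufreq/%s", cpu, name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("cpu%d %s: %s", cpu, name, buf);
	fclose(f);
}

int main(void)
{
	print_attr(0, "amd_pstate_hw_prefcore");
	print_attr(0, "amd_pstate_prefcore_ranking");
	return 0;
}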
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a75a07f4931f..775acbdea1a9 100644
--- a/arch/x86/include/asm/processor.h
@ -35,7 +70,7 @@ index a75a07f4931f..775acbdea1a9 100644
static inline void amd_check_microcode(void) { }
#endif
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index ff8f25faca3d..44b13a4e2874 100644
index ff8f25faca3d..956984054bf3 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -9,6 +9,17 @@
@ -56,7 +91,14 @@ index ff8f25faca3d..44b13a4e2874 100644
/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */
bool cpc_supported_by_cpu(void)
@@ -75,15 +86,17 @@ static void amd_set_max_freq_ratio(void)
@@ -69,31 +80,30 @@ int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static void amd_set_max_freq_ratio(void)
{
struct cppc_perf_caps perf_caps;
- u64 highest_perf, nominal_perf;
+ u64 numerator, nominal_perf;
u64 perf_ratio;
int rc;
rc = cppc_get_perf_caps(0, &perf_caps);
if (rc) {
@ -66,9 +108,11 @@ index ff8f25faca3d..44b13a4e2874 100644
}
- highest_perf = amd_get_highest_perf();
+ rc = amd_get_boost_ratio_numerator(0, &highest_perf);
+ if (rc)
+ pr_warn("Could not retrieve highest performance\n");
+ rc = amd_get_boost_ratio_numerator(0, &numerator);
+ if (rc) {
+ pr_warn("Could not retrieve highest performance (%d)\n", rc);
+ return;
+ }
nominal_perf = perf_caps.nominal_perf;
- if (!highest_perf || !nominal_perf) {
@ -78,16 +122,18 @@ index ff8f25faca3d..44b13a4e2874 100644
return;
}
@@ -91,7 +104,7 @@ static void amd_set_max_freq_ratio(void)
- perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
/* midpoint between max_boost and max_P */
perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
if (!perf_ratio) {
- perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
- if (!perf_ratio) {
- pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
+ pr_warn("Non-zero highest/nominal perf values led to a 0 ratio\n");
return;
}
- return;
- }
+ perf_ratio = (div_u64(numerator * SCHED_CAPACITY_SCALE, nominal_perf) + SCHED_CAPACITY_SCALE) >> 1;
@@ -116,3 +129,139 @@ void init_freq_invariance_cppc(void)
freq_invariance_set_perf_ratio(perf_ratio, false);
}
@@ -116,3 +126,143 @@ void init_freq_invariance_cppc(void)
init_done = true;
mutex_unlock(&freq_invariance_lock);
}
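
For reference, the reworked ratio above ("midpoint between max_boost and max_P") is easy to reproduce outside the kernel. The sketch below uses SCHED_CAPACITY_SCALE = 1024 as the kernel does; the numerator and nominal_perf values are made-up examples, not real CPPC data.

/* Userspace sketch of the freq-invariance ratio math from the hunk above. */
#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SCALE 1024ULL

int main(void)
{
	uint64_t numerator = 166;	/* stand-in boost-ratio numerator */
	uint64_t nominal_perf = 100;	/* stand-in nominal performance */

	/* midpoint between the boost ratio and 1.0, both scaled by 1024 */
	uint64_t perf_ratio =
		(numerator * SCHED_CAPACITY_SCALE / nominal_perf +
		 SCHED_CAPACITY_SCALE) >> 1;

	printf("perf_ratio = %llu (%.2f x SCHED_CAPACITY_SCALE)\n",
	       (unsigned long long)perf_ratio,
	       (double)perf_ratio / SCHED_CAPACITY_SCALE);
	return 0;
}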
@ -191,6 +237,10 @@ index ff8f25faca3d..44b13a4e2874 100644
+ * a CPU. On systems that support preferred cores, this will be a hardcoded
+ * value. On other systems this will be the highest performance register value.
+ *
+ * If booting the system with amd-pstate enabled but preferred cores disabled then
+ * the correct boost numerator will be returned to match hardware capabilities
+ * even if the preferred cores scheduling hints are not enabled.
+ *
+ * Return: 0 for success, negative error code otherwise.
+ */
+int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
@ -254,6 +304,36 @@ index 1e0fe5f8ab84..015971adadfc 100644
static void zenbleed_check_cpu(void *unused)
{
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index dd3d3082c8c7..3b5b695bb80b 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -103,6 +103,11 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
(cpc)->cpc_entry.reg.space_id == \
ACPI_ADR_SPACE_PLATFORM_COMM)
+/* Check if a CPC register is in FFH */
+#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
+ (cpc)->cpc_entry.reg.space_id == \
+ ACPI_ADR_SPACE_FIXED_HARDWARE)
+
/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \
(cpc)->cpc_entry.reg.space_id == \
@@ -1486,9 +1491,12 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
/* after writing CPC, transfer the ownership of PCC to platform */
ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
up_write(&pcc_ss_data->pcc_lock);
+ } else if (osc_cpc_flexible_adr_space_confirmed &&
+ CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) {
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
} else {
ret = -ENOTSUPP;
- pr_debug("_CPC in PCC is not supported\n");
+ pr_debug("_CPC in PCC and _CPC in FFH are not supported\n");
}
return ret;
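
The hunk above makes cppc_set_epp_perf() fall back to a direct FFH register write when the PCC path does not apply and the platform has confirmed flexible address spaces. A rough sketch of that dispatch order, using stand-in types rather than the kernel's cpc structures:

/* Stand-in model of the EPP write dispatch: PCC mailbox first, then FFH,
 * otherwise report the operation as unsupported. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum reg_space { REG_PCC, REG_FFH, REG_OTHER };

static int write_epp(enum reg_space space, bool flexible_adr_confirmed,
		     unsigned int epp)
{
	if (space == REG_PCC) {
		printf("write epp=%u via the PCC mailbox\n", epp);
		return 0;
	}
	if (flexible_adr_confirmed && space == REG_FFH) {
		printf("write epp=%u via the FFH register\n", epp);
		return 0;
	}
	return -EOPNOTSUPP;	/* neither PCC nor FFH is usable */
}

int main(void)
{
	printf("ffh -> %d\n", write_epp(REG_FFH, true, 128));
	printf("unsupported -> %d\n", write_epp(REG_OTHER, true, 128));
	return 0;
}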
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index a8ca625a98b8..0f04feb6cafa 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
@ -279,7 +359,7 @@ index a8ca625a98b8..0f04feb6cafa 100644
nominal_perf = perf_caps.nominal_perf;
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 89bda7a2bb8d..93cac81e1cbe 100644
index 259a917da75f..113f82130a30 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -52,8 +52,6 @@
@ -337,7 +417,13 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
@@ -426,12 +398,7 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
@@ -420,19 +392,13 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
static int cppc_init_perf(struct amd_cpudata *cpudata)
{
struct cppc_perf_caps cppc_perf;
- u32 highest_perf;
int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
if (ret)
return ret;
@ -347,11 +433,13 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
- highest_perf = cppc_perf.highest_perf;
-
- WRITE_ONCE(cpudata->highest_perf, highest_perf);
- WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->highest_perf, cppc_perf.highest_perf);
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
+ WRITE_ONCE(cpudata->max_limit_perf, cppc_perf.highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
@@ -554,12 +521,15 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
cppc_perf.lowest_nonlinear_perf);
@@ -554,12 +520,15 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
}
if (value == prev)
@ -368,7 +456,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
}
static int amd_pstate_verify(struct cpufreq_policy_data *policy)
@@ -803,66 +773,22 @@ static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
@@ -803,66 +772,22 @@ static void amd_pstste_sched_prefcore_workfn(struct work_struct *work)
}
static DECLARE_WORK(sched_prefcore_work, amd_pstste_sched_prefcore_workfn);
@ -438,7 +526,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
schedule_work(&sched_prefcore_work);
}
@@ -875,17 +801,17 @@ static void amd_pstate_update_limits(unsigned int cpu)
@@ -875,17 +800,17 @@ static void amd_pstate_update_limits(unsigned int cpu)
int ret;
bool highest_perf_changed = false;
@ -462,7 +550,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
WRITE_ONCE(cpudata->prefcore_ranking, cur_high);
if (cur_high < CPPC_MAX_PERF)
@@ -949,8 +875,8 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
@@ -949,8 +874,8 @@ static u32 amd_pstate_get_transition_latency(unsigned int cpu)
static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
{
int ret;
@ -473,7 +561,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
u32 nominal_perf, nominal_freq;
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
u32 boost_ratio, lowest_nonlinear_ratio;
@@ -972,8 +898,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
@@ -972,8 +897,10 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
nominal_perf = READ_ONCE(cpudata->nominal_perf);
@ -486,7 +574,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
@@ -1028,12 +956,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
@@ -1028,12 +955,12 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
@ -501,21 +589,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
ret = amd_pstate_init_freq(cpudata);
if (ret)
goto free_cpudata1;
@@ -1187,12 +1115,7 @@ static ssize_t show_amd_pstate_prefcore_ranking(struct cpufreq_policy *policy,
static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
char *buf)
{
- bool hw_prefcore;
- struct amd_cpudata *cpudata = policy->driver_data;
-
- hw_prefcore = READ_ONCE(cpudata->hw_prefcore);
-
- return sysfs_emit(buf, "%s\n", str_enabled_disabled(hw_prefcore));
+ return sysfs_emit(buf, "%s\n", str_enabled_disabled(amd_pstate_prefcore));
}
static ssize_t show_energy_performance_available_preferences(
@@ -1483,12 +1406,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
@@ -1483,12 +1410,12 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
cpudata->cpu = policy->cpu;
cpudata->epp_policy = 0;
@ -530,20 +604,7 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
ret = amd_pstate_init_freq(cpudata);
if (ret)
goto free_cpudata1;
@@ -1841,10 +1764,8 @@ static bool amd_cppc_supported(void)
* the code is added for debugging purposes.
*/
if (!cpu_feature_enabled(X86_FEATURE_CPPC)) {
- if (cpu_feature_enabled(X86_FEATURE_ZEN1) || cpu_feature_enabled(X86_FEATURE_ZEN2)) {
- if (c->x86_model > 0x60 && c->x86_model < 0xaf)
- warn = true;
- } else if (cpu_feature_enabled(X86_FEATURE_ZEN3) || cpu_feature_enabled(X86_FEATURE_ZEN4)) {
+ if (cpu_feature_enabled(X86_FEATURE_ZEN3) ||
+ cpu_feature_enabled(X86_FEATURE_ZEN4)) {
if ((c->x86_model > 0x10 && c->x86_model < 0x1F) ||
(c->x86_model > 0x40 && c->x86_model < 0xaf))
warn = true;
@@ -1933,6 +1854,12 @@ static int __init amd_pstate_init(void)
@@ -1947,6 +1874,12 @@ static int __init amd_pstate_init(void)
static_call_update(amd_pstate_update_perf, cppc_update_perf);
}
@ -557,32 +618,106 @@ index 89bda7a2bb8d..93cac81e1cbe 100644
ret = amd_pstate_enable(true);
if (ret) {
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 930b6afba6f4..1d79320a2349 100644
index 930b6afba6f4..482e0587a041 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -136,6 +136,16 @@ struct cppc_cpudata {
cpumask_var_t shared_cpu_map;
};
+#ifdef CONFIG_CPU_SUP_AMD
+extern int amd_detect_prefcore(bool *detected);
@@ -159,34 +159,37 @@ extern int cppc_get_epp_perf(int cpunum, u64 *epp_perf);
extern int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable);
extern int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps);
extern int cppc_set_auto_sel(int cpu, bool enable);
+extern int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf);
+extern int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator);
+#else /* !CONFIG_CPU_SUP_AMD */
+static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf) { return -ENODEV; }
+static inline int amd_detect_prefcore(bool *detected) { return -ENODEV; }
+static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator) { return -ENODEV; }
+#endif /* !CONFIG_CPU_SUP_AMD */
+
#ifdef CONFIG_ACPI_CPPC_LIB
extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
extern int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf);
--
2.46.0
+extern int amd_detect_prefcore(bool *detected);
#else /* !CONFIG_ACPI_CPPC_LIB */
static inline int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_enable(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline bool cppc_perf_ctrs_in_pcc(void)
{
@@ -210,27 +213,39 @@ static inline bool cpc_ffh_supported(void)
}
static inline int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_set_auto_sel(int cpu, bool enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+static inline int amd_get_highest_perf(unsigned int cpu, u32 *highest_perf)
+{
+ return -ENODEV;
+}
+static inline int amd_get_boost_ratio_numerator(unsigned int cpu, u64 *numerator)
+{
+ return -EOPNOTSUPP;
+}
+static inline int amd_detect_prefcore(bool *detected)
+{
+ return -ENODEV;
}
#endif /* !CONFIG_ACPI_CPPC_LIB */
From c03b9d435583136f64f0b91d4ac79f27d0e176cd Mon Sep 17 00:00:00 2001
--
2.46.1
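
One recurring change in the cppc_acpi.h hunk above is switching the stub return values from -ENOTSUPP to -EOPNOTSUPP. The usual motivation (stated here as background, not taken from the patch text) is that ENOTSUPP is a kernel-internal value with no userspace errno name, while EOPNOTSUPP decodes cleanly:

/* ENOTSUPP (524) is not exported to userspace, so it is hardcoded here. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define KERNEL_ENOTSUPP 524

int main(void)
{
	printf("EOPNOTSUPP (%d): %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
	printf("ENOTSUPP   (%d): %s\n", KERNEL_ENOTSUPP,
	       strerror(KERNEL_ENOTSUPP));
	return 0;
}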
From 2676833deb16654c45007f79fb6725a3409899ff Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:38:38 +0200
Date: Sun, 15 Sep 2024 17:28:27 +0200
Subject: [PATCH 02/11] bbr3
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -3964,11 +4099,11 @@ index 4d40615dc8fc..f27941201ef2 100644
event = icsk->icsk_pending;
--
2.46.0
2.46.1
From cfadd59d3bf4eb2ba75e1b778510b13bd2299f1f Mon Sep 17 00:00:00 2001
From 8f73cbbad2683b2bebffdf85fb133c78e44603a4 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:38:49 +0200
Date: Sun, 15 Sep 2024 17:28:36 +0200
Subject: [PATCH 03/11] block
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -4449,11 +4584,11 @@ index acdc28756d9d..8b214233a061 100644
if (dd_has_work_for_prio(&dd->per_prio[prio]))
return true;
--
2.46.0
2.46.1
From 0dbaf3e34fbdd41800ee182694dc6b4a16bd5021 Mon Sep 17 00:00:00 2001
From e1cb9da59b75cd677cd7af9923864344099b0973 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:38:57 +0200
Date: Sun, 15 Sep 2024 17:28:46 +0200
Subject: [PATCH 04/11] cachy
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -4517,8 +4652,8 @@ Signed-off-by: Peter Jung <admin@ptr1337.dev>
mm/vmpressure.c | 4 +
mm/vmscan.c | 142 +
scripts/Makefile.package | 3 +-
scripts/package/PKGBUILD | 39 +-
60 files changed, 6100 insertions(+), 110 deletions(-)
scripts/package/PKGBUILD | 52 +-
60 files changed, 6113 insertions(+), 110 deletions(-)
create mode 100644 drivers/i2c/busses/i2c-nct6775.c
create mode 100644 drivers/media/v4l2-core/v4l2loopback.c
create mode 100644 drivers/media/v4l2-core/v4l2loopback.h
@ -4653,10 +4788,10 @@ index f48eaa98d22d..fc777c14cff6 100644
unprivileged_userfaultfd
========================
diff --git a/Makefile b/Makefile
index d57cfc6896b8..e280a998f618 100644
index 34bd1d5f9672..7b497ab43754 100644
--- a/Makefile
+++ b/Makefile
@@ -802,6 +802,9 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks
@@ -803,6 +803,9 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks
ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
KBUILD_CFLAGS += -O2
KBUILD_RUSTFLAGS += -Copt-level=2
@ -4666,7 +4801,7 @@ index d57cfc6896b8..e280a998f618 100644
else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
KBUILD_RUSTFLAGS += -Copt-level=s
@@ -990,9 +993,9 @@ KBUILD_CFLAGS += -fno-strict-overflow
@@ -991,9 +994,9 @@ KBUILD_CFLAGS += -fno-strict-overflow
# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += -fno-stack-check
@ -5436,7 +5571,7 @@ index f9bc95f4488d..e85dd2bf39ed 100644
slab_kill:
diff --git a/block/elevator.c b/block/elevator.c
index f13d552a32c8..c9422523e393 100644
index c355b55d0107..41cf94c3671e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -567,9 +567,19 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
@ -5676,10 +5811,10 @@ index df17e79c45c7..e454488c1a31 100644
+
endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 983a977632ff..68357fb6b551 100644
index 1e069fa5211e..f16a43106eb0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4384,7 +4384,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
@@ -4408,7 +4408,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
return r;
}
@ -5778,10 +5913,10 @@ index d5d6ab484e5a..dccba7bcdf97 100644
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 74e35f8ddefc..c2c2c915db99 100644
index 2cf951184561..1a53bf05f8fc 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2760,7 +2760,10 @@ int smu_get_power_limit(void *handle,
@@ -2762,7 +2762,10 @@ int smu_get_power_limit(void *handle,
*limit = smu->max_power_limit;
break;
case SMU_PPT_LIMIT_MIN:
@ -5793,7 +5928,7 @@ index 74e35f8ddefc..c2c2c915db99 100644
break;
default:
return -EINVAL;
@@ -2784,7 +2787,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
@@ -2786,7 +2789,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (smu->ppt_funcs->set_power_limit)
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
@ -10986,10 +11121,10 @@ index d4d2f4d1d7cb..e0e19d9c1323 100644
char name[CPUFREQ_NAME_LEN];
int (*init)(struct cpufreq_policy *policy);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6549d0979b28..dca9a4444101 100644
index 147073601716..9fafa99d56d1 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -197,6 +197,14 @@ static inline void __mm_zero_struct_page(struct page *page)
@@ -201,6 +201,14 @@ static inline void __mm_zero_struct_page(struct page *page)
extern int sysctl_max_map_count;
@ -11466,7 +11601,7 @@ index 4430ac68e4c4..3bd08b60a9b3 100644
EXPORT_SYMBOL_GPL(dirty_writeback_interval);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c565de8f48e9..ef44703d2070 100644
index 91ace8ca97e2..f8b4dae35fc3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -271,7 +271,11 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
@ -11519,7 +11654,7 @@ index bd5183dfd879..3a410f53a07c 100644
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cfa839284b92..9b7bb2a5626c 100644
index bd489c1af228..fb8fc07523b9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -147,6 +147,15 @@ struct scan_control {
@ -11572,7 +11707,7 @@ index cfa839284b92..9b7bb2a5626c 100644
/*
* The number of dirty pages determines if a node is marked
* reclaim_congested. kswapd will stall and start writing
@@ -2411,6 +2436,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
@@ -2391,6 +2416,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
goto out;
}
@ -11588,7 +11723,7 @@ index cfa839284b92..9b7bb2a5626c 100644
/*
* If there is enough inactive page cache, we do not reclaim
* anything from the anonymous working right now.
@@ -2555,6 +2589,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
@@ -2535,6 +2569,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
BUG();
}
@ -11603,7 +11738,7 @@ index cfa839284b92..9b7bb2a5626c 100644
nr[lru] = scan;
}
}
@@ -3988,7 +4030,11 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc
@@ -3968,7 +4010,11 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc
}
/* to protect the working set of the last N jiffies */
@ -11615,7 +11750,7 @@ index cfa839284b92..9b7bb2a5626c 100644
static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
@@ -4026,6 +4072,96 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
@@ -4006,6 +4052,96 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
}
}
@ -11712,7 +11847,7 @@ index cfa839284b92..9b7bb2a5626c 100644
/******************************************************************************
* rmap/PT walk feedback
******************************************************************************/
@@ -4519,6 +4655,8 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw
@@ -4499,6 +4635,8 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw
*/
if (!swappiness)
type = LRU_GEN_FILE;
@ -11721,7 +11856,7 @@ index cfa839284b92..9b7bb2a5626c 100644
else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
type = LRU_GEN_ANON;
else if (swappiness == 1)
@@ -4798,6 +4936,8 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
@@ -4778,6 +4916,8 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc)
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
@ -11730,7 +11865,7 @@ index cfa839284b92..9b7bb2a5626c 100644
/* lru_gen_age_node() called mem_cgroup_calculate_protection() */
if (mem_cgroup_below_min(NULL, memcg))
return MEMCG_LRU_YOUNG;
@@ -5945,6 +6085,8 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
@@ -5925,6 +6065,8 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
prepare_scan_control(pgdat, sc);
@ -11754,7 +11889,7 @@ index 4a80584ec771..11d53f240a2b 100644
KBUILD_MAKEFLAGS="$(MAKEFLAGS)" \
KBUILD_REVISION="$(shell $(srctree)/scripts/build-version)" \
diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
index 663ce300dd06..839cd5e634d2 100644
index 663ce300dd06..f83493838cf9 100644
--- a/scripts/package/PKGBUILD
+++ b/scripts/package/PKGBUILD
@@ -3,10 +3,13 @@
@ -11767,7 +11902,7 @@ index 663ce300dd06..839cd5e634d2 100644
-fi
+pkgname=("${pkgbase}")
+
+_extrapackages=${PACMAN_EXTRAPACKAGES-headers api-headers}
+_extrapackages=${PACMAN_EXTRAPACKAGES-headers api-headers debug}
+for pkg in $_extrapackages; do
+ pkgname+=("${pkgbase}-${pkg}")
+done
@ -11830,7 +11965,7 @@ index 663ce300dd06..839cd5e634d2 100644
cp System.map "${builddir}/System.map"
cp .config "${builddir}/.config"
@@ -94,8 +106,7 @@ _package-api-headers() {
@@ -94,12 +106,24 @@ _package-api-headers() {
provides=(linux-api-headers)
conflicts=(linux-api-headers)
@ -11840,12 +11975,29 @@ index 663ce300dd06..839cd5e634d2 100644
${MAKE} headers_install INSTALL_HDR_PATH="${pkgdir}/usr"
}
--
2.46.0
From 8ece8492ab552375f939b01887f695b4c8bd7d39 Mon Sep 17 00:00:00 2001
+_package-debug(){
+ pkgdesc="Non-stripped vmlinux file for the ${pkgdesc} kernel"
+
+ local debugdir="${pkgdir}/usr/src/debug/${pkgbase}"
+ local builddir="${pkgdir}/usr/${MODLIB}/build"
+
+ _prologue
+
+ install -Dt "${debugdir}" -m644 vmlinux
+ mkdir -p "${builddir}"
+ ln -sr "${debugdir}/vmlinux" "${builddir}/vmlinux"
+}
+
for _p in "${pkgname[@]}"; do
eval "package_$_p() {
$(declare -f "_package${_p#$pkgbase}")
--
2.46.1
From c4201124b983cba28153bd6385dd44b26ffad1e7 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:39:07 +0200
Date: Sun, 15 Sep 2024 17:28:56 +0200
Subject: [PATCH 05/11] fixes
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -12097,11 +12249,11 @@ index 3cffa6c79538..8b7a5a31e8c1 100644
{}
};
--
2.46.0
2.46.1
From ce9f099c5a2244989b1cb027580ead7a80d4fa55 Mon Sep 17 00:00:00 2001
From b24e8834eb51bed12079009ec0ab23b16bc73198 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:39:19 +0200
Date: Sun, 15 Sep 2024 17:29:06 +0200
Subject: [PATCH 06/11] intel-pstate
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -12581,11 +12733,11 @@ index 7d92f16a430a..86ad1fed71f1 100644
}
--
2.46.0
2.46.1
From f9f37e11d53bf4044bb8bf7b8a74e19090fc9bda Mon Sep 17 00:00:00 2001
From 8eb5e816f13a599dd0385bccc1df837664cc7233 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:39:28 +0200
Date: Sun, 15 Sep 2024 17:29:23 +0200
Subject: [PATCH 07/11] ksm
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -13023,11 +13175,11 @@ index 01071182763e..7394bad8178e 100644
+464 common process_ksm_disable sys_process_ksm_disable sys_process_ksm_disable
+465 common process_ksm_status sys_process_ksm_status sys_process_ksm_status
--
2.46.0
2.46.1
From 374d5a89939e019d2d3abd22f683486c82230791 Mon Sep 17 00:00:00 2001
From c2703b85f5713426c2ab1e6f25f2364582f053fe Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:39:38 +0200
Date: Sun, 15 Sep 2024 17:29:35 +0200
Subject: [PATCH 08/11] ntsync
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -13467,10 +13619,10 @@ index 000000000000..767844637a7d
+ ``objs`` and in ``alert``. If this is attempted, the function fails
+ with ``EINVAL``.
diff --git a/MAINTAINERS b/MAINTAINERS
index fe83ba7194ea..d9681e662200 100644
index cc40a9d9b8cd..2cd7168dc401 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16306,6 +16306,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git
@@ -16319,6 +16319,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git
F: Documentation/filesystems/ntfs3.rst
F: fs/ntfs3/
@ -16112,11 +16264,11 @@ index 000000000000..5fa2c9a0768c
+
+TEST_HARNESS_MAIN
--
2.46.0
2.46.1
From 99b646a36247a68818da737fd76441bdfe531213 Mon Sep 17 00:00:00 2001
From bdd3f4dab12fd8eb06357b2b5593820eb3128651 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:39:50 +0200
Date: Sun, 15 Sep 2024 17:29:46 +0200
Subject: [PATCH 09/11] perf-per-core
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -17020,11 +17172,11 @@ index 9a6069e7133c..23722aa21e2f 100644
/* Package relative core ID */
--
2.46.0
2.46.1
From ca5746bedb2da2feb67f1cfed4e80ace86b3c240 Mon Sep 17 00:00:00 2001
From f2a149176007718766b709be8299b005ddd6b158 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:40:01 +0200
Date: Sun, 15 Sep 2024 17:30:08 +0200
Subject: [PATCH 10/11] t2
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -17183,10 +17335,10 @@ index 4451ef501936..c726a846f752 100644
----
diff --git a/MAINTAINERS b/MAINTAINERS
index d9681e662200..67ef02a08b8b 100644
index 2cd7168dc401..16df466c205d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6894,6 +6894,12 @@ S: Supported
@@ -6895,6 +6895,12 @@ S: Supported
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/sun4i/sun8i*
@ -17326,10 +17478,10 @@ index 49a1ac4f5491..c8c10a6104c4 100644
fb->base.width, fb->base.height,
sizes->fb_width, sizes->fb_height);
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 14d5fefc9c5b..727639b8f6a6 100644
index dfd8b4960e6d..7232f9acd0a0 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -59,6 +59,18 @@ static void quirk_increase_ddi_disabled_time(struct intel_display *display)
@@ -64,6 +64,18 @@ static void quirk_increase_ddi_disabled_time(struct intel_display *display)
drm_info(display->drm, "Applying Increase DDI Disabled quirk\n");
}
@ -17348,7 +17500,7 @@ index 14d5fefc9c5b..727639b8f6a6 100644
static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
{
intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK);
@@ -201,6 +213,9 @@ static struct intel_quirk intel_quirks[] = {
@@ -229,6 +241,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
/* HP Notebook - 14-r206nv */
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
@ -17357,15 +17509,15 @@ index 14d5fefc9c5b..727639b8f6a6 100644
+ { 0x3e9b, 0x106b, 0x0176, quirk_ddi_a_force_4_lanes },
};
void intel_init_quirks(struct intel_display *display)
static struct intel_dpcd_quirk intel_dpcd_quirks[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index 151c8f4ae576..46e7feba88f4 100644
index cafdebda7535..a5296f82776e 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -17,6 +17,7 @@ enum intel_quirk_id {
QUIRK_INVERT_BRIGHTNESS,
@@ -20,6 +20,7 @@ enum intel_quirk_id {
QUIRK_LVDS_SSC_DISABLE,
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
QUIRK_FW_SYNC_LEN,
+ QUIRK_DDI_A_FORCE_4_LANES,
};
@ -27435,11 +27587,11 @@ index 4427572b2477..b60c99d61882 100755
last;
}
--
2.46.0
2.46.1
From dec2bed6cdefc145bc1bc816f59c2bb833070742 Mon Sep 17 00:00:00 2001
From d312d9b44e2d51dd64ceecf38fccbfbcc8944738 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Sun, 1 Sep 2024 10:40:12 +0200
Date: Sun, 15 Sep 2024 17:30:16 +0200
Subject: [PATCH 11/11] zstd
Signed-off-by: Peter Jung <admin@ptr1337.dev>
@ -46087,4 +46239,4 @@ index f4ed952ed485..7d31518e9d5a 100644
EXPORT_SYMBOL(zstd_reset_dstream);
--
2.46.0
2.46.1

File diff suppressed because it is too large.

@ -1,6 +1,6 @@
From 8748a0aa2fc51c5a22d17c4da434f12e91b4d211 Mon Sep 17 00:00:00 2001
From 35259c1c06596a086582bb3c63461b039e1e517d Mon Sep 17 00:00:00 2001
From: Piotr Gorski <lucjan.lucjanov@gmail.com>
Date: Mon, 12 Aug 2024 13:44:47 +0200
Date: Fri, 13 Sep 2024 14:15:05 +0200
Subject: [PATCH] bore-cachy-ext
Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
@ -8,12 +8,12 @@ Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
include/linux/sched.h | 10 ++
init/Kconfig | 17 ++
kernel/Kconfig.hz | 16 ++
kernel/sched/core.c | 143 +++++++++++++++
kernel/sched/core.c | 141 +++++++++++++++
kernel/sched/debug.c | 60 ++++++-
kernel/sched/fair.c | 388 +++++++++++++++++++++++++++++++++++++---
kernel/sched/fair.c | 379 +++++++++++++++++++++++++++++++++++++---
kernel/sched/features.h | 20 ++-
kernel/sched/sched.h | 7 +
8 files changed, 634 insertions(+), 27 deletions(-)
8 files changed, 623 insertions(+), 27 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5b4f78fe3..b9e5ea2aa 100644
@ -37,10 +37,10 @@ index 5b4f78fe3..b9e5ea2aa 100644
u64 slice;
diff --git a/init/Kconfig b/init/Kconfig
index e24741512..511a13dcd 100644
index e1a88d48d..3aea8e43c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1317,6 +1317,23 @@ config CHECKPOINT_RESTORE
@@ -1327,6 +1327,23 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
@ -91,10 +91,10 @@ index 0f78364ef..b50189ee5 100644
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7ba808949..8c010f1f5 100644
index c792a6feb..dfb93c5f7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4336,6 +4336,138 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -4336,6 +4336,136 @@ int wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
@ -125,7 +125,7 @@ index 7ba808949..8c010f1f5 100644
+ return cnt;
+}
+
+static inline bool task_is_inheritable(struct task_struct *p) {
+static inline bool task_burst_inheritable(struct task_struct *p) {
+ return (p->sched_class == &fair_sched_class);
+}
+
@ -146,11 +146,10 @@ index 7ba808949..8c010f1f5 100644
+
+static inline void update_child_burst_direct(struct task_struct *p, u64 now) {
+ struct task_struct *child;
+ u32 cnt = 0;
+ u32 sum = 0;
+ u32 cnt = 0, sum = 0;
+
+ list_for_each_entry(child, &p->children, sibling) {
+ if (!task_is_inheritable(child)) continue;
+ if (!task_burst_inheritable(child)) continue;
+ cnt++;
+ sum += child->se.burst_penalty;
+ }
@ -169,8 +168,7 @@ index 7ba808949..8c010f1f5 100644
+static void update_child_burst_topological(
+ struct task_struct *p, u64 now, u32 depth, u32 *acnt, u32 *asum) {
+ struct task_struct *child, *dec;
+ u32 cnt = 0, dcnt = 0;
+ u32 sum = 0;
+ u32 cnt = 0, dcnt = 0, sum = 0;
+
+ list_for_each_entry(child, &p->children, sibling) {
+ dec = child;
@ -178,7 +176,7 @@ index 7ba808949..8c010f1f5 100644
+ dec = list_first_entry(&dec->children, struct task_struct, sibling);
+
+ if (!dcnt || !depth) {
+ if (!task_is_inheritable(dec)) continue;
+ if (!task_burst_inheritable(dec)) continue;
+ cnt++;
+ sum += dec->se.burst_penalty;
+ continue;
@ -224,7 +222,7 @@ index 7ba808949..8c010f1f5 100644
+}
+
+static void sched_post_fork_bore(struct task_struct *p) {
+ if (p->sched_class == &fair_sched_class)
+ if (task_burst_inheritable(p))
+ inherit_burst(p);
+ p->se.burst_penalty = p->se.prev_burst_penalty;
+}
@ -233,7 +231,7 @@ index 7ba808949..8c010f1f5 100644
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
@@ -4352,6 +4484,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -4352,6 +4482,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
@ -243,7 +241,7 @@ index 7ba808949..8c010f1f5 100644
p->se.vlag = 0;
p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
@@ -4686,6 +4821,9 @@ void sched_cancel_fork(struct task_struct *p)
@@ -4686,6 +4819,9 @@ void sched_cancel_fork(struct task_struct *p)
void sched_post_fork(struct task_struct *p)
{
@ -253,13 +251,13 @@ index 7ba808949..8c010f1f5 100644
uclamp_post_fork(p);
scx_post_fork(p);
}
@@ -8285,6 +8423,11 @@ void __init sched_init(void)
@@ -8283,6 +8419,11 @@ void __init sched_init(void)
BUG_ON(!sched_class_above(&ext_sched_class, &idle_sched_class));
#endif
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.2.10 by Masahito Suzuki");
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.2.11 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
@ -373,7 +371,7 @@ index c057ef46c..3cab39e34 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2928026d7..64987a5d1 100644
index 2928026d7..f7040962b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@ -386,7 +384,7 @@ index 2928026d7..64987a5d1 100644
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
@@ -64,28 +67,182 @@
@@ -64,28 +67,174 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
@ -447,8 +445,7 @@ index 2928026d7..64987a5d1 100644
+
+static inline u32 log2plus1_u64_u32f8(u64 v) {
+ u32 msb = fls64(v);
+ s32 excess_bits = msb - 9;
+ u8 fractional = (0 <= excess_bits)? v >> excess_bits: v << -excess_bits;
+ u8 fractional = (v << (64 - msb) >> 55);
+ return msb << 8 | fractional;
+}
+
@ -487,7 +484,6 @@ index 2928026d7..64987a5d1 100644
+
+static inline u8 effective_prio(struct task_struct *p) {
+ u8 prio = p->static_prio - MAX_RT_PRIO;
+
+ if (likely(sched_bore))
+ prio += p->se.burst_score;
+ return min(39, prio);
@ -499,9 +495,8 @@ index 2928026d7..64987a5d1 100644
+ u8 prev_prio = effective_prio(p);
+
+ u8 burst_score = 0;
+ if (!(sched_burst_exclude_kthreads && (p->flags & PF_KTHREAD)))
+ if (!((p->flags & PF_KTHREAD) && likely(sched_burst_exclude_kthreads)))
+ burst_score = se->burst_penalty >> 2;
+
+ se->burst_score = burst_score;
+
+ u8 new_prio = effective_prio(p);
@ -551,17 +546,12 @@ index 2928026d7..64987a5d1 100644
+ struct rq_flags rf;
+
+ write_lock_irq(&tasklist_lock);
+
+ for_each_process(task) {
+ rq = task_rq(task);
+
+ rq_lock_irqsave(rq, &rf);
+
+ reweight_task_by_prio(task, effective_prio(task));
+
+ rq_unlock_irqrestore(rq, &rf);
+ }
+
+ write_unlock_irq(&tasklist_lock);
+}
+
@ -580,7 +570,7 @@ index 2928026d7..64987a5d1 100644
static int __init setup_sched_thermal_decay_shift(char *str)
{
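
Two pieces of the BORE math visible above can be exercised in userspace: the fixed-point log helper log2plus1_u64_u32f8() and the burst_score handling in update_burst_score()/effective_prio() (penalty >> 2, added to the 0..39 priority and capped at 39). The sketch below approximates fls64() with a loop and uses a made-up burst_penalty value, since the intermediate penalty scaling is not part of this hunk.

/* Userspace sketch of the BORE fixed-point log and priority clamp. */
#include <stdio.h>
#include <stdint.h>

/* integer log2 plus one in the upper bits, 8 fractional bits below */
static uint32_t log2plus1_u64_u32f8(uint64_t v)
{
	uint32_t msb = 0;
	uint64_t t = v;

	if (!v)
		return 0;
	while (t) {		/* stand-in for the kernel's fls64() */
		msb++;
		t >>= 1;
	}
	return msb << 8 | (uint8_t)(v << (64 - msb) >> 55);
}

int main(void)
{
	uint64_t burst_time_ns = 3000000ULL;	/* example 3 ms burst */
	uint32_t lg = log2plus1_u64_u32f8(burst_time_ns);

	/* made-up penalty; the kernel derives it from the log value above */
	uint8_t burst_penalty = 48;
	uint8_t burst_score = burst_penalty >> 2; /* as in update_burst_score() */
	uint8_t prio = 20 + burst_score;	/* 20 = static prio of nice 0 */
	if (prio > 39)
		prio = 39;			/* min(39, prio) in effective_prio() */

	printf("log2plus1 = %u + %u/256, burst_score = %u, effective prio = %u\n",
	       lg >> 8, lg & 0xff, burst_score, prio);
	return 0;
}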
@@ -130,12 +287,8 @@ int __weak arch_asym_cpu_priority(int cpu)
@@ -130,12 +279,8 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
@ -593,7 +583,7 @@ index 2928026d7..64987a5d1 100644
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
@@ -144,6 +297,92 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
@@ -144,6 +289,92 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
@ -686,7 +676,7 @@ index 2928026d7..64987a5d1 100644
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
@@ -201,6 +440,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
@@ -201,6 +432,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
@ -700,7 +690,7 @@ index 2928026d7..64987a5d1 100644
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
@@ -231,6 +477,7 @@ static void update_sysctl(void)
@@ -231,6 +469,7 @@ static void update_sysctl(void)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
@ -708,18 +698,17 @@ index 2928026d7..64987a5d1 100644
void __init sched_init_granularity(void)
{
@@ -708,6 +955,10 @@ static s64 entity_lag(u64 avruntime, struct sched_entity *se)
@@ -708,6 +947,9 @@ static s64 entity_lag(u64 avruntime, struct sched_entity *se)
vlag = avruntime - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#ifdef CONFIG_SCHED_BORE
+ if (likely(sched_bore))
+ limit >>= 1;
+ limit >>= !!sched_bore;
+#endif // CONFIG_SCHED_BORE
return clamp(vlag, -limit, limit);
}
@@ -868,6 +1119,39 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
@@ -868,6 +1110,39 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
return __node_2_se(left);
}
@ -759,7 +748,7 @@ index 2928026d7..64987a5d1 100644
/*
* Earliest Eligible Virtual Deadline First
*
@@ -887,28 +1171,27 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
@@ -887,28 +1162,27 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
*
* Which allows tree pruning through eligibility.
*/
@ -795,7 +784,7 @@ index 2928026d7..64987a5d1 100644
return curr;
/* Pick the leftmost entity if it's eligible */
@@ -967,6 +1250,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
@@ -967,6 +1241,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
@ -803,7 +792,7 @@ index 2928026d7..64987a5d1 100644
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
@@ -978,6 +1262,7 @@ int sched_update_scaling(void)
@@ -978,6 +1253,7 @@ int sched_update_scaling(void)
return 0;
}
@ -811,7 +800,7 @@ index 2928026d7..64987a5d1 100644
#endif
#endif
@@ -1178,6 +1463,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
@@ -1178,6 +1454,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (unlikely(delta_exec <= 0))
return;
@ -822,7 +811,7 @@ index 2928026d7..64987a5d1 100644
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
@@ -5193,6 +5482,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5193,6 +5473,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
s64 lag = 0;
se->slice = sysctl_sched_base_slice;
@ -835,7 +824,7 @@ index 2928026d7..64987a5d1 100644
vslice = calc_delta_fair(se->slice, se);
/*
@@ -5203,6 +5498,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5203,6 +5489,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*
* EEVDF: placement strategy #1 / #2
*/
@ -845,7 +834,7 @@ index 2928026d7..64987a5d1 100644
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -5278,6 +5576,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5278,6 +5567,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* on average, halfway through their slice, as such start tasks
* off with half a slice to ease into the competition.
*/
@ -859,7 +848,7 @@ index 2928026d7..64987a5d1 100644
if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
vslice /= 2;
@@ -5492,7 +5797,7 @@ pick_next_entity(struct cfs_rq *cfs_rq)
@@ -5492,7 +5788,7 @@ pick_next_entity(struct cfs_rq *cfs_rq)
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
return cfs_rq->next;
@ -868,7 +857,7 @@ index 2928026d7..64987a5d1 100644
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -6860,6 +7165,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
@@ -6860,6 +7156,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
@ -883,7 +872,7 @@ index 2928026d7..64987a5d1 100644
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8428,7 +8741,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
@@ -8428,7 +8732,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
/*
* XXX pick_eevdf(cfs_rq) != se ?
*/
@ -892,7 +881,7 @@ index 2928026d7..64987a5d1 100644
goto preempt;
return;
@@ -8646,16 +8959,25 @@ static void yield_task_fair(struct rq *rq)
@@ -8646,16 +8950,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@ -918,7 +907,7 @@ index 2928026d7..64987a5d1 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
@@ -12720,6 +13042,9 @@ static void task_fork_fair(struct task_struct *p)
@@ -12720,6 +13033,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
@ -928,7 +917,7 @@ index 2928026d7..64987a5d1 100644
place_entity(cfs_rq, se, ENQUEUE_INITIAL);
rq_unlock(rq, &rf);
}
@@ -13303,3 +13628,16 @@ __init void init_sched_fair_class(void)
@@ -13303,3 +13619,16 @@ __init void init_sched_fair_class(void)
#endif /* SMP */
}
@ -978,10 +967,10 @@ index 143f55df8..bfeb9f653 100644
/*
* Prefer to schedule the task we woke last (assuming it failed
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 71c346fb9..c30cb4a7c 100644
index 207a04f02..c99430161 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2050,7 +2050,11 @@ static inline void update_sched_domain_debugfs(void) { }
@@ -2063,7 +2063,11 @@ static inline void update_sched_domain_debugfs(void) { }
static inline void dirty_sched_domain_sysctl(int cpu) { }
#endif
@ -993,7 +982,7 @@ index 71c346fb9..c30cb4a7c 100644
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
@@ -2705,6 +2709,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
@@ -2736,6 +2740,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_base_slice;