Merge pull request #6 from PikaOS-Linux/6.10

6.10
ferrreo 2024-07-21 14:25:21 +01:00 committed by GitHub
commit 99d43bd6f2
12 changed files with 29090 additions and 12644 deletions


@@ -35,12 +35,3 @@ jobs:
- name: Release Kernel
run: ./release.sh
- name: Purge cache
uses: strrife/cloudflare-chunked-purge-action@master
env:
# Zone is required by both authentication methods
CLOUDFLARE_ZONE: ${{ secrets.CLOUDFLARE_ZONE }}
CLOUDFLARE_TOKEN: ${{ secrets.CLOUDFLARE_TOKEN }}
PURGE_URLS: ${{ vars.PURGE_URLS }}


@@ -1 +1 @@
6.8.3
6.10

config

File diff suppressed because it is too large


@@ -1,21 +1,22 @@
From 37fd243d8f075b558f54a36fc85887269310709c Mon Sep 17 00:00:00 2001
From: Piotr Gorski <lucjan.lucjanov@gmail.com>
Date: Tue, 26 Mar 2024 08:11:18 +0100
From fea4a499d6783faff756fe852c645f90aa73ccf7 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 15 Jul 2024 13:57:19 +0200
Subject: [PATCH] bore-cachy
Signed-off-by: Piotr Gorski <lucjan.lucjanov@gmail.com>
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
include/linux/sched.h | 10 ++
init/Kconfig | 17 +++
kernel/sched/core.c | 144 +++++++++++++++++++++++++
kernel/sched/debug.c | 60 ++++++++++-
kernel/sched/fair.c | 231 +++++++++++++++++++++++++++++++++++++---
kernel/sched/features.h | 4 +
kernel/sched/sched.h | 7 ++
7 files changed, 457 insertions(+), 16 deletions(-)
kernel/Kconfig.hz | 16 +++
kernel/sched/core.c | 143 ++++++++++++++++++
kernel/sched/debug.c | 60 +++++++-
kernel/sched/fair.c | 310 ++++++++++++++++++++++++++++++++++++----
kernel/sched/features.h | 22 ++-
kernel/sched/sched.h | 7 +
8 files changed, 555 insertions(+), 30 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ffe8f618a..0ab0b0424 100644
index a5f4b48fca18..df62c56b13ae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -547,6 +547,16 @@ struct sched_entity {
@@ -36,10 +37,10 @@ index ffe8f618a..0ab0b0424 100644
u64 slice;
diff --git a/init/Kconfig b/init/Kconfig
index 9ea39297f..f9bb5401f 100644
index 3ba6142f2f42..2966dec64df7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1299,6 +1299,23 @@ config CHECKPOINT_RESTORE
@@ -1303,6 +1303,23 @@ config CHECKPOINT_RESTORE
If unsure, say N here.
@@ -63,16 +64,41 @@ index 9ea39297f..f9bb5401f 100644
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/Kconfig.hz b/kernel/Kconfig.hz
index 0f78364efd4f..b50189ee5b93 100644
--- a/kernel/Kconfig.hz
+++ b/kernel/Kconfig.hz
@@ -79,5 +79,21 @@ config HZ
default 750 if HZ_750
default 1000 if HZ_1000
+config MIN_BASE_SLICE_NS
+ int "Default value for min_base_slice_ns"
+ default 2000000
+ help
+ The BORE Scheduler automatically calculates the optimal base
+ slice for the configured HZ using the following equation:
+
+ base_slice_ns = max(min_base_slice_ns, 1000000000/HZ)
+
+ This option sets the default lower bound limit of the base slice
+ to prevent the loss of task throughput due to overscheduling.
+
+ Setting this value too high can cause the system to boot with
+ an unnecessarily large base slice, resulting in high scheduling
+ latency and poor system responsiveness.
+
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
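Note (illustrative, not part of the patch): a quick worked example of the base_slice_ns = max(min_base_slice_ns, 1000000000/HZ) rule from the help text above, using the 2000000 ns default of CONFIG_MIN_BASE_SLICE_NS. Plain POSIX shell, arithmetic only:

# Sketch: how the lower bound interacts with common HZ choices.
min_base_slice_ns=2000000                      # CONFIG_MIN_BASE_SLICE_NS default from above
for hz in 1000 750 500 300 100; do
    slice=$((1000000000 / hz))                 # 1000000000/HZ
    [ "$slice" -lt "$min_base_slice_ns" ] && slice=$min_base_slice_ns
    printf 'HZ=%-4s -> base_slice_ns=%d\n' "$hz" "$slice"
done
# HZ=1000 and HZ=750 hit the 2 ms floor; HZ=500 and below use 1000000000/HZ as-is.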
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9116bcc90..fc3d7b48e 100644
index 59ce0841eb1f..c5d10b464779 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4507,6 +4507,139 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -4515,6 +4515,138 @@ int wake_up_state(struct task_struct *p, unsigned int state)
return try_to_wake_up(p, state, 0);
}
+#ifdef CONFIG_SCHED_BORE
+extern bool sched_bore;
+extern u8 sched_burst_fork_atavistic;
+extern uint sched_burst_cache_lifetime;
+
@@ -85,7 +111,7 @@ index 9116bcc90..fc3d7b48e 100644
+ init_task.se.child_burst_last_cached = 0;
+}
+
+void inline sched_fork_bore(struct task_struct *p) {
+inline void sched_fork_bore(struct task_struct *p) {
+ p->se.burst_time = 0;
+ p->se.curr_burst_penalty = 0;
+ p->se.burst_score = 0;
@@ -207,7 +233,7 @@ index 9116bcc90..fc3d7b48e 100644
/*
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
@@ -4523,6 +4656,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
@@ -4531,6 +4663,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.prev_sum_exec_runtime = 0;
p->se.nr_migrations = 0;
p->se.vruntime = 0;
@@ -217,7 +243,7 @@ index 9116bcc90..fc3d7b48e 100644
p->se.vlag = 0;
p->se.slice = sysctl_sched_base_slice;
INIT_LIST_HEAD(&p->se.group_node);
@@ -4839,6 +4975,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
@@ -4846,6 +4981,9 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
void sched_post_fork(struct task_struct *p)
{
@@ -227,20 +253,20 @@ index 9116bcc90..fc3d7b48e 100644
uclamp_post_fork(p);
}
@@ -9910,6 +10049,11 @@ void __init sched_init(void)
@@ -9933,6 +10071,11 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class != &stop_sched_class + 1);
#endif
+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.0.3 by Masahito Suzuki");
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 5.2.5 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 8d5d98a58..b17861261 100644
index c1eb9a1afd13..e2da8d773877 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -167,7 +167,52 @@ static const struct file_operations sched_feat_fops = {
@@ -326,7 +352,7 @@ index 8d5d98a58..b17861261 100644
debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);
@@ -595,6 +647,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
@@ -596,6 +648,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));
@@ -336,7 +362,7 @@ index 8d5d98a58..b17861261 100644
#ifdef CONFIG_NUMA_BALANCING
SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
@@ -1068,6 +1123,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
@@ -1069,6 +1124,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P(se.load.weight);
#ifdef CONFIG_SMP
@@ -347,7 +373,7 @@ index 8d5d98a58..b17861261 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fc0a9de42..ae55f46a8 100644
index c2bb8eb1d6ba..9e8b220f27e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@@ -360,7 +386,7 @@ index fc0a9de42..ae55f46a8 100644
*/
#include <linux/energy_model.h>
#include <linux/mmap_lock.h>
@@ -64,28 +67,125 @@
@@ -64,28 +67,126 @@
* SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
* SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
*
@@ -388,7 +414,7 @@ index fc0a9de42..ae55f46a8 100644
+#ifdef CONFIG_SCHED_BORE
+unsigned int sysctl_sched_base_slice = 1000000000ULL / HZ;
+static unsigned int configured_sched_base_slice = 1000000000ULL / HZ;
+unsigned int sysctl_sched_min_base_slice = 2000000ULL;
+unsigned int sysctl_sched_min_base_slice = CONFIG_MIN_BASE_SLICE_NS;
+#else // !CONFIG_SCHED_BORE
unsigned int sysctl_sched_base_slice = 750000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
@@ -409,7 +435,8 @@ index fc0a9de42..ae55f46a8 100644
+u8 __read_mostly sched_burst_penalty_offset = 22;
+uint __read_mostly sched_burst_penalty_scale = 1280;
+uint __read_mostly sched_burst_cache_lifetime = 60000000;
+static int __maybe_unused thirty_two = 32;
+uint __read_mostly sched_deadline_boost_mask = 0x81; // ENQUEUE_INITIAL | ENQUEUE_WAKEUP
+uint __read_mostly sched_deadline_preserve_mask = 0x42; // ENQUEUE_RESTORE | ENQUEUE_MIGRATED
+static int __maybe_unused sixty_four = 64;
+static int __maybe_unused maxval_12_bits = 4095;
+
@@ -495,9 +522,9 @@ index fc0a9de42..ae55f46a8 100644
+}
+#endif // CONFIG_SCHED_BORE
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
@@ -136,12 +236,8 @@ int __weak arch_asym_cpu_priority(int cpu)
{
@@ -130,12 +231,8 @@ int __weak arch_asym_cpu_priority(int cpu)
*
* (default: 5 msec, units: microseconds)
*/
@@ -510,7 +537,7 @@ index fc0a9de42..ae55f46a8 100644
#ifdef CONFIG_NUMA_BALANCING
/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
@@ -150,6 +246,69 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
@@ -144,6 +241,83 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static struct ctl_table sched_fair_sysctls[] = {
@@ -576,11 +603,25 @@ index fc0a9de42..ae55f46a8 100644
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "sched_deadline_boost_mask",
+ .data = &sched_deadline_boost_mask,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {
+ .procname = "sched_deadline_preserve_mask",
+ .data = &sched_deadline_preserve_mask,
+ .maxlen = sizeof(uint),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+#endif // CONFIG_SCHED_BORE
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
@@ -208,6 +367,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
@@ -201,6 +375,13 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
*
* This idea comes from the SD scheduler of Con Kolivas:
*/
@@ -594,7 +635,7 @@ index fc0a9de42..ae55f46a8 100644
static unsigned int get_update_sysctl_factor(void)
{
unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
@@ -238,6 +404,7 @@ static void update_sysctl(void)
@@ -231,6 +412,7 @@ static void update_sysctl(void)
SET_SYSCTL(sched_base_slice);
#undef SET_SYSCTL
}
@@ -602,17 +643,93 @@ index fc0a9de42..ae55f46a8 100644
void __init sched_init_granularity(void)
{
@@ -717,6 +884,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
lag = avg_vruntime(cfs_rq) - se->vruntime;
@@ -708,6 +890,9 @@ static s64 entity_lag(u64 avruntime, struct sched_entity *se)
vlag = avruntime - se->vruntime;
limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);
+#ifdef CONFIG_SCHED_BORE
+ limit >>= 1;
+#endif // CONFIG_SCHED_BORE
se->vlag = clamp(lag, -limit, limit);
return clamp(vlag, -limit, limit);
}
@@ -868,6 +1053,39 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
return __node_2_se(left);
}
@@ -968,6 +1138,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
+static inline bool pick_curr(struct cfs_rq *cfs_rq,
+ struct sched_entity *curr, struct sched_entity *wakee)
+{
+ /*
+ * Nothing to preserve...
+ */
+ if (!curr || !sched_feat(RESPECT_SLICE))
+ return false;
+
+ /*
+ * Allow preemption at the 0-lag point -- even if not all of the slice
+ * is consumed. Note: placement of positive lag can push V left and render
+ * @curr instantly ineligible irrespective the time on-cpu.
+ */
+ if (sched_feat(RUN_TO_PARITY) && !entity_eligible(cfs_rq, curr))
+ return false;
+
+ /*
+ * Don't preserve @curr when the @wakee has a shorter slice and earlier
+ * deadline. IOW, explicitly allow preemption.
+ */
+ if (sched_feat(PREEMPT_SHORT) && wakee &&
+ wakee->slice < curr->slice &&
+ (s64)(wakee->deadline - curr->deadline) < 0)
+ return false;
+
+ /*
+ * Preserve @curr to allow it to finish its first slice.
+ * See the HACK in set_next_entity().
+ */
+ return curr->vlag == curr->deadline;
+}
+
/*
* Earliest Eligible Virtual Deadline First
*
@@ -887,28 +1105,27 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
*
* Which allows tree pruning through eligibility.
*/
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq, struct sched_entity *wakee)
{
struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
struct sched_entity *se = __pick_first_entity(cfs_rq);
struct sched_entity *curr = cfs_rq->curr;
struct sched_entity *best = NULL;
+ if (curr && !curr->on_rq)
+ curr = NULL;
+
/*
* We can safely skip eligibility check if there is only one entity
* in this cfs_rq, saving some cycles.
*/
if (cfs_rq->nr_running == 1)
- return curr && curr->on_rq ? curr : se;
-
- if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
- curr = NULL;
+ return curr ?: se;
/*
- * Once selected, run a task until it either becomes non-eligible or
- * until it gets a new slice. See the HACK in set_next_entity().
+ * Preserve @curr to let it finish its slice.
*/
- if (sched_feat(RUN_TO_PARITY) && curr && curr->vlag == curr->deadline)
+ if (pick_curr(cfs_rq, curr, wakee))
return curr;
/* Pick the leftmost entity if it's eligible */
@@ -967,6 +1184,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
* Scheduling class statistics methods:
*/
#ifdef CONFIG_SMP
@@ -620,7 +737,7 @@ index fc0a9de42..ae55f46a8 100644
int sched_update_scaling(void)
{
unsigned int factor = get_update_sysctl_factor();
@@ -979,6 +1150,7 @@ int sched_update_scaling(void)
@@ -978,6 +1196,7 @@ int sched_update_scaling(void)
return 0;
}
@@ -628,7 +745,7 @@ index fc0a9de42..ae55f46a8 100644
#endif
#endif
@@ -1178,7 +1350,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
@@ -1178,7 +1397,13 @@ static void update_curr(struct cfs_rq *cfs_rq)
if (unlikely(delta_exec <= 0))
return;
@@ -642,7 +759,19 @@ index fc0a9de42..ae55f46a8 100644
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);
@@ -5184,6 +5362,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5193,6 +5418,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
s64 lag = 0;
se->slice = sysctl_sched_base_slice;
+#ifdef CONFIG_SCHED_BORE
+ if (flags & ~sched_deadline_boost_mask & sched_deadline_preserve_mask)
+ vslice = se->deadline - se->vruntime;
+ else
+#endif // CONFIG_SCHED_BORE
vslice = calc_delta_fair(se->slice, se);
/*
@@ -5203,6 +5433,9 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*
* EEVDF: placement strategy #1 / #2
*/
@@ -652,7 +781,28 @@ index fc0a9de42..ae55f46a8 100644
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -6816,6 +6997,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
@@ -5278,7 +5511,11 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* on average, halfway through their slice, as such start tasks
* off with half a slice to ease into the competition.
*/
+#if !defined(CONFIG_SCHED_BORE)
if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
+#else // CONFIG_SCHED_BORE
+ if (flags & sched_deadline_boost_mask)
+#endif // CONFIG_SCHED_BORE
vslice /= 2;
/*
@@ -5492,7 +5729,7 @@ pick_next_entity(struct cfs_rq *cfs_rq)
cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
return cfs_rq->next;
- return pick_eevdf(cfs_rq);
+ return pick_eevdf(cfs_rq, NULL);
}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -6860,6 +7097,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);
util_est_dequeue(&rq->cfs, p);
@ -667,7 +817,19 @@ index fc0a9de42..ae55f46a8 100644
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8565,16 +8754,25 @@ static void yield_task_fair(struct rq *rq)
@@ -8425,10 +8670,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
- /*
- * XXX pick_eevdf(cfs_rq) != se ?
- */
- if (pick_eevdf(cfs_rq) == pse)
+ if (pick_eevdf(cfs_rq, pse) == pse)
goto preempt;
return;
@@ -8646,16 +8888,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@ -693,7 +855,7 @@ index fc0a9de42..ae55f46a8 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
@@ -12664,6 +12862,9 @@ static void task_fork_fair(struct task_struct *p)
@@ -12723,6 +12974,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
@ -704,26 +866,44 @@ index fc0a9de42..ae55f46a8 100644
rq_unlock(rq, &rf);
}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 143f55df8..3f0fe409f 100644
index 143f55df890b..3aad8900c35e 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -6,7 +6,11 @@
@@ -5,8 +5,28 @@
* sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
*/
SCHED_FEAT(PLACE_LAG, true)
+/*
+ * Give new tasks half a slice to ease into the competition.
+ */
+#if !defined(CONFIG_SCHED_BORE)
SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(RUN_TO_PARITY, false)
+#else // !CONFIG_SCHED_BORE
SCHED_FEAT(RUN_TO_PARITY, true)
-SCHED_FEAT(RUN_TO_PARITY, true)
+#endif // CONFIG_SCHED_BORE
+/*
+ * Inhibit (wakeup) preemption until the current task has exhausted its slice.
+ */
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(RESPECT_SLICE, false)
+#else // !CONFIG_SCHED_BORE
+SCHED_FEAT(RESPECT_SLICE, true)
+#endif // CONFIG_SCHED_BORE
+/*
+ * Relax RESPECT_SLICE to allow preemption once current has reached 0-lag.
+ */
+SCHED_FEAT(RUN_TO_PARITY, false)
+/*
+ * Allow tasks with a shorter slice to disregard RESPECT_SLICE
+ */
+SCHED_FEAT(PREEMPT_SHORT, true)
/*
* Prefer to schedule the task we woke last (assuming it failed
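Note (illustrative, not part of the patch): RESPECT_SLICE, RUN_TO_PARITY and PREEMPT_SHORT are ordinary sched_feat() flags, so on a kernel built with CONFIG_SCHED_DEBUG they can be flipped at runtime through the scheduler debugfs file; this interface is generic, not something this patch adds:

# Requires debugfs mounted and root:
cat /sys/kernel/debug/sched/features                        # current feature state
echo RESPECT_SLICE > /sys/kernel/debug/sched/features       # enable slice preservation
echo NO_RESPECT_SLICE > /sys/kernel/debug/sched/features    # back to the CONFIG_SCHED_BORE default (off)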
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ed5c758c7..9d62372ae 100644
index 10c1caff5e06..5d845dbd0cf9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1965,7 +1965,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
@@ -1969,7 +1969,11 @@ static inline void dirty_sched_domain_sysctl(int cpu)
}
#endif
@ -735,7 +915,7 @@ index ed5c758c7..9d62372ae 100644
static inline const struct cpumask *task_user_cpus(struct task_struct *p)
{
@@ -2552,6 +2556,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
@@ -2554,6 +2558,9 @@ extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_base_slice;
@ -746,4 +926,4 @@ index ed5c758c7..9d62372ae 100644
#ifdef CONFIG_SCHED_DEBUG
extern int sysctl_resched_latency_warn_ms;
--
2.43.0.232.ge79552d197
2.46.0.rc0

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,37 +1,121 @@
From d2db737a5be989688a7a5d805b7f299d0203d228 Mon Sep 17 00:00:00 2001
From eb7e13baaf58cdede50c060633bdb14bf9603a54 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Mon, 29 Jan 2024 15:09:44 +0100
Subject: [PATCH] NVIDIA: Fixup GPL issue
Date: Mon, 3 Jun 2024 15:33:26 +0200
Subject: [PATCH] Fix 6.10 NVIDIA
Co-authored-by: Laio Oriel Seman <laioseman@gmail.com>
Signed-off-by: Peter Jung <admin@ptr1337.dev>
---
kernel/rcu/tree_plugin.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
include/linux/mm.h | 4 ++++
mm/memory.c | 37 ++++++++++++++++++++++++++++++++++++-
mm/nommu.c | 21 +++++++++++++++++++++
3 files changed, 61 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 41021080ad25..72474d8ec180 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -406,7 +406,7 @@ void __rcu_read_lock(void)
WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
barrier(); /* critical section after entry code. */
}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
+EXPORT_SYMBOL(__rcu_read_lock);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9849dfda44d43..adc5a252da02e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2438,6 +2438,10 @@ int
copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
int follow_pte(struct vm_area_struct *vma, unsigned long address,
pte_t **ptepp, spinlock_t **ptlp);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn);
+//int follow_phys(struct vm_area_struct *vma, unsigned long address,
+// unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
/*
* Preemptible RCU implementation for rcu_read_unlock().
@@ -431,7 +431,7 @@ void __rcu_read_unlock(void)
WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
diff --git a/mm/memory.c b/mm/memory.c
index 0f47a533014e4..0401d10b3d824 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5962,7 +5962,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
* Only IO mappings and raw PFN mappings are allowed. The mmap semaphore
* should be taken for read.
*
- * This function must not be used to modify PTE content.
+ * KVM uses this function. While it is arguably less bad than ``follow_pfn``,
+ * it is not a good general-purpose API.
*
* Return: zero on success, -ve otherwise.
*/
@@ -6012,6 +6013,40 @@ int follow_pte(struct vm_area_struct *vma, unsigned long address,
}
}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+EXPORT_SYMBOL(__rcu_read_unlock);
EXPORT_SYMBOL_GPL(follow_pte);
/*
* Advance a ->blkd_tasks-list pointer to the next entry, instead
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * This function does not allow the caller to read the permissions
+ * of the PTE. Do not use it.
+ *
+ * Return: zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn)
+{
+ int ret = -EINVAL;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ return ret;
+
+ //ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+ ret = follow_pte(vma, address, &ptep, &ptl);
+
+ if (ret)
+ return ret;
+ *pfn = pte_pfn(ptep_get(ptep));
+ pte_unmap_unlock(ptep, ptl);
+ return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
#ifdef CONFIG_HAVE_IOREMAP_PROT
/**
* generic_access_phys - generic implementation for iomem mmap access
diff --git a/mm/nommu.c b/mm/nommu.c
index 7296e775e04e2..8e0deb733bfef 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -110,6 +110,27 @@ unsigned int kobjsize(const void *objp)
return page_size(page);
}
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn)
+{
+ if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+ return -EINVAL;
+
+ *pfn = address >> PAGE_SHIFT;
+ return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
void vfree(const void *addr)
{
kfree(addr);
--
2.43.0
2.45.1
--- a/kernel/nvidia-drm/nvidia-drm-drv.c
+++ b/kernel/nvidia-drm/nvidia-drm-drv.c
@@ -125,106 +209,553 @@ index 41021080ad25..72474d8ec180 100644
drm_dev_unregister(dev);
nv_drm_dev_free(dev);
From d82eb6c87ee2e05b6bbd35f703a41e68b3adc3a7 Mon Sep 17 00:00:00 2001
From: Aaron Plattner <aplattner@nvidia.com>
Date: Tue, 26 Dec 2023 11:58:46 -0800
Subject: [PATCH] nvidia-drm: Use a workqueue to defer calling
drm_kms_helper_hotplug_event
From 612740b11c9645e0f0240b3ca5908ef225763bc8 Mon Sep 17 00:00:00 2001
From: Peter Jung <admin@ptr1337.dev>
Date: Thu, 27 Jun 2024 19:46:51 +0200
Subject: [PATCH] gsp-stutter-fix
We've been having reports of stutter issues in 555 releases related to GSP enablement. On the proprietary driver, NVreg_EnableGpuFirmware=0 makes them go away; on the open driver that's not an option.
So far, we've identified two possible causes here. One is fixed by commit 674c009 below. The other we can't fix/workaround in the kernel modules and requires usermode changes, but commit 8c1c49b should tell us if that path is actually being hit or not.
I've also augmented the logs captured by nvidia-bug-report.sh with some of the info that we found severely lacking in the bug reports so far.
My hope is that folks that have experienced these stutter issues can take these patches, try to reproduce the issue and report back with their findings (and their nvidia-bug-report logs). Many thanks in advance to anyone willing to go the extra mile(s) for us here!
We've unfortunately missed beta2 / 555.52 with this stuff (security fixes can't wait), but here it is early so we don't have to wait on the next release.
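Note (illustrative, not part of the patch): the NVreg_EnableGpuFirmware=0 workaround mentioned above is a module option for the proprietary kernel modules only (the open modules cannot run without GSP firmware). A minimal sketch; the file name is arbitrary and update-initramfs assumes a Debian-family system such as PikaOS:

echo 'options nvidia NVreg_EnableGpuFirmware=0' | sudo tee /etc/modprobe.d/nvidia-gsp.conf
sudo update-initramfs -u   # refresh the initramfs, then reboot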
---
kernel/nvidia-drm/nvidia-drm-drv.c | 24 ++++++++++++++++++++++++
kernel/nvidia-drm/nvidia-drm-encoder.c | 4 ++--
kernel/nvidia-drm/nvidia-drm-priv.h | 1 +
3 files changed, 27 insertions(+), 2 deletions(-)
kernel-open/nvidia/nv.c | 10 +
src/nvidia/arch/nvalloc/unix/include/osapi.h | 6 -
src/nvidia/arch/nvalloc/unix/src/escape.c | 46 ----
src/nvidia/arch/nvalloc/unix/src/osapi.c | 230 ++++++++-----------
src/nvidia/exports_link_command.txt | 1 -
src/nvidia/src/kernel/disp/disp_sw.c | 23 ++
6 files changed, 132 insertions(+), 184 deletions(-)
diff --git kernel/nvidia-drm/nvidia-drm-drv.c kernel/nvidia-drm/nvidia-drm-drv.c
index e0ddb6c..9f7424d 100644
--- kernel/nvidia-drm/nvidia-drm-drv.c
+++ kernel/nvidia-drm/nvidia-drm-drv.c
@@ -74,6 +74,7 @@
#endif
diff --git a/kernel-open/nvidia/nv.c b/kernel-open/nvidia/nv.c
index 99792de9..ccef3f29 100644
--- a/kernel-open/nvidia/nv.c
+++ b/kernel-open/nvidia/nv.c
@@ -4042,6 +4042,16 @@ int NV_API_CALL nv_get_event(
nvidia_event_t *nvet;
unsigned long eflags;
#include <linux/pci.h>
+#include <linux/workqueue.h>
+ //
+ // Note that the head read/write is not atomic when done outside of the
+ // spinlock, so this might not be a valid pointer at all. But if we read
+ // NULL here that means that the value indeed was NULL and we can bail
+ // early since there's no events. Otherwise, we have to do a proper read
+ // under a spinlock.
+ //
+ if (nvlfp->event_data_head == NULL)
+ return NV_ERR_GENERIC;
+
NV_SPIN_LOCK_IRQSAVE(&nvlfp->fp_lock, eflags);
/*
* Commit fcd70cd36b9b ("drm: Split out drm_probe_helper.h")
@@ -405,6 +406,27 @@ static int nv_drm_create_properties(struct nv_drm_device *nv_dev)
return 0;
nvet = nvlfp->event_data_head;
diff --git a/src/nvidia/arch/nvalloc/unix/include/osapi.h b/src/nvidia/arch/nvalloc/unix/include/osapi.h
index f91e3aa5..640155e9 100644
--- a/src/nvidia/arch/nvalloc/unix/include/osapi.h
+++ b/src/nvidia/arch/nvalloc/unix/include/osapi.h
@@ -121,9 +121,6 @@ NvBool RmGpuHasIOSpaceEnabled (nv_state_t *);
void RmFreeUnusedClients (nv_state_t *, nv_file_private_t *);
NV_STATUS RmIoctl (nv_state_t *, nv_file_private_t *, NvU32, void *, NvU32);
-NV_STATUS RmAllocOsEvent (NvHandle, nv_file_private_t *, NvU32);
-NV_STATUS RmFreeOsEvent (NvHandle, NvU32);
-
void RmI2cAddGpuPorts(nv_state_t *);
NV_STATUS RmInitX86EmuState(OBJGPU *);
@@ -141,9 +138,6 @@ int amd_msr_c0011022_incompatible(OBJOS *);
NV_STATUS rm_get_adapter_status (nv_state_t *, NvU32 *);
-NV_STATUS rm_alloc_os_event (NvHandle, nv_file_private_t *, NvU32);
-NV_STATUS rm_free_os_event (NvHandle, NvU32);
-NV_STATUS rm_get_event_data (nv_file_private_t *, NvP64, NvU32 *);
void rm_client_free_os_events (NvHandle);
NV_STATUS rm_create_mmap_context (NvHandle, NvHandle, NvHandle, NvP64, NvU64, NvU64, NvU32, NvU32);
diff --git a/src/nvidia/arch/nvalloc/unix/src/escape.c b/src/nvidia/arch/nvalloc/unix/src/escape.c
index de099513..1046b19f 100644
--- a/src/nvidia/arch/nvalloc/unix/src/escape.c
+++ b/src/nvidia/arch/nvalloc/unix/src/escape.c
@@ -677,52 +677,6 @@ NV_STATUS RmIoctl(
break;
}
+#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
+/*
+ * We can't just call drm_kms_helper_hotplug_event directly because
+ * fbdev_generic may attempt to set a mode from inside the hotplug event
+ * handler. Because kapi event handling runs on nvkms_kthread_q, this blocks
+ * other event processing including the flip completion notifier expected by
+ * nv_drm_atomic_commit.
+ *
+ * Defer hotplug event handling to a work item so that nvkms_kthread_q can
+ * continue processing events while a DRM modeset is in progress.
+ */
+static void nv_drm_handle_hotplug_event(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct nv_drm_device *nv_dev =
+ container_of(dwork, struct nv_drm_device, hotplug_event_work);
+
+ drm_kms_helper_hotplug_event(nv_dev->dev);
+}
+#endif
+
static int nv_drm_load(struct drm_device *dev, unsigned long flags)
- case NV_ESC_ALLOC_OS_EVENT:
- {
- nv_ioctl_alloc_os_event_t *pApi = data;
-
- if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
- {
- rmStatus = NV_ERR_INVALID_ARGUMENT;
- goto done;
- }
-
- pApi->Status = rm_alloc_os_event(pApi->hClient,
- nvfp,
- pApi->fd);
- break;
- }
-
- case NV_ESC_FREE_OS_EVENT:
- {
- nv_ioctl_free_os_event_t *pApi = data;
-
- if (dataSize != sizeof(nv_ioctl_free_os_event_t))
- {
- rmStatus = NV_ERR_INVALID_ARGUMENT;
- goto done;
- }
-
- pApi->Status = rm_free_os_event(pApi->hClient, pApi->fd);
- break;
- }
-
- case NV_ESC_RM_GET_EVENT_DATA:
- {
- NVOS41_PARAMETERS *pApi = data;
-
- if (dataSize != sizeof(NVOS41_PARAMETERS))
- {
- rmStatus = NV_ERR_INVALID_ARGUMENT;
- goto done;
- }
-
- pApi->status = rm_get_event_data(nvfp,
- pApi->pEvent,
- &pApi->MoreEvents);
- break;
- }
-
case NV_ESC_STATUS_CODE:
{
#if defined(NV_DRM_ATOMIC_MODESET_AVAILABLE)
@@ -540,6 +562,7 @@ static int nv_drm_load(struct drm_device *dev, unsigned long flags)
nv_state_t *pNv;
diff --git a/src/nvidia/arch/nvalloc/unix/src/osapi.c b/src/nvidia/arch/nvalloc/unix/src/osapi.c
index fd312466..51249750 100644
--- a/src/nvidia/arch/nvalloc/unix/src/osapi.c
+++ b/src/nvidia/arch/nvalloc/unix/src/osapi.c
@@ -25,6 +25,7 @@
/* Enable event handling */
+ INIT_DELAYED_WORK(&nv_dev->hotplug_event_work, nv_drm_handle_hotplug_event);
atomic_set(&nv_dev->enable_event_handling, true);
init_waitqueue_head(&nv_dev->flip_event_wq);
@@ -567,6 +590,7 @@ static void __nv_drm_unload(struct drm_device *dev)
return;
#include <nv_ref.h>
#include <nv.h>
+#include <nv_escape.h>
#include <nv-priv.h>
#include <os/os.h>
#include <osapi.h>
@@ -406,6 +407,39 @@ static void free_os_events(
portSyncSpinlockRelease(nv->event_spinlock);
}
+ cancel_delayed_work_sync(&nv_dev->hotplug_event_work);
mutex_lock(&nv_dev->lock);
WARN_ON(nv_dev->subOwnershipGranted);
diff --git kernel/nvidia-drm/nvidia-drm-encoder.c kernel/nvidia-drm/nvidia-drm-encoder.c
index b5ef5a2..7c0c119 100644
--- kernel/nvidia-drm/nvidia-drm-encoder.c
+++ kernel/nvidia-drm/nvidia-drm-encoder.c
@@ -300,7 +300,7 @@ void nv_drm_handle_display_change(struct nv_drm_device *nv_dev,
nv_drm_connector_mark_connection_status_dirty(nv_encoder->nv_connector);
- drm_kms_helper_hotplug_event(dev);
+ schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
+static NV_STATUS get_os_event_data(
+ nv_file_private_t *nvfp,
+ NvP64 pEvent,
+ NvU32 *MoreEvents
+)
+{
+ nv_event_t nv_event;
+ NvUnixEvent *nv_unix_event;
+ NV_STATUS status;
+
+ status = os_alloc_mem((void**)&nv_unix_event, sizeof(NvUnixEvent));
+ if (status != NV_OK)
+ return status;
+
+ status = nv_get_event(nvfp, &nv_event, MoreEvents);
+ if (status != NV_OK)
+ {
+ status = NV_ERR_OPERATING_SYSTEM;
+ goto done;
+ }
+
+ os_mem_set(nv_unix_event, 0, sizeof(NvUnixEvent));
+ nv_unix_event->hObject = nv_event.hObject;
+ nv_unix_event->NotifyIndex = nv_event.index;
+ nv_unix_event->info32 = nv_event.info32;
+ nv_unix_event->info16 = nv_event.info16;
+
+ status = os_memcpy_to_user(NvP64_VALUE(pEvent), nv_unix_event, sizeof(NvUnixEvent));
+done:
+ os_free_mem(nv_unix_event);
+ return status;
+}
+
void rm_client_free_os_events(
NvHandle client
)
@@ -482,6 +516,12 @@ static NV_STATUS allocate_os_event(
goto done;
}
void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
@@ -347,6 +347,6 @@ void nv_drm_handle_dynamic_display_connected(struct nv_drm_device *nv_dev,
drm_reinit_primary_mode_group(dev);
#endif
+ new_event->hParent = hParent;
+ new_event->nvfp = nvfp;
+ new_event->fd = fd;
+ new_event->active = NV_TRUE;
+ new_event->refcount = 0;
+
portSyncSpinlockAcquire(nv->event_spinlock);
for (event = nv->event_list; event; event = event->next)
{
@@ -496,45 +536,26 @@ static NV_STATUS allocate_os_event(
- drm_kms_helper_hotplug_event(dev);
+ schedule_delayed_work(&nv_dev->hotplug_event_work, 0);
new_event->next = nv->event_list;
nv->event_list = new_event;
+ nvfp->bCleanupRmapi = NV_TRUE;
portSyncSpinlockRelease(nv->event_spinlock);
done:
if (status == NV_OK)
{
- new_event->hParent = hParent;
- new_event->nvfp = nvfp;
- new_event->fd = fd;
- new_event->active = NV_TRUE;
- new_event->refcount = 0;
-
- nvfp->bCleanupRmapi = NV_TRUE;
-
NV_PRINTF(LEVEL_INFO, "allocated OS event:\n");
NV_PRINTF(LEVEL_INFO, " hParent: 0x%x\n", hParent);
NV_PRINTF(LEVEL_INFO, " fd: %d\n", fd);
}
else
{
+ NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event: 0x%08x\n", status);
+ status = NV_ERR_INSUFFICIENT_RESOURCES;
portMemFree(new_event);
}
#endif
diff --git kernel/nvidia-drm/nvidia-drm-priv.h kernel/nvidia-drm/nvidia-drm-priv.h
index 253155f..c9ce727 100644
--- kernel/nvidia-drm/nvidia-drm-priv.h
+++ kernel/nvidia-drm/nvidia-drm-priv.h
@@ -126,6 +126,7 @@ struct nv_drm_device {
NvU64 modifiers[6 /* block linear */ + 1 /* linear */ + 1 /* terminator */];
#endif
+ struct delayed_work hotplug_event_work;
atomic_t enable_event_handling;
return status;
}
/**
-NV_STATUS RmAllocOsEvent(
- NvHandle hParent,
- nv_file_private_t *nvfp,
- NvU32 fd
-)
-{
- if (NV_OK != allocate_os_event(hParent, nvfp, fd))
- {
- NV_PRINTF(LEVEL_ERROR, "failed to allocate OS event\n");
- return NV_ERR_INSUFFICIENT_RESOURCES;
- }
- return NV_OK;
-}
-
static NV_STATUS free_os_event(
NvHandle hParent,
NvU32 fd
@@ -585,18 +606,6 @@ static NV_STATUS free_os_event(
return result;
}
-NV_STATUS RmFreeOsEvent(
- NvHandle hParent,
- NvU32 fd
-)
-{
- if (NV_OK != free_os_event(hParent, fd))
- {
- return NV_ERR_INVALID_EVENT;
- }
- return NV_OK;
-}
-
static void RmExecuteWorkItem(
void *pWorkItem
)
@@ -656,40 +665,6 @@ done:
portMemFree((void *)pWi);
}
-static NV_STATUS RmGetEventData(
- nv_file_private_t *nvfp,
- NvP64 pEvent,
- NvU32 *MoreEvents,
- NvBool bUserModeArgs
-)
-{
- NV_STATUS RmStatus;
- NvUnixEvent *pKernelEvent = NULL;
- nv_event_t nv_event;
- RMAPI_PARAM_COPY paramCopy;
-
- RmStatus = nv_get_event(nvfp, &nv_event, MoreEvents);
- if (RmStatus != NV_OK)
- return NV_ERR_OPERATING_SYSTEM;
-
- // setup for access to client's parameters
- RMAPI_PARAM_COPY_INIT(paramCopy, pKernelEvent, pEvent, 1, sizeof(NvUnixEvent));
- RmStatus = rmapiParamsAcquire(&paramCopy, bUserModeArgs);
- if (RmStatus != NV_OK)
- return NV_ERR_OPERATING_SYSTEM;
-
- pKernelEvent->hObject = nv_event.hObject;
- pKernelEvent->NotifyIndex = nv_event.index;
- pKernelEvent->info32 = nv_event.info32;
- pKernelEvent->info16 = nv_event.info16;
-
- // release client buffer access, with copyout as needed
- if (rmapiParamsRelease(&paramCopy) != NV_OK)
- return NV_ERR_OPERATING_SYSTEM;
-
- return NV_OK;
-}
-
static NV_STATUS RmAccessRegistry(
NvHandle hClient,
NvHandle hObject,
@@ -2738,16 +2713,68 @@ NV_STATUS NV_API_CALL rm_ioctl(
NvU32 dataSize
)
{
- NV_STATUS rmStatus;
+ NV_STATUS rmStatus = NV_OK;
THREAD_STATE_NODE threadState;
void *fp;
NV_ENTER_RM_RUNTIME(sp,fp);
- threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
- rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize);
+ //
+ // Some ioctls are handled entirely inside the OS layer and don't need to
+ // suffer the overhead of calling into RM core.
+ //
+ switch (Command)
+ {
+ case NV_ESC_ALLOC_OS_EVENT:
+ {
+ nv_ioctl_alloc_os_event_t *pApi = pData;
+
+ if (dataSize != sizeof(nv_ioctl_alloc_os_event_t))
+ {
+ rmStatus = NV_ERR_INVALID_ARGUMENT;
+ break;
+ }
+
+ pApi->Status = allocate_os_event(pApi->hClient, nvfp, pApi->fd);
+ break;
+ }
+ case NV_ESC_FREE_OS_EVENT:
+ {
+ nv_ioctl_free_os_event_t *pApi = pData;
+
+ if (dataSize != sizeof(nv_ioctl_free_os_event_t))
+ {
+ rmStatus = NV_ERR_INVALID_ARGUMENT;
+ break;
+ }
+
+ pApi->Status = free_os_event(pApi->hClient, pApi->fd);
+ break;
+ }
+ case NV_ESC_RM_GET_EVENT_DATA:
+ {
+ NVOS41_PARAMETERS *pApi = pData;
+
+ if (dataSize != sizeof(NVOS41_PARAMETERS))
+ {
+ rmStatus = NV_ERR_INVALID_ARGUMENT;
+ break;
+ }
+
+ pApi->status = get_os_event_data(nvfp,
+ pApi->pEvent,
+ &pApi->MoreEvents);
+ break;
+ }
+ default:
+ {
+ threadStateInit(&threadState, THREAD_STATE_FLAGS_NONE);
+ rmStatus = RmIoctl(pNv, nvfp, Command, pData, dataSize);
+ threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
+ break;
+ }
+ }
- threadStateFree(&threadState, THREAD_STATE_FLAGS_NONE);
NV_EXIT_RM_RUNTIME(sp,fp);
return rmStatus;
@@ -2882,65 +2909,6 @@ void NV_API_CALL rm_unbind_lock(
NV_EXIT_RM_RUNTIME(sp,fp);
}
-NV_STATUS rm_alloc_os_event(
- NvHandle hClient,
- nv_file_private_t *nvfp,
- NvU32 fd
-)
-{
- NV_STATUS RmStatus;
-
- // LOCK: acquire API lock
- if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
- {
- RmStatus = RmAllocOsEvent(hClient, nvfp, fd);
-
- // UNLOCK: release API lock
- rmapiLockRelease();
- }
-
- return RmStatus;
-}
-
-NV_STATUS rm_free_os_event(
- NvHandle hClient,
- NvU32 fd
-)
-{
- NV_STATUS RmStatus;
-
- // LOCK: acquire API lock
- if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
- {
- RmStatus = RmFreeOsEvent(hClient, fd);
-
- // UNLOCK: release API lock
- rmapiLockRelease();
- }
-
- return RmStatus;
-}
-
-NV_STATUS rm_get_event_data(
- nv_file_private_t *nvfp,
- NvP64 pEvent,
- NvU32 *MoreEvents
-)
-{
- NV_STATUS RmStatus;
-
- // LOCK: acquire API lock
- if ((RmStatus = rmapiLockAcquire(RMAPI_LOCK_FLAGS_READ, RM_LOCK_MODULES_EVENT)) == NV_OK)
- {
- RmStatus = RmGetEventData(nvfp, pEvent, MoreEvents, NV_TRUE);
-
- // UNLOCK: release API lock
- rmapiLockRelease();
- }
-
- return RmStatus;
-}
-
NV_STATUS NV_API_CALL rm_read_registry_dword(
nvidia_stack_t *sp,
nv_state_t *nv,
diff --git a/src/nvidia/exports_link_command.txt b/src/nvidia/exports_link_command.txt
index de3cf86d..b92185de 100644
--- a/src/nvidia/exports_link_command.txt
+++ b/src/nvidia/exports_link_command.txt
@@ -1,6 +1,5 @@
--undefined=rm_disable_adapter
--undefined=rm_execute_work_item
---undefined=rm_free_os_event
--undefined=rm_free_private_state
--undefined=rm_cleanup_file_private
--undefined=rm_unbind_lock
diff --git a/src/nvidia/src/kernel/disp/disp_sw.c b/src/nvidia/src/kernel/disp/disp_sw.c
index 03ce58f7..bb7396b6 100644
--- a/src/nvidia/src/kernel/disp/disp_sw.c
+++ b/src/nvidia/src/kernel/disp/disp_sw.c
@@ -141,8 +141,15 @@ NV_STATUS dispswReleaseSemaphoreAndNotifierFill
NvBool bFound = NV_FALSE;
NV_STATUS status;
+#define PRINT_INTERVAL 3600 // At 60Hz, this will emit about once per minute.
+
if (flags & F_SEMAPHORE_ADDR_VALID)
{
+ static NvU64 counter;
+ if ((++counter % PRINT_INTERVAL) == 0) {
+ NV_PRINTF(LEVEL_ERROR, "XXXMT: NVRM debugging - F_SEMAPHORE_ADDR_VALID = %llu\n", counter);
+ }
+
bFound = CliGetDmaMappingInfo(RES_GET_CLIENT(pDevice),
RES_GET_HANDLE(pDevice),
vaSpace,
@@ -154,6 +161,11 @@ NV_STATUS dispswReleaseSemaphoreAndNotifierFill
}
else if (flags & F_SEMAPHORE_RELEASE)
{
+ static NvU64 counter;
+ if ((++counter % PRINT_INTERVAL) == 0) {
+ NV_PRINTF(LEVEL_ERROR, "XXXMT: NVRM debugging - F_SEMAPHORE_RELEASE = %llu\n", counter);
+ }
+
status = semaphoreFillGPUVA(pGpu,
pDevice,
vaSpace,
@@ -165,6 +177,11 @@ NV_STATUS dispswReleaseSemaphoreAndNotifierFill
}
else if (flags & F_NOTIFIER_FILL)
{
+ static NvU64 counter;
+ if ((++counter % PRINT_INTERVAL) == 0) {
+ NV_PRINTF(LEVEL_ERROR, "XXXMT: NVRM debugging - F_NOTIFIER_FILL = %llu\n", counter);
+ }
+
status = notifyFillNotifierGPUVA(pGpu,
pDevice,
vaSpace,
@@ -175,5 +192,11 @@ NV_STATUS dispswReleaseSemaphoreAndNotifierFill
NV9072_NOTIFIERS_NOTIFY_ON_VBLANK /* Index */);
return status;
}
+ else {
+ static NvU64 counter;
+ if ((++counter % PRINT_INTERVAL) == 0) {
+ NV_PRINTF(LEVEL_ERROR, "XXXMT: NVRM debugging - ??? 0x%08x = %llu\n", flags, counter);
+ }
+ }
return NV9072_NOTIFICATION_STATUS_DONE_SUCCESS;
}
--
2.43.0
2.45.2
--- a/nvidia-drm/nvidia-drm-linux.c
+++ b/nvidia-drm/nvidia-drm-linux.c
@@ -31,13 +31,13 @@
MODULE_PARM_DESC(
modeset,
- "Enable atomic kernel modesetting (1 = enable, 0 = disable (default))");
+ "Enable atomic kernel modesetting (1 = enable (default), 0 = disable)");
module_param_named(modeset, nv_drm_modeset_module_param, bool, 0400);
#if defined(NV_DRM_FBDEV_GENERIC_AVAILABLE)
MODULE_PARM_DESC(
fbdev,
- "Create a framebuffer device (1 = enable, 0 = disable (default)) (EXPERIMENTAL)");
+ "Create a framebuffer device (1 = enable (default), 0 = disable) (EXPERIMENTAL)");
module_param_named(fbdev, nv_drm_fbdev_module_param, bool, 0400);
#endif
--- a/nvidia-drm/nvidia-drm-os-interface.c
+++ b/nvidia-drm/nvidia-drm-os-interface.c
@@ -41,8 +41,8 @@
#include <drm/drmP.h>
#endif
-bool nv_drm_modeset_module_param = false;
-bool nv_drm_fbdev_module_param = false;
+bool nv_drm_modeset_module_param = true;
+bool nv_drm_fbdev_module_param = true;
void *nv_drm_calloc(size_t nmemb, size_t size)
{
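Note (illustrative, not part of the patch): the two hunks above flip the nvidia-drm defaults to modeset=1 and fbdev=1. The stock upstream behaviour can still be restored per machine through the usual module options; the file name below is arbitrary:

echo 'options nvidia-drm modeset=0 fbdev=0' | sudo tee /etc/modprobe.d/nvidia-drm-defaults.conf
# or on the kernel command line: nvidia-drm.modeset=0 nvidia-drm.fbdev=0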
--- a/src/nvidia-modeset/Makefile
+++ b/src/nvidia-modeset/Makefile
@@ -142,6 +142,7 @@ ifeq ($(TARGET_ARCH),x86_64)
CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -fno-jump-tables)
CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch=thunk-extern)
CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mindirect-branch-register)
+ CONDITIONAL_CFLAGS += $(call TEST_CC_ARG, -mharden-sls=all)
endif
CFLAGS += $(CONDITIONAL_CFLAGS)

File diff suppressed because it is too large


@@ -1,17 +1,15 @@
cachyos/0001-cachyos-base-all.patch
cachyos/0001-bore-cachy.patch
cachyos/0002-ntsync.patch
cachyos/0004-intel.patch
nobara/0001-Allow-to-set-custom-USB-pollrate-for-specific-device.patch
nobara/0001-Revert-PCI-Add-a-REBAR-size-quirk-for-Sapphire-RX-56.patch
nobara/0001-Revert-nvme-pci-drop-redundant-pci_enable_pcie_error.patch
nobara/0001-Set-amdgpu.ppfeaturemask-0xffffffff-as-default.patch
nobara/0001-acpi-proc-idle-skip-dummy-wait.patch
nobara/0001-add-acpi_call.patch
nobara/amdgpu-si-cik-default.patch
nobara/lenovo-legion-laptop.patch
asuslinux/0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch
asuslinux/0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch
asuslinux/0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch
asuslinux/0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch
asuslinux/0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch
# nobara/0001-Allow-to-set-custom-USB-pollrate-for-specific-device.patch
# nobara/0001-Revert-PCI-Add-a-REBAR-size-quirk-for-Sapphire-RX-56.patch
# nobara/0001-Revert-nvme-pci-drop-redundant-pci_enable_pcie_error.patch
# nobara/0001-Set-amdgpu.ppfeaturemask-0xffffffff-as-default.patch
# nobara/0001-acpi-proc-idle-skip-dummy-wait.patch
# nobara/0001-add-acpi_call.patch
# nobara/amdgpu-si-cik-default.patch
# nobara/lenovo-legion-laptop.patch
# asuslinux/0001-platform-x86-asus-wmi-add-support-for-2024-ROG-Mini-.patch
# asuslinux/0002-platform-x86-asus-wmi-add-support-for-Vivobook-GPU-M.patch
# asuslinux/0003-platform-x86-asus-wmi-add-support-variant-of-TUF-RGB.patch
# asuslinux/0004-platform-x86-asus-wmi-support-toggling-POST-sound.patch
# asuslinux/0005-platform-x86-asus-wmi-store-a-min-default-for-ppt-op.patch


@@ -1,8 +1,2 @@
# send debs to server
rsync -azP --include './' --include '*.deb' --exclude '*' ./output/ ferreo@direct.pika-os.com:/srv/www/incoming/
# add debs to repo
ssh ferreo@direct.pika-os.com 'aptly repo add -force-replace -remove-files pikauwu-main /srv/www/incoming/'
# publish the repo
ssh ferreo@direct.pika-os.com 'aptly publish update -batch -skip-contents -force-overwrite pikauwu filesystem:pikarepo:'
rsync -azP --include './' --include '*.deb' --exclude '*' ./output/ ferreo@direct.pika-os.com:/srv/www/nest-incoming/


@@ -2,4 +2,4 @@
echo "Pika Kernel - Building"
make -j`nproc` bindeb-pkg LOCALVERSION=-pikaos KDEB_PKGVERSION=$(make kernelversion)-100pika5
make -j`nproc` bindeb-pkg LOCALVERSION=-pikaos KDEB_PKGVERSION=$(make kernelversion)-101pika1


@@ -5,7 +5,7 @@ echo "Pika Kernel - Applying configuration"
cp ../config .config
scripts/config -k -d CONFIG_GENERIC_CPU
scripts/config -k -e CONFIG_GENERIC_CPU2
scripts/config -k -e CONFIG_GENERIC_CPU3
scripts/config -e CACHY
scripts/config -e SCHED_BORE