commit 163e2956bd52b4e1c598cafa9cc1a6d29ee41e79 Author: ferreo Date: Sun Jan 12 22:05:18 2025 +0000 First release diff --git a/.github/build-canary-v3 b/.github/build-canary-v3 new file mode 100644 index 0000000..56a6051 --- /dev/null +++ b/.github/build-canary-v3 @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/.github/build-nest-v3 b/.github/build-nest-v3 new file mode 100644 index 0000000..56a6051 --- /dev/null +++ b/.github/build-nest-v3 @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/.github/release-canary-v3 b/.github/release-canary-v3 new file mode 100644 index 0000000..d00491f --- /dev/null +++ b/.github/release-canary-v3 @@ -0,0 +1 @@ +1 diff --git a/.github/release-nest-v3 b/.github/release-nest-v3 new file mode 100644 index 0000000..56a6051 --- /dev/null +++ b/.github/release-nest-v3 @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/.github/workflows/build-canaryv3.yml b/.github/workflows/build-canaryv3.yml new file mode 100644 index 0000000..7b9e211 --- /dev/null +++ b/.github/workflows/build-canaryv3.yml @@ -0,0 +1,40 @@ +name: PikaOS Package Build Only (Canary) (amd64-v3) + +on: + push: + branches: + - main + paths: + - '.github/build-canary-v3' + +jobs: + build: + runs-on: ubuntu-latest + container: + image: ghcr.io/pikaos-linux/pikaos-builder:canaryv3 + volumes: + - /proc:/proc + options: --privileged -it + + steps: + - uses: actions/checkout@v3 + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_KEY }} + name: id_rsa + known_hosts: ${{ secrets.KNOWN_HOSTS }} + if_key_exists: replace + + - name: Update APT Cache + run: apt-get update -y + + - name: Set Build Config + run: cp -vf ./pika-build-config/amd64-v3.sh ./pika-build-config.sh + + - name: Setup Makefile + run: cp -vf ./Makefile-v3 ./Makefile + + - name: Build Package + run: ./main.sh diff --git a/.github/workflows/build-nestv3.yml b/.github/workflows/build-nestv3.yml new file mode 100644 index 0000000..fd391dc --- /dev/null +++ b/.github/workflows/build-nestv3.yml @@ -0,0 +1,40 @@ +name: PikaOS Package Build Only (amd64-v3) + +on: + push: + branches: + - main + paths: + - '.github/build-nest-v3' + +jobs: + build: + runs-on: ubuntu-latest + container: + image: ghcr.io/pikaos-linux/pikaos-builder:nestv3 + volumes: + - /proc:/proc + options: --privileged -it + + steps: + - uses: actions/checkout@v3 + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_KEY }} + name: id_rsa + known_hosts: ${{ secrets.KNOWN_HOSTS }} + if_key_exists: replace + + - name: Update APT Cache + run: apt-get update -y + + - name: Set Build Config + run: cp -vf ./pika-build-config/amd64-v3.sh ./pika-build-config.sh + + - name: Setup Makefile + run: cp -vf ./Makefile-v3 ./Makefile + + - name: Build Package + run: ./main.sh diff --git a/.github/workflows/release-canaryv3.yml b/.github/workflows/release-canaryv3.yml new file mode 100644 index 0000000..d799864 --- /dev/null +++ b/.github/workflows/release-canaryv3.yml @@ -0,0 +1,43 @@ +name: PikaOS Package Build & Release (Canary) (amd64-v3) + +on: + push: + branches: + - main + paths: + - '.github/release-canary-v3' + +jobs: + build: + runs-on: ubuntu-latest + container: + image: ghcr.io/pikaos-linux/pikaos-builder:canaryv3 + volumes: + - /proc:/proc + options: --privileged -it + + steps: + - uses: actions/checkout@v3 + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_KEY }} + name: id_rsa + known_hosts: ${{ secrets.KNOWN_HOSTS }} + if_key_exists: 
replace + + - name: Update APT Cache + run: apt-get update -y + + - name: Set Build Config + run: cp -vf ./pika-build-config/amd64-v3.sh ./pika-build-config.sh + + - name: Setup Makefile + run: cp -vf ./Makefile-v3 ./Makefile + + - name: Build Package + run: ./main.sh + + - name: Release Package + run: ./release.sh diff --git a/.github/workflows/release-nestv3.yml b/.github/workflows/release-nestv3.yml new file mode 100644 index 0000000..ed42bb7 --- /dev/null +++ b/.github/workflows/release-nestv3.yml @@ -0,0 +1,40 @@ +name: PikaOS Package Build & Release (amd64-v3) + +on: + push: + branches: + - main + paths: + - '.github/release-nest-v3' + +jobs: + build: + runs-on: ubuntu-latest + container: + image: ghcr.io/pikaos-linux/pikaos-builder:nestv3 + volumes: + - /proc:/proc + options: --privileged -it + + steps: + - uses: actions/checkout@v3 + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_KEY }} + name: id_rsa + known_hosts: ${{ secrets.KNOWN_HOSTS }} + if_key_exists: replace + + - name: Update APT Cache + run: apt-get update -y + + - name: Set Build Config + run: cp -vf ./pika-build-config/amd64-v3.sh ./pika-build-config.sh + + - name: Build Package + run: ./main.sh + + - name: Release Package + run: ./release.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0bd9964 --- /dev/null +++ b/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +.idea +zig-out/ +.zig-cache/ \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..55463e5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 PikaOS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
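The .github marker files added above (build-nest-v3, build-canary-v3, release-nest-v3, release-canary-v3) appear to exist purely as CI triggers: each workflow filters on its own marker through the paths: setting and only runs on pushes to main that touch that file. A minimal sketch of kicking off the amd64-v3 build-and-release under that assumption (the exact bump convention is not documented in this commit; any content change to the marker should be enough):

    # assumed flow, inferred from the paths: trigger in release-nestv3.yml
    date > .github/release-nest-v3        # any edit to the marker file works
    git add .github/release-nest-v3
    git commit -m "trigger falcond amd64-v3 build and release"
    git push origin main

The triggered job then runs inside the ghcr.io/pikaos-linux/pikaos-builder container, copies the matching pika-build-config script into place, and calls main.sh (plus release.sh for the release workflows), as shown in the workflow steps above.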
diff --git a/falcond/build.zig b/falcond/build.zig new file mode 100644 index 0000000..44ca13d --- /dev/null +++ b/falcond/build.zig @@ -0,0 +1,17 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{}); + + const exe = b.addExecutable(.{ + .name = "falcond", + .root_source_file = .{ .cwd_relative = "src/main.zig" }, + .target = target, + .optimize = optimize, + }); + + exe.bundle_compiler_rt = true; + + b.installArtifact(exe); +} diff --git a/falcond/build.zig.zon b/falcond/build.zig.zon new file mode 100644 index 0000000..5929a5e --- /dev/null +++ b/falcond/build.zig.zon @@ -0,0 +1,72 @@ +.{ + // This is the default name used by packages depending on this one. For + // example, when a user runs `zig fetch --save `, this field is used + // as the key in the `dependencies` table. Although the user can choose a + // different name, most users will stick with this provided value. + // + // It is redundant to include "zig" in this name because it is already + // within the Zig package namespace. + .name = "falcond", + + // This is a [Semantic Version](https://semver.org/). + // In a future version of Zig it will be used for package deduplication. + .version = "0.1.0", + + // This field is optional. + // This is currently advisory only; Zig does not yet do anything + // with this value. + //.minimum_zig_version = "0.11.0", + + // This field is optional. + // Each dependency must either provide a `url` and `hash`, or a `path`. + // `zig build --fetch` can be used to fetch all dependencies of a package, recursively. + // Once all dependencies are fetched, `zig build` no longer requires + // internet connectivity. + .dependencies = .{ + // See `zig fetch --save ` for a command-line interface for adding dependencies. + //.example = .{ + // // When updating this field to a new URL, be sure to delete the corresponding + // // `hash`, otherwise you are communicating that you expect to find the old hash at + // // the new URL. + // .url = "https://example.com/foo.tar.gz", + // + // // This is computed from the file contents of the directory of files that is + // // obtained after fetching `url` and applying the inclusion rules given by + // // `paths`. + // // + // // This field is the source of truth; packages do not come from a `url`; they + // // come from a `hash`. `url` is just one of many possible mirrors for how to + // // obtain a package matching this `hash`. + // // + // // Uses the [multihash](https://multiformats.io/multihash/) format. + // .hash = "...", + // + // // When this is provided, the package is found in a directory relative to the + // // build root. In this case the package's hash is irrelevant and therefore not + // // computed. This field and `url` are mutually exclusive. + // .path = "foo", + + // // When this is set to `true`, a package is declared to be lazily + // // fetched. This makes the dependency only get fetched if it is + // // actually used. + // .lazy = false, + //}, + }, + + // Specifies the set of files and directories that are included in this package. + // Only files and directories listed here are included in the `hash` that + // is computed for this package. Only files listed here will remain on disk + // when using the zig package manager. As a rule of thumb, one should list + // files required for compilation plus any license(s). + // Paths are relative to the build root. Use the empty string (`""`) to refer to + // the build root itself. 
+ // A directory listed here means that all files within, recursively, are included. + .paths = .{ + "build.zig", + "build.zig.zon", + "src", + // For example... + //"LICENSE", + //"README.md", + }, +} diff --git a/falcond/debian/changelog b/falcond/debian/changelog new file mode 100644 index 0000000..f98e73e --- /dev/null +++ b/falcond/debian/changelog @@ -0,0 +1,5 @@ +falcon (1.0.0-101pika1) pika; urgency=low + + * Initial release + + -- ferreo Sun, 12 Jan 2025 13:48:00 +0300 diff --git a/falcond/debian/control b/falcond/debian/control new file mode 100644 index 0000000..ae6b2d4 --- /dev/null +++ b/falcond/debian/control @@ -0,0 +1,20 @@ +Source: falcond +Section: admin +Priority: optional +Maintainer: ferreo +Rules-Requires-Root: no +Build-Depends: + debhelper-compat (= 13), zig-nightly, libdbus-1-dev, dh-systemd, git +Standards-Version: 4.6.1 +Homepage: https://pika-os.com + +Package: falcond +Architecture: amd64 +Depends: ${misc:Depends}, + ${shlibs:Depends}, + util-linux, + power-profiles-daemon | tuned-ppd, + scx, + systemd +Provides: falcond +Description: Accelerate your gaming experience with falcond, auto setting scx, vcache and choosing performance profiles diff --git a/falcond/debian/copyright b/falcond/debian/copyright new file mode 100644 index 0000000..125ed9b --- /dev/null +++ b/falcond/debian/copyright @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 PikaOS + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/falcond/debian/falcond.service b/falcond/debian/falcond.service new file mode 100644 index 0000000..ce26a88 --- /dev/null +++ b/falcond/debian/falcond.service @@ -0,0 +1,13 @@ +[Unit] +Description=Falcon Daemon Service +After=network.target +Wants=graphical.target + +[Service] +Type=simple +ExecStart=/usr/bin/falcond +User=root +Restart=on-failure + +[Install] +WantedBy=graphical.target diff --git a/falcond/debian/rules b/falcond/debian/rules new file mode 100755 index 0000000..b66f606 --- /dev/null +++ b/falcond/debian/rules @@ -0,0 +1,34 @@ +#!/usr/bin/make -f + +# See debhelper(7) (uncomment to enable). +# Output every command that modifies files on the build system. 
+export DH_VERBOSE = 1 + +%: + dh $@ --with=systemd + +override_dh_dwz: + echo "disabled" + +override_dh_auto_build: + zig build-exe src/main.zig -O ReleaseFast -mcpu x86_64_v3 --name falcond + # Clone profiles repository + rm -rf falcond-profiles + git clone https://github.com/PikaOS-Linux/falcond-profiles.git + +override_dh_install: + dh_install + mkdir -p debian/falcond/usr/bin/ + mkdir -p debian/falcond/usr/share/falcond/ + cp -vf falcond debian/falcond/usr/bin/ + chmod 755 debian/falcond/usr/bin/falcond + chmod +x debian/falcond/usr/bin/falcond + # Copy profiles + cp -r falcond-profiles/usr/share/falcond/profiles debian/falcond/usr/share/falcond/ + +override_dh_installsystemd: + dh_installsystemd --name=falcond + +override_dh_clean: + dh_clean + rm -rf falcond-profiles \ No newline at end of file diff --git a/falcond/debian/source/format b/falcond/debian/source/format new file mode 100644 index 0000000..163aaf8 --- /dev/null +++ b/falcond/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/falcond/src/config.zig b/falcond/src/config.zig new file mode 100644 index 0000000..b491a07 --- /dev/null +++ b/falcond/src/config.zig @@ -0,0 +1,52 @@ +const std = @import("std"); +const fs = std.fs; +const confloader = @import("confloader.zig"); +const vcache_setting = @import("vcache_setting.zig"); +const scx_scheds = @import("scx_scheds.zig"); + +pub const Config = struct { + enable_performance_mode: bool = true, + scx_sched: scx_scheds.ScxScheduler = .none, + scx_sched_props: scx_scheds.ScxSchedModes = .gaming, + vcache_mode: vcache_setting.VCacheMode = .none, + + pub fn load(allocator: std.mem.Allocator) !Config { + const config_path = "/etc/falcond/config.conf"; + var config = Config{}; + const file = fs.openFileAbsolute(config_path, .{}) catch |err| switch (err) { + error.FileNotFound => { + try config.save(); + return config; + }, + else => return err, + }; + defer file.close(); + + config = try confloader.loadConf(Config, allocator, config_path); + return config; + } + + pub fn save(self: Config) !void { + const config_dir = "/etc/falcond/"; + + fs.makeDirAbsolute(config_dir) catch |err| switch (err) { + error.PathAlreadyExists => {}, + else => return err, + }; + + var file_buf: [fs.max_path_bytes]u8 = undefined; + const config_path = try std.fmt.bufPrint( + &file_buf, + "{s}/config.conf", + .{config_dir}, + ); + + const file = try fs.createFileAbsolute(config_path, .{}); + defer file.close(); + + try file.writer().print("enable_performance_mode = {}\n", .{self.enable_performance_mode}); + try file.writer().print("scx_sched = {s}\n", .{@tagName(self.scx_sched)}); + try file.writer().print("scx_sched_props = {s}\n", .{@tagName(self.scx_sched_props)}); + try file.writer().print("vcache_mode = {s}\n", .{@tagName(self.vcache_mode)}); + } +}; diff --git a/falcond/src/confloader.zig b/falcond/src/confloader.zig new file mode 100644 index 0000000..f31fc8e --- /dev/null +++ b/falcond/src/confloader.zig @@ -0,0 +1,54 @@ +const std = @import("std"); +const Parser = @import("parser.zig").Parser; + +pub fn loadConf(comptime T: type, allocator: std.mem.Allocator, path: []const u8) !T { + const file = try std.fs.openFileAbsolute(path, .{ + .mode = .read_only, + .lock = .none, + .lock_nonblocking = false, + }); + defer file.close(); + + const size = try file.getEndPos(); + if (size > std.math.maxInt(u32)) return error.FileTooLarge; + + var stack_buffer: [4096]u8 = undefined; + const buffer = if (size <= stack_buffer.len) stack_buffer[0..size] else try allocator.alloc(u8, size); + defer if (size 
> stack_buffer.len) allocator.free(buffer); + + const bytes_read = try file.readAll(buffer); + if (bytes_read != size) return error.UnexpectedEOF; + + var parser = Parser(T).init(allocator, buffer); + return try parser.parse(); +} + +pub fn loadConfDir(comptime T: type, allocator: std.mem.Allocator, dir_path: []const u8) !std.ArrayList(T) { + var result = std.ArrayList(T).init(allocator); + errdefer result.deinit(); + + var dir = try std.fs.openDirAbsolute(dir_path, .{ .iterate = true }); + defer dir.close(); + + var walker = try dir.walk(allocator); + defer walker.deinit(); + + while (try walker.next()) |entry| { + if (entry.kind == .file and std.mem.endsWith(u8, entry.path, ".conf")) { + const path = try std.fs.path.join(allocator, &.{ dir_path, entry.path }); + defer allocator.free(path); + + const file = try std.fs.openFileAbsolute(path, .{}); + defer file.close(); + + const content = try file.readToEndAlloc(allocator, std.math.maxInt(usize)); + defer allocator.free(content); + + var parser = Parser(T).init(allocator, content); + const parsed = try parser.parse(); + try result.append(parsed); + } + } + + return result; +} diff --git a/falcond/src/daemon.zig b/falcond/src/daemon.zig new file mode 100644 index 0000000..8710d63 --- /dev/null +++ b/falcond/src/daemon.zig @@ -0,0 +1,211 @@ +const std = @import("std"); +const ProfileManager = @import("profile.zig").ProfileManager; +const Profile = @import("profile.zig").Profile; +const Config = @import("config.zig").Config; +const linux = std.os.linux; +const posix = std.posix; +const PowerProfiles = @import("power_profiles.zig").PowerProfiles; +const scx_scheds = @import("scx_scheds.zig"); + +pub const Daemon = struct { + allocator: std.mem.Allocator, + profile_manager: ProfileManager, + oneshot: bool, + known_pids: ?std.AutoHashMap(u32, *const Profile), + power_profiles: *PowerProfiles, + + const Self = @This(); + + pub fn init(allocator: std.mem.Allocator, config: ?*Config, oneshot: bool, power_profiles: *PowerProfiles) !Self { + var profile_manager = ProfileManager.init(allocator, power_profiles, config.?); + try profile_manager.loadProfiles(oneshot); + + try scx_scheds.init(allocator); + + return Self{ + .allocator = allocator, + .profile_manager = profile_manager, + .oneshot = oneshot, + .known_pids = null, + .power_profiles = power_profiles, + }; + } + + fn scanProcesses(allocator: std.mem.Allocator) !std.AutoHashMap(u32, []const u8) { + var pids = std.AutoHashMap(u32, []const u8).init(allocator); + + const proc_fd = try std.posix.open("/proc", .{ + .ACCMODE = .RDONLY, + .DIRECTORY = true, + }, 0); + defer std.posix.close(proc_fd); + + var buffer: [8192]u8 = undefined; + while (true) { + const nread = linux.syscall3(.getdents64, @as(usize, @intCast(proc_fd)), @intFromPtr(&buffer), buffer.len); + + if (nread == 0) break; + if (nread < 0) return error.ReadDirError; + + var pos: usize = 0; + while (pos < nread) { + const dirent = @as(*align(1) linux.dirent64, @ptrCast(&buffer[pos])); + if (dirent.type == linux.DT.DIR) { + const name = std.mem.sliceTo(@as([*:0]u8, @ptrCast(&dirent.name)), 0); + if (std.fmt.parseInt(u32, name, 10)) |pid| { + if (getProcessNameFromPid(allocator, pid)) |proc_name| { + try pids.put(pid, proc_name); + } else |_| {} + } else |_| {} + } + pos += dirent.reclen; + } + } + + return pids; + } + + fn getProcessNameFromPid(allocator: std.mem.Allocator, pid: u32) ![]const u8 { + var path_buf: [64]u8 = undefined; + const path = try std.fmt.bufPrint(&path_buf, "/proc/{d}/cmdline", .{pid}); + + const file = try 
std.fs.openFileAbsolute(path, .{}); + defer file.close(); + + var buffer: [4096]u8 = undefined; + const bytes = try file.readAll(&buffer); + if (bytes == 0) return error.EmptyFile; + + const end = std.mem.indexOfScalar(u8, buffer[0..bytes], 0) orelse bytes; + const cmdline = buffer[0..end]; + + const last_unix = std.mem.lastIndexOfScalar(u8, cmdline, '/') orelse 0; + const last_windows = std.mem.lastIndexOfScalar(u8, cmdline, '\\') orelse 0; + const last_sep = @max(last_unix, last_windows); + + const exe_name = if (last_sep > 0) + cmdline[last_sep + 1 ..] + else + cmdline; + + return try allocator.dupe(u8, exe_name); + } + + pub fn checkProcesses(self: *Self) !void { + var arena = std.heap.ArenaAllocator.init(self.allocator); + defer arena.deinit(); + const arena_allocator = arena.allocator(); + + var processes = try scanProcesses(arena_allocator); + defer { + var it = processes.iterator(); + while (it.next()) |entry| { + arena_allocator.free(entry.value_ptr.*); + } + processes.deinit(); + } + + var it = processes.iterator(); + while (it.next()) |entry| { + const pid = entry.key_ptr.*; + const process_name = entry.value_ptr.*; + + if (!self.oneshot) { + if (self.known_pids) |*known| { + if (!known.contains(pid)) { + if (try self.profile_manager.matchProcess(arena.allocator(), try std.fmt.allocPrint(arena_allocator, "{d}", .{pid}), process_name)) |profile| { + try known.put(pid, profile); + try self.profile_manager.activateProfile(profile); + } + } + } + } else { + try self.handleProcess(try std.fmt.allocPrint(arena_allocator, "{d}", .{pid}), process_name); + } + } + + if (!self.oneshot) { + if (self.known_pids) |*known| { + var known_it = known.iterator(); + while (known_it.next()) |entry| { + const pid = entry.key_ptr.*; + if (!processes.contains(pid)) { + try self.handleProcessExit(try std.fmt.allocPrint(arena_allocator, "{d}", .{pid})); + } + } + } + } + } + + pub fn run(self: *Self) !void { + if (!self.oneshot) { + self.known_pids = std.AutoHashMap(u32, *const Profile).init(self.allocator); + } + + try self.checkProcesses(); + + if (self.oneshot) { + return; + } + + while (true) { + try self.checkProcesses(); + std.time.sleep(std.time.ns_per_s * 3); + } + } + + pub fn deinit(self: *Self) void { + scx_scheds.deinit(); + if (self.known_pids) |*pids| { + pids.deinit(); + } + self.profile_manager.deinit(); + } + + pub fn handleProcess(self: *Self, pid: []const u8, process_name: []const u8) !void { + var arena = std.heap.ArenaAllocator.init(self.allocator); + defer arena.deinit(); + + if (try self.profile_manager.matchProcess(arena.allocator(), pid, process_name)) |profile| { + if (!self.oneshot) { + if (self.known_pids) |*known| { + try known.put(try std.fmt.parseInt(u32, pid, 10), profile); + } + } + try self.profile_manager.activateProfile(profile); + } + } + + pub fn handleProcessExit(self: *Self, pid: []const u8) !void { + if (self.known_pids) |*pids| { + const pid_num = std.fmt.parseInt(u32, pid, 10) catch |err| { + std.log.warn("Failed to parse PID: {}", .{err}); + return; + }; + + if (pids.get(pid_num)) |profile| { + var found_profile = false; + + if (self.profile_manager.active_profile) |active| { + if (active == profile) { + std.log.info("Process {s} has terminated", .{profile.name}); + try self.profile_manager.deactivateProfile(active); + found_profile = true; + } + } + + if (!found_profile) { + for (self.profile_manager.queued_profiles.items, 0..) 
|queued, i| { + if (queued == profile) { + std.log.info("Process {s} has terminated", .{profile.name}); + _ = self.profile_manager.queued_profiles.orderedRemove(i); + break; + } + } + } + + _ = pids.remove(pid_num); + } + } + } +}; diff --git a/falcond/src/dbus.zig b/falcond/src/dbus.zig new file mode 100644 index 0000000..4554ddf --- /dev/null +++ b/falcond/src/dbus.zig @@ -0,0 +1,216 @@ +const std = @import("std"); + +pub const DBusError = error{ + CommandFailed, + ParseError, + InvalidValue, + NoConnection, +} || std.fs.File.OpenError || std.posix.WriteError || std.posix.ReadError || std.process.Child.RunError || std.process.Child.SpawnError || error{ + ProcessFdQuotaExceeded, + SystemFdQuotaExceeded, + SystemResources, + OperationAborted, + WouldBlock, + InvalidHandle, + Unexpected, + InputOutput, + OutOfMemory, + ResourceLimitReached, + StderrStreamTooLong, + StdoutStreamTooLong, + CurrentWorkingDirectoryUnlinked, + InvalidBatchScriptArg, + InvalidExe, + FileSystem, + Overflow, + InvalidCharacter, + InvalidUserId, + PermissionDenied, + ProcessAlreadyExec, + InvalidProcessGroupId, + InvalidName, + WaitAbandoned, + WaitTimeOut, + NetworkSubsystemFailed, +}; + +/// Simple DBus interface that uses busctl under the hood +pub const DBus = struct { + allocator: std.mem.Allocator, + bus_name: []const u8, + object_path: []const u8, + interface: []const u8, + + pub fn init( + allocator: std.mem.Allocator, + bus_name: []const u8, + object_path: []const u8, + interface: []const u8, + ) DBus { + return .{ + .allocator = allocator, + .bus_name = bus_name, + .object_path = object_path, + .interface = interface, + }; + } + + /// Get a property value as a string + pub fn getProperty(self: *const DBus, property: []const u8) ![]const u8 { + var argv = [_][]const u8{ + "busctl", + "--system", + "get-property", + self.bus_name, + self.object_path, + self.interface, + property, + }; + + const max_output_size = 1024 * 1024; // 1MB should be enough + const output = try std.process.Child.run(.{ + .allocator = self.allocator, + .argv = &argv, + .max_output_bytes = max_output_size, + }); + defer self.allocator.free(output.stderr); + defer self.allocator.free(output.stdout); + + if (output.term.Exited != 0) { + std.log.err("busctl failed: {s}", .{output.stderr}); + return DBusError.CommandFailed; + } + + // busctl outputs values in two formats: + // 1. String: "s \"value\"" + // 2. 
Integer: "u 123" + const trimmed = std.mem.trim(u8, output.stdout, " \n\r\t"); + + // Try string format first + var it = std.mem.splitScalar(u8, trimmed, '"'); + _ = it.next(); // Skip type + if (it.next()) |value| { + return self.allocator.dupe(u8, value); + } + + // Try integer format + it = std.mem.splitScalar(u8, trimmed, ' '); + _ = it.next(); // Skip type + const value = it.next() orelse { + // If property doesn't exist or is empty + if (std.mem.indexOf(u8, output.stdout, "Unknown property") != null) { + return self.allocator.dupe(u8, ""); + } + return DBusError.ParseError; + }; + + return self.allocator.dupe(u8, value); + } + + /// Get a property value as a string array + pub fn getPropertyArray(self: *const DBus, property: []const u8) ![][]const u8 { + var result = std.ArrayList([]const u8).init(self.allocator); + errdefer { + for (result.items) |item| { + self.allocator.free(item); + } + result.deinit(); + } + + const argv = [_][]const u8{ + "busctl", + "--system", + "get-property", + self.bus_name, + self.object_path, + self.interface, + property, + }; + + const max_output_size = 1024 * 1024; // 1MB should be enough + const output = try std.process.Child.run(.{ + .allocator = self.allocator, + .argv = &argv, + .max_output_bytes = max_output_size, + }); + defer self.allocator.free(output.stderr); + defer self.allocator.free(output.stdout); + + if (output.term.Exited != 0) { + std.log.err("busctl failed: {s}", .{output.stderr}); + return DBusError.CommandFailed; + } + + // busctl outputs arrays in the format: + // as 2 "value1" "value2" + var it = std.mem.splitScalar(u8, std.mem.trim(u8, output.stdout, " \n\r\t"), '"'); + _ = it.next(); // Skip type + count + + while (it.next()) |value| { + // Skip empty strings and spaces between quotes + if (value.len == 0 or std.mem.eql(u8, std.mem.trim(u8, value, " "), "")) continue; + try result.append(try self.allocator.dupe(u8, value)); + } + + return result.toOwnedSlice(); + } + + /// Set a property value + pub fn setProperty(self: *const DBus, property: []const u8, value: []const u8) !void { + const argv = [_][]const u8{ + "busctl", + "--system", + "set-property", + self.bus_name, + self.object_path, + self.interface, + property, + "s", + value, + }; + + const max_output_size = 1024; // Small size since we don't expect much output + const output = try std.process.Child.run(.{ + .allocator = self.allocator, + .argv = &argv, + .max_output_bytes = max_output_size, + }); + defer self.allocator.free(output.stderr); + defer self.allocator.free(output.stdout); + + if (output.term.Exited != 0) { + std.log.err("busctl failed: {s}", .{output.stderr}); + return DBusError.CommandFailed; + } + } + + /// Call a DBus method + pub fn callMethod(self: *const DBus, method: []const u8, args: []const []const u8) !void { + var argv = std.ArrayList([]const u8).init(self.allocator); + defer argv.deinit(); + + try argv.appendSlice(&[_][]const u8{ + "busctl", + "--system", + "call", + self.bus_name, + self.object_path, + self.interface, + method, + }); + + try argv.appendSlice(args); + + const result = try std.process.Child.run(.{ + .allocator = self.allocator, + .argv = argv.items, + }); + defer self.allocator.free(result.stderr); + defer self.allocator.free(result.stdout); + + if (result.term.Exited != 0) { + std.log.err("busctl failed: {s}", .{result.stderr}); + return DBusError.CommandFailed; + } + } +}; diff --git a/falcond/src/main.zig b/falcond/src/main.zig new file mode 100644 index 0000000..237e49a --- /dev/null +++ b/falcond/src/main.zig @@ -0,0 +1,104 
@@ +const std = @import("std"); +const Daemon = @import("daemon.zig").Daemon; +const Config = @import("config.zig").Config; +const PowerProfiles = @import("power_profiles.zig").PowerProfiles; + +pub const std_options = std.Options{ + .log_level = .debug, + .log_scope_levels = &[_]std.log.ScopeLevel{ + .{ .scope = .default, .level = .debug }, + }, +}; + +const AllocTracker = struct { + allocs: usize = 0, + deallocs: usize = 0, + resizes: usize = 0, + + pub fn trackAlloc(self: *@This()) void { + self.allocs += 1; + } + + pub fn trackDealloc(self: *@This()) void { + self.deallocs += 1; + } + + pub fn trackResize(self: *@This()) void { + self.resizes += 1; + } +}; + +fn alloc(ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 { + var t: *AllocTracker = @ptrCast(@alignCast(ctx)); + t.trackAlloc(); + return gpa_vtable.alloc(gpa_ptr, len, ptr_align, ret_addr); +} + +fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool { + var t: *AllocTracker = @ptrCast(@alignCast(ctx)); + t.trackResize(); + return gpa_vtable.resize(gpa_ptr, buf, log2_buf_align, new_len, ret_addr); +} + +fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void { + var t: *AllocTracker = @ptrCast(@alignCast(ctx)); + t.trackDealloc(); + gpa_vtable.free(gpa_ptr, buf, log2_buf_align, ret_addr); +} + +var gpa_vtable: *const std.mem.Allocator.VTable = undefined; +var gpa_ptr: *anyopaque = undefined; + +pub fn main() !void { + std.log.info("Starting falcond...", .{}); + + var tracker = AllocTracker{}; + var gpa = std.heap.GeneralPurposeAllocator(.{ + .verbose_log = false, + .enable_memory_limit = true, + }){}; + defer { + const leaked = gpa.deinit(); + if (leaked == .leak) { + std.log.err("Memory leaks detected!", .{}); + } + std.log.info("Memory operations - allocs: {}, deallocs: {}, resizes: {}", .{ + tracker.allocs, + tracker.deallocs, + tracker.resizes, + }); + } + + gpa_vtable = gpa.allocator().vtable; + gpa_ptr = gpa.allocator().ptr; + const allocator = std.mem.Allocator{ + .ptr = &tracker, + .vtable = &std.mem.Allocator.VTable{ + .alloc = alloc, + .resize = resize, + .free = free, + }, + }; + + var config = try Config.load(allocator); + var power_profiles = try PowerProfiles.init(allocator, &config); + defer power_profiles.deinit(); + + if (!power_profiles.isPerformanceAvailable()) { + std.log.warn("Performance profile not available - power profile management disabled", .{}); + } else { + std.log.info("Performance profile available - power profile management enabled", .{}); + } + + const args = try std.process.argsAlloc(allocator); + defer std.process.argsFree(allocator, args); + + const oneshot = for (args) |arg| { + if (std.mem.eql(u8, arg, "--oneshot")) break true; + } else false; + + var daemon = try Daemon.init(allocator, &config, oneshot, power_profiles); + defer daemon.deinit(); + + try daemon.run(); +} diff --git a/falcond/src/parser.zig b/falcond/src/parser.zig new file mode 100644 index 0000000..074b721 --- /dev/null +++ b/falcond/src/parser.zig @@ -0,0 +1,366 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Vector = std.meta.Vector; + +pub const ParseError = error{ + InvalidSyntax, + UnexpectedCharacter, + UnterminatedString, + InvalidNumber, + InvalidIdentifier, + UnknownField, +}; + +pub fn Parser(comptime T: type) type { + return struct { + const Self = @This(); + + content: []const u8, + pos: usize = 0, + allocator: std.mem.Allocator, + + pub fn init(allocator: std.mem.Allocator, content: []const u8) Self { + 
return .{ + .content = content, + .allocator = allocator, + }; + } + + fn skipWhitespace(self: *Self) void { + const v_size = std.simd.suggestVectorLength(u8) orelse 32; + const Vec = @Vector(v_size, u8); + + while (self.pos + v_size <= self.content.len) { + const chunk: Vec = self.content[self.pos..][0..v_size].*; + const spaces = chunk == @as(Vec, @splat(@as(u8, ' '))); + const tabs = chunk == @as(Vec, @splat(@as(u8, '\t'))); + const newlines = chunk == @as(Vec, @splat(@as(u8, '\n'))); + const returns = chunk == @as(Vec, @splat(@as(u8, '\r'))); + const comments = chunk == @as(Vec, @splat(@as(u8, '#'))); + + const whitespace = @reduce(.Or, spaces) or @reduce(.Or, tabs) or + @reduce(.Or, newlines) or @reduce(.Or, returns) or + @reduce(.Or, comments); + if (!whitespace) break; + + if (@reduce(.Or, comments)) { + while (self.pos < self.content.len and self.content[self.pos] != '\n') : (self.pos += 1) {} + continue; + } + + const space_mask = @select(u8, spaces, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + const tab_mask = @select(u8, tabs, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + const newline_mask = @select(u8, newlines, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + const return_mask = @select(u8, returns, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + + const mask = space_mask | tab_mask | newline_mask | return_mask; + const mask_bits = @reduce(.Or, mask); + const leading = @ctz(mask_bits); + if (leading == v_size) { + self.pos += v_size; + } else { + self.pos += leading; + break; + } + } + + while (self.pos < self.content.len) : (self.pos += 1) { + const c = self.content[self.pos]; + switch (c) { + ' ', '\t', '\r', '\n' => continue, + '#' => { + while (self.pos < self.content.len and self.content[self.pos] != '\n') : (self.pos += 1) {} + }, + else => break, + } + } + } + + fn parseString(self: *Self) ![]const u8 { + if (self.pos >= self.content.len or self.content[self.pos] != '"') + return error.InvalidSyntax; + + self.pos += 1; + const start = self.pos; + + while (self.pos < self.content.len) : (self.pos += 1) { + switch (self.content[self.pos]) { + '"' => { + const str = try self.allocator.dupe(u8, self.content[start..self.pos]); + self.pos += 1; + return str; + }, + '\\' => return error.InvalidSyntax, + else => {}, + } + } + return error.UnterminatedString; + } + + fn parseNumber(self: *Self) !i64 { + self.skipWhitespace(); + const start = self.pos; + while (self.pos < self.content.len) : (self.pos += 1) { + const c = self.content[self.pos]; + if (!std.ascii.isDigit(c) and c != '-') break; + } + const num = std.fmt.parseInt(i64, self.content[start..self.pos], 10) catch return error.InvalidNumber; + return num; + } + + fn parseArray(self: *Self) ![]const i64 { + if (self.pos >= self.content.len or self.content[self.pos] != '[') + return error.InvalidSyntax; + + self.pos += 1; + var values: [32]i64 = undefined; + var count: usize = 0; + + while (self.pos < self.content.len) { + self.skipWhitespace(); + if (self.content[self.pos] == ']') { + self.pos += 1; + return try self.allocator.dupe(i64, values[0..count]); + } + + if (count >= values.len) return error.InvalidSyntax; + const num = try self.parseNumber(); + values[count] = num; + count += 1; + + self.skipWhitespace(); + if (self.content[self.pos] == ',') { + self.pos += 1; + continue; + } + if (self.content[self.pos] == ']') { + self.pos += 1; + return try self.allocator.dupe(i64, values[0..count]); + } + return error.InvalidSyntax; + } + return 
error.InvalidSyntax; + } + + fn parseIdentifier(self: *Self) ![]const u8 { + const start = self.pos; + const v_size = std.simd.suggestVectorLength(u8) orelse 32; + const Vec = @Vector(v_size, u8); + + while (self.pos + v_size <= self.content.len) { + const chunk: Vec = self.content[self.pos..][0..v_size].*; + + const lower_bound = chunk >= @as(Vec, @splat(@as(u8, 'a'))); + const upper_bound = chunk <= @as(Vec, @splat(@as(u8, 'z'))); + const alpha_lower_mask = @select(u8, lower_bound, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))) & + @select(u8, upper_bound, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + + const upper_lower = chunk >= @as(Vec, @splat(@as(u8, 'A'))); + const upper_upper = chunk <= @as(Vec, @splat(@as(u8, 'Z'))); + const alpha_upper_mask = @select(u8, upper_lower, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))) & + @select(u8, upper_upper, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + + const digit_lower = chunk >= @as(Vec, @splat(@as(u8, '0'))); + const digit_upper = chunk <= @as(Vec, @splat(@as(u8, '9'))); + const digit_mask = @select(u8, digit_lower, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))) & + @select(u8, digit_upper, @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + + const underscore_mask = @select(u8, chunk == @as(Vec, @splat(@as(u8, '_'))), @as(Vec, @splat(@as(u8, 1))), @as(Vec, @splat(@as(u8, 0)))); + + const mask = alpha_lower_mask | alpha_upper_mask | digit_mask | underscore_mask; + const valid = @reduce(.Or, mask) != 0; + if (!valid) break; + + const mask_bits = @reduce(.Or, mask); + const leading = @ctz(mask_bits); + if (leading == v_size) { + self.pos += v_size; + } else { + self.pos += leading; + break; + } + } + + while (self.pos < self.content.len) : (self.pos += 1) { + const c = self.content[self.pos]; + if (!std.ascii.isAlphabetic(c) and !std.ascii.isDigit(c) and c != '_') break; + } + + if (start == self.pos) return error.InvalidIdentifier; + return self.content[start..self.pos]; + } + + pub fn parse(self: *Self) !T { + var result: T = std.mem.zeroInit(T, .{}); + + while (self.pos < self.content.len) { + self.skipWhitespace(); + if (self.pos >= self.content.len) break; + + const field_name = try self.parseIdentifier(); + self.skipWhitespace(); + + if (self.pos >= self.content.len or self.content[self.pos] != '=') + return error.InvalidSyntax; + self.pos += 1; + + self.skipWhitespace(); + + inline for (std.meta.fields(T)) |field| { + if (std.mem.eql(u8, field_name, field.name)) { + switch (@typeInfo(field.type)) { + .bool => { + const ident = try self.parseIdentifier(); + if (std.mem.eql(u8, ident, "true")) { + @field(result, field.name) = true; + } else if (std.mem.eql(u8, ident, "false")) { + @field(result, field.name) = false; + } else return error.InvalidSyntax; + }, + .int => { + @field(result, field.name) = @intCast(try self.parseNumber()); + }, + .array => |array_info| { + const array = try self.parseArray(); + if (array.len > array_info.len) return error.InvalidSyntax; + @field(result, field.name) = undefined; + var dest = &@field(result, field.name); + @memcpy(dest[0..array.len], array); + }, + .@"enum" => { + const ident = try self.parseIdentifier(); + inline for (std.meta.fields(field.type)) |enum_field| { + if (std.mem.eql(u8, ident, enum_field.name)) { + @field(result, field.name) = @field(field.type, enum_field.name); + break; + } + } + }, + .pointer => |ptr_info| { + if (ptr_info.size != .Slice) return error.InvalidSyntax; + switch (ptr_info.child) { + 
u8 => { + @field(result, field.name) = try self.parseString(); + }, + i64 => { + @field(result, field.name) = try self.parseArray(); + }, + else => return error.InvalidSyntax, + } + }, + else => return error.InvalidSyntax, + } + break; + } + } + } + + return result; + } + }; +} + +// Example usage: +const Config = struct { + // Boolean tests + bool_true: bool = false, + bool_false: bool = true, + + // Integer tests + int_zero: i64 = 1, + int_positive: i64 = 0, + int_negative: i64 = 42, + int_small: u32 = 16, + + // Array tests + array_empty: [4]i64 = .{ 0, 0, 0, 0 }, + array_full: [4]i64 = .{ 0, 1, 2, 3 }, + array_partial: [4]i64 = .{ 9, 8, 0, 0 }, + + // Enum tests + enum_first: enum { First, Second, Third } = .Second, + enum_last: enum { One, Two, Last } = .One, + lscpu_core_strategy: enum { HighestFreq, Sequential } = .HighestFreq, + + // String tests + string_empty: []const u8 = "", + string_simple: []const u8 = "hello", + string_spaces: []const u8 = "hello world", + string_special: []const u8 = "hello_123", +}; + +test "parse config" { + const content = + \\bool_true = true + \\bool_false = false + \\int_zero = 0 + \\int_positive = 42 + \\int_negative = -123 + \\int_small = 16 + \\array_empty = [] + \\array_full = [0,1,2,3] + \\array_partial = [9,8] + \\enum_first = First + \\enum_last = Last + \\lscpu_core_strategy = HighestFreq + \\string_empty = "" + \\string_simple = "hello" + \\string_spaces = "hello world" + \\string_special = "hello_123" + ; + + var parser = Parser(Config).init(std.heap.page_allocator, content); + const config = try parser.parse(); + + // Boolean tests + try std.testing.expect(config.bool_true); + try std.testing.expect(!config.bool_false); + + // Integer tests + try std.testing.expectEqual(@as(i64, 0), config.int_zero); + try std.testing.expectEqual(@as(i64, 42), config.int_positive); + try std.testing.expectEqual(@as(i64, -123), config.int_negative); + try std.testing.expectEqual(@as(u32, 16), config.int_small); + + // Array tests + try std.testing.expectEqualSlices(i64, &[_]i64{ 0, 0, 0, 0 }, &config.array_empty); + try std.testing.expectEqualSlices(i64, &[_]i64{ 0, 1, 2, 3 }, &config.array_full); + try std.testing.expectEqualSlices(i64, &[_]i64{ 9, 8, 0, 0 }, &config.array_partial); + + // Enum tests + try std.testing.expectEqual(@as(@TypeOf(config.enum_first), .First), config.enum_first); + try std.testing.expectEqual(@as(@TypeOf(config.enum_last), .Last), config.enum_last); + try std.testing.expectEqual(@as(@TypeOf(config.lscpu_core_strategy), .HighestFreq), config.lscpu_core_strategy); + + // String tests + try std.testing.expectEqualStrings("", config.string_empty); + try std.testing.expectEqualStrings("hello", config.string_simple); + try std.testing.expectEqualStrings("hello world", config.string_spaces); + try std.testing.expectEqualStrings("hello_123", config.string_special); +} + +test "parse with missing fields" { + const TestConfig = struct { + name: []const u8 = "default", + cores: []const i64 = &[_]i64{ 0, 1 }, + enabled: bool = true, + count: u32 = 42, + }; + + const content = + \\name = "test" + \\cores = [5,6,7] + ; + + var parser = Parser(TestConfig).init(std.testing.allocator, content); + const config = try parser.parse(); + defer { + std.testing.allocator.free(config.name); + std.testing.allocator.free(config.cores); + } + + try std.testing.expectEqualStrings("test", config.name); + try std.testing.expectEqualSlices(i64, &[_]i64{ 5, 6, 7 }, config.cores); + try std.testing.expect(config.enabled == true); // default value + try 
std.testing.expect(config.count == 42); // default value +} diff --git a/falcond/src/power_profiles.zig b/falcond/src/power_profiles.zig new file mode 100644 index 0000000..8f16fc5 --- /dev/null +++ b/falcond/src/power_profiles.zig @@ -0,0 +1,126 @@ +const std = @import("std"); +const dbus = @import("dbus.zig"); +const Config = @import("config.zig").Config; + +pub const PowerProfiles = struct { + const PP_NAME = "org.freedesktop.UPower.PowerProfiles"; + const PP_PATH = "/org/freedesktop/UPower/PowerProfiles"; + const PP_IFACE = "org.freedesktop.UPower.PowerProfiles"; + + allocator: std.mem.Allocator, + dbus: dbus.DBus, + config: *Config, + original_profile: ?[]const u8, + has_performance: bool, + + pub fn init(allocator: std.mem.Allocator, config: *Config) !*PowerProfiles { + var self = try allocator.create(PowerProfiles); + errdefer allocator.destroy(self); + + if (!config.enable_performance_mode) { + std.log.info("Performance mode disabled in config", .{}); + self.* = .{ + .allocator = allocator, + .dbus = undefined, + .config = config, + .original_profile = null, + .has_performance = false, + }; + return self; + } + + self.* = .{ + .allocator = allocator, + .dbus = dbus.DBus.init(allocator, PP_NAME, PP_PATH, PP_IFACE), + .config = config, + .original_profile = null, + .has_performance = false, + }; + + const profiles = try self.getAvailableProfiles(allocator); + defer { + for (profiles) |profile| { + allocator.free(profile); + } + allocator.free(profiles); + } + + std.log.info("Available power profiles:", .{}); + for (profiles) |profile| { + std.log.info(" - {s}", .{profile}); + } + + for (profiles) |profile| { + if (std.mem.eql(u8, profile, "performance")) { + self.has_performance = true; + break; + } + } + + return self; + } + + pub fn deinit(self: *PowerProfiles) void { + if (self.original_profile) |profile| { + self.allocator.free(profile); + } + self.allocator.destroy(self); + } + + pub fn isPerformanceAvailable(self: *const PowerProfiles) bool { + return self.has_performance; + } + + pub fn getAvailableProfiles(self: *PowerProfiles, alloc: std.mem.Allocator) ![]const []const u8 { + var result = std.ArrayList([]const u8).init(alloc); + errdefer { + for (result.items) |item| { + alloc.free(item); + } + result.deinit(); + } + + const profiles_raw = try self.dbus.getPropertyArray("Profiles"); + defer { + for (profiles_raw) |item| { + alloc.free(item); + } + alloc.free(profiles_raw); + } + + var i: usize = 0; + while (i < profiles_raw.len) : (i += 1) { + const item = profiles_raw[i]; + if (std.mem.eql(u8, item, "Profile")) { + if (i + 2 < profiles_raw.len) { + try result.append(try alloc.dupe(u8, profiles_raw[i + 2])); + } + } + } + + return result.toOwnedSlice(); + } + + pub fn enablePerformanceMode(self: *PowerProfiles) !void { + if (!self.has_performance) { + std.log.warn("Performance mode not available", .{}); + return; + } + + if (self.original_profile == null) { + self.original_profile = try self.dbus.getProperty("ActiveProfile"); + } + + try self.dbus.setProperty("ActiveProfile", "performance"); + } + + pub fn disablePerformanceMode(self: *PowerProfiles) !void { + if (!self.has_performance) return; + + if (self.original_profile) |profile| { + try self.dbus.setProperty("ActiveProfile", profile); + self.allocator.free(profile); + self.original_profile = null; + } + } +}; diff --git a/falcond/src/profile.zig b/falcond/src/profile.zig new file mode 100644 index 0000000..4f990af --- /dev/null +++ b/falcond/src/profile.zig @@ -0,0 +1,269 @@ +const std = @import("std"); +const fs = 
std.fs; +const confloader = @import("confloader.zig"); +const PowerProfiles = @import("power_profiles.zig").PowerProfiles; +const Config = @import("config.zig").Config; +const vcache_setting = @import("vcache_setting.zig"); +const scx_scheds = @import("scx_scheds.zig"); +const Child = std.process.Child; +const linux = std.os.linux; +const CPU_SETSIZE = 1024; +const CPU_SET = extern struct { + bits: [CPU_SETSIZE / 64]u64, +}; + +pub const Profile = struct { + const LscpuCoreStrategy = enum { HighestFreq, Sequential }; + + name: []const u8, + performance_mode: bool = false, + scx_sched: scx_scheds.ScxScheduler = .none, + scx_sched_props: scx_scheds.ScxSchedModes = .gaming, + vcache_mode: vcache_setting.VCacheMode = .cache, + + pub fn matches(self: *const Profile, process_name: []const u8) bool { + const is_match = std.ascii.eqlIgnoreCase(self.name, process_name); + if (is_match) { + std.log.info("Found match: {s} for process {s}", .{ self.name, process_name }); + } + return is_match; + } +}; + +const CacheEntry = struct { + pid: u32, + timestamp: i64, + is_proton: bool, +}; + +pub const ProfileManager = struct { + allocator: std.mem.Allocator, + profiles: std.ArrayList(Profile), + proton_profile: ?*const Profile, + active_profile: ?*const Profile = null, + queued_profiles: std.ArrayList(*const Profile), + power_profiles: *PowerProfiles, + config: *const Config, + + // Don't match Wine/Proton infrastructure + const system_processes = [_][]const u8{ + "steam.exe", + "services.exe", + "winedevice.exe", + "plugplay.exe", + "svchost.exe", + "explorer.exe", + "rpcss.exe", + "tabtip.exe", + "wineboot.exe", + "rundll32.exe", + "iexplore.exe", + "conhost.exe", + "crashpad_handler.exe", + "iscriptevaluator.exe", + "VC_redist.x86.exe", + "VC_redist.x64.exe", + "cmd.exe", + "REDEngineErrorReporter.exe", + "REDprelauncher.exe", + "SteamService.exe", + "start.exe", + }; + + pub fn init(allocator: std.mem.Allocator, power_profiles: *PowerProfiles, config: *const Config) ProfileManager { + return .{ + .allocator = allocator, + .profiles = std.ArrayList(Profile).init(allocator), + .proton_profile = null, + .queued_profiles = std.ArrayList(*const Profile).init(allocator), + .power_profiles = power_profiles, + .config = config, + }; + } + + pub fn activateProfile(self: *ProfileManager, profile: *const Profile) !void { + if (self.active_profile == null) { + std.log.info("Activating profile: {s}", .{profile.name}); + self.active_profile = profile; + + if (profile.performance_mode and self.power_profiles.isPerformanceAvailable()) { + std.log.info("Enabling performance mode for profile: {s}", .{profile.name}); + try self.power_profiles.enablePerformanceMode(); + } + + const effective_mode = if (self.config.vcache_mode != .none) + self.config.vcache_mode + else + profile.vcache_mode; + try vcache_setting.applyVCacheMode(effective_mode); + + // Apply scheduler settings, using global config override if set + const effective_sched = if (self.config.scx_sched != .none) + self.config.scx_sched + else + profile.scx_sched; + const effective_sched_mode = if (self.config.scx_sched != .none) + self.config.scx_sched_props + else + profile.scx_sched_props; + try scx_scheds.applyScheduler(self.allocator, effective_sched, effective_sched_mode); + } else { + std.log.info("Queueing profile: {s} (active: {s})", .{ profile.name, self.active_profile.?.name }); + try self.queued_profiles.append(profile); + } + } + + pub fn deactivateProfile(self: *ProfileManager, profile: *const Profile) !void { + if (self.active_profile == profile) { 
+ std.log.info("Deactivating profile: {s}", .{profile.name}); + self.active_profile = null; + + if (profile.performance_mode) { + std.log.info("Disabling performance mode for profile: {s}", .{profile.name}); + try self.power_profiles.disablePerformanceMode(); + } + + try vcache_setting.applyVCacheMode(.none); + try scx_scheds.restorePreviousState(self.allocator); + + if (self.queued_profiles.items.len > 0) { + const next_profile = self.queued_profiles.orderedRemove(0); + std.log.info("Activating next queued profile: {s}", .{next_profile.name}); + try self.activateProfile(next_profile); + } + } else { + for (self.queued_profiles.items, 0..) |queued, i| { + if (queued == profile) { + std.log.info("Removing queued profile: {s}", .{profile.name}); + _ = self.queued_profiles.orderedRemove(i); + break; + } + } + } + } + + pub fn loadProfiles(self: *ProfileManager, oneshot: bool) !void { + if (oneshot) { + try self.profiles.append(Profile{ + .name = try self.allocator.dupe(u8, "Hades3.exe"), + }); + std.log.info("Loaded oneshot profile: Hades3.exe", .{}); + + try self.profiles.append(Profile{ + .name = try self.allocator.dupe(u8, "Proton"), + }); + std.log.info("Loaded oneshot profile: Proton", .{}); + + self.proton_profile = &self.profiles.items[1]; + } else { + var profiles = try confloader.loadConfDir(Profile, self.allocator, "/usr/share/falcond/profiles"); + defer profiles.deinit(); + + try self.profiles.appendSlice(profiles.items); + + for (self.profiles.items) |*profile| { + if (std.mem.eql(u8, profile.name, "Proton")) { + self.proton_profile = profile; + std.log.info("Found Proton profile: {s}", .{profile.name}); + break; + } + } + + std.log.info("Loaded {d} profiles", .{self.profiles.items.len}); + } + } + + fn isProtonParent(_: *const ProfileManager, arena: std.mem.Allocator, pid: []const u8) !bool { + var path_buf: [std.fs.max_path_bytes]u8 = undefined; + const status_path = try std.fmt.bufPrint(&path_buf, "/proc/{s}/status", .{pid}); + + const file = std.fs.openFileAbsolute(status_path, .{}) catch |err| { + std.log.debug("Failed to open {s}: {}", .{ status_path, err }); + return switch (err) { + error.AccessDenied, error.FileNotFound => false, + else => err, + }; + }; + defer file.close(); + + const content = try file.readToEndAlloc(arena, std.math.maxInt(usize)); + + const ppid_line = std.mem.indexOf(u8, content, "PPid:") orelse return false; + const line_end = std.mem.indexOfScalarPos(u8, content, ppid_line, '\n') orelse content.len; + const ppid_start = ppid_line + 5; // Length of "PPid:" + const ppid = std.mem.trim(u8, content[ppid_start..line_end], " \t"); + + const parent_cmdline_path = try std.fmt.bufPrint(&path_buf, "/proc/{s}/cmdline", .{ppid}); + const parent_file = std.fs.openFileAbsolute(parent_cmdline_path, .{}) catch |err| { + std.log.debug("Failed to open parent cmdline {s}: {}", .{ parent_cmdline_path, err }); + return switch (err) { + error.AccessDenied, error.FileNotFound => false, + else => err, + }; + }; + defer parent_file.close(); + + const parent_content = try parent_file.readToEndAlloc(arena, std.math.maxInt(usize)); + return std.mem.indexOf(u8, parent_content, "proton") != null; + } + + fn isProtonGame(self: *ProfileManager, arena: std.mem.Allocator, pid: []const u8, process_name: []const u8) !bool { + if (!std.mem.endsWith(u8, process_name, ".exe")) return false; + + for (system_processes) |sys_proc| { + if (std.mem.eql(u8, process_name, sys_proc)) { + return false; + } + } + + return try self.isProtonParent(arena, pid); + } + + pub fn matchProcess(self: 
*ProfileManager, arena: std.mem.Allocator, pid: []const u8, process_name: []const u8) !?*const Profile { + const is_exe = std.mem.endsWith(u8, process_name, ".exe"); + var match: ?*const Profile = null; + + for (self.profiles.items) |*profile| { + const is_match = profile != self.proton_profile and profile.matches(process_name); + if (is_match) { + std.log.info("Matched profile {s} for process {s}", .{ profile.name, process_name }); + match = profile; + break; + } + } + + const should_check_proton = match == null and + is_exe and + self.proton_profile != null; + + if (should_check_proton) { + const is_system = for (system_processes) |sys_proc| { + if (std.mem.eql(u8, process_name, sys_proc)) break true; + } else false; + + if (!is_system) { + const is_proton = try self.isProtonParent(arena, pid); + if (is_proton) { + std.log.info("Found Proton game: {s}", .{process_name}); + match = self.proton_profile; + } + } + } + + return match; + } + + pub fn deinit(self: *ProfileManager) void { + if (self.active_profile) |profile| { + if (profile.performance_mode) { + self.power_profiles.disablePerformanceMode() catch {}; + } + } + + for (self.profiles.items) |*profile| { + self.allocator.free(profile.name); + } + self.queued_profiles.deinit(); + self.profiles.deinit(); + } +}; diff --git a/falcond/src/scx_scheds.zig b/falcond/src/scx_scheds.zig new file mode 100644 index 0000000..7bc5dce --- /dev/null +++ b/falcond/src/scx_scheds.zig @@ -0,0 +1,224 @@ +const std = @import("std"); +const dbus = @import("dbus.zig"); + +pub const ScxError = dbus.DBusError; + +pub const ScxScheduler = enum { + bpfland, + central, + flash, + flatcg, + lavd, + layered, + nest, + pair, + qmap, + rlfifo, + rustland, + rusty, + sdt, + simple, + userland, + vder, + none, + + pub fn toScxName(self: ScxScheduler) []const u8 { + return switch (self) { + .none => "", + inline else => |tag| "scx_" ++ @tagName(tag), + }; + } + + pub fn fromString(str: []const u8) ScxError!ScxScheduler { + if (std.mem.eql(u8, str, "none")) return .none; + if (std.mem.eql(u8, str, "scx_none")) return .none; + if (std.mem.eql(u8, str, "unknown")) return .none; + if (std.mem.eql(u8, str, "scx_bpfland")) return .bpfland; + if (std.mem.eql(u8, str, "scx_central")) return .central; + if (std.mem.eql(u8, str, "scx_flash")) return .flash; + if (std.mem.eql(u8, str, "scx_flatcg")) return .flatcg; + if (std.mem.eql(u8, str, "scx_lavd")) return .lavd; + if (std.mem.eql(u8, str, "scx_layered")) return .layered; + if (std.mem.eql(u8, str, "scx_nest")) return .nest; + if (std.mem.eql(u8, str, "scx_pair")) return .pair; + if (std.mem.eql(u8, str, "scx_qmap")) return .qmap; + if (std.mem.eql(u8, str, "scx_rlfifo")) return .rlfifo; + if (std.mem.eql(u8, str, "scx_rustland")) return .rustland; + if (std.mem.eql(u8, str, "scx_rusty")) return .rusty; + if (std.mem.eql(u8, str, "scx_sdt")) return .sdt; + if (std.mem.eql(u8, str, "scx_simple")) return .simple; + if (std.mem.eql(u8, str, "scx_userland")) return .userland; + if (std.mem.eql(u8, str, "scx_vder")) return .vder; + return error.InvalidValue; + } +}; + +pub const ScxSchedModes = enum { + default, + power, + gaming, + latency, + server, +}; + +const PreviousState = struct { + scheduler: ?ScxScheduler = null, + mode: ?ScxSchedModes = null, +}; + +var previous_state: PreviousState = .{}; +var supported_schedulers: []ScxScheduler = undefined; +var allocator: std.mem.Allocator = undefined; + +pub fn init(alloc: std.mem.Allocator) !void { + allocator = alloc; + std.log.info("Initializing scheduler state", .{}); + + 
const sched_list = try getSupportedSchedulers(alloc); + defer alloc.free(sched_list); + + std.log.info("Supported schedulers:", .{}); + if (sched_list.len > 0) { + for (sched_list) |sched| { + std.log.info(" - {s}", .{sched.toScxName()}); + } + supported_schedulers = try alloc.dupe(ScxScheduler, sched_list); + } else { + supported_schedulers = &[_]ScxScheduler{}; + } +} + +pub fn deinit() void { + if (supported_schedulers.len > 0) { + allocator.free(supported_schedulers); + } +} + +const SCX_NAME = "org.scx.Loader"; +const SCX_PATH = "/org/scx/Loader"; +const SCX_IFACE = "org.scx.Loader"; + +fn modeToInt(mode: ScxSchedModes) u32 { + return switch (mode) { + .default => 0, + .power => 1, + .gaming => 2, + .latency => 3, + .server => 4, + }; +} + +fn intToMode(value: u32) ScxError!ScxSchedModes { + return switch (value) { + 0 => .default, + 1 => .power, + 2 => .gaming, + 3 => .latency, + 4 => .server, + else => error.InvalidValue, + }; +} + +pub fn getCurrentScheduler(alloc: std.mem.Allocator) !?ScxScheduler { + var dbus_conn = dbus.DBus.init(alloc, SCX_NAME, SCX_PATH, SCX_IFACE); + + const current = try dbus_conn.getProperty("CurrentScheduler"); + defer alloc.free(current); + + if (current.len == 0) return null; + return try ScxScheduler.fromString(current); +} + +pub fn getCurrentMode(alloc: std.mem.Allocator) !ScxSchedModes { + var dbus_conn = dbus.DBus.init(alloc, SCX_NAME, SCX_PATH, SCX_IFACE); + + const mode_str = try dbus_conn.getProperty("SchedulerMode"); + defer alloc.free(mode_str); + + if (mode_str.len == 0) return .default; + + const mode = try std.fmt.parseInt(u32, mode_str, 10); + return intToMode(mode); +} + +pub fn getSupportedSchedulers(alloc: std.mem.Allocator) ![]ScxScheduler { + var dbus_conn = dbus.DBus.init(alloc, SCX_NAME, SCX_PATH, SCX_IFACE); + + const schedulers = try dbus_conn.getPropertyArray("SupportedSchedulers"); + defer { + for (schedulers) |s| { + alloc.free(s); + } + alloc.free(schedulers); + } + + var result = try std.ArrayList(ScxScheduler).initCapacity(alloc, schedulers.len); + errdefer result.deinit(); + + for (schedulers) |s| { + const scheduler = try ScxScheduler.fromString(s); + try result.append(scheduler); + } + + return result.toOwnedSlice(); +} + +pub fn storePreviousState(alloc: std.mem.Allocator) !void { + std.log.info("Storing current scheduler state", .{}); + if (try getCurrentScheduler(alloc)) |scheduler| { + if (scheduler == .none) { + std.log.info("Current scheduler is none", .{}); + previous_state.scheduler = null; + previous_state.mode = null; + } else { + std.log.info("Storing current scheduler: {s}", .{scheduler.toScxName()}); + previous_state.scheduler = scheduler; + previous_state.mode = try getCurrentMode(alloc); + } + } else { + std.log.info("No current scheduler", .{}); + previous_state.scheduler = null; + previous_state.mode = null; + } +} + +pub fn activateScheduler(alloc: std.mem.Allocator, scheduler: ScxScheduler, mode: ScxSchedModes) ScxError!void { + var dbus_conn = dbus.DBus.init(alloc, SCX_NAME, SCX_PATH, SCX_IFACE); + + const mode_str = try std.fmt.allocPrint(alloc, "{d}", .{modeToInt(mode)}); + defer alloc.free(mode_str); + + const args = [_][]const u8{ + "su", + scheduler.toScxName(), + mode_str, + }; + + try dbus_conn.callMethod("SwitchScheduler", &args); +} + +pub fn applyScheduler(alloc: std.mem.Allocator, scheduler: ScxScheduler, mode: ScxSchedModes) ScxError!void { + std.log.info("Applying scheduler {s} with mode {s}", .{ scheduler.toScxName(), @tagName(mode) }); + + try storePreviousState(alloc); + try 
activateScheduler(alloc, scheduler, mode); +} + +pub fn deactivateScheduler(alloc: std.mem.Allocator) ScxError!void { + var dbus_conn = dbus.DBus.init(alloc, SCX_NAME, SCX_PATH, SCX_IFACE); + try dbus_conn.callMethod("StopScheduler", &[_][]const u8{}); +} + +pub fn restorePreviousState(alloc: std.mem.Allocator) ScxError!void { + if (previous_state.scheduler) |scheduler| { + if (scheduler == .none) { + std.log.info("Previous state was none, stopping scheduler", .{}); + try deactivateScheduler(alloc); + } else { + std.log.info("Restoring previous scheduler: {s}", .{scheduler.toScxName()}); + try activateScheduler(alloc, scheduler, previous_state.mode orelse .default); + } + previous_state.scheduler = null; + previous_state.mode = null; + } +} diff --git a/falcond/src/vcache_setting.zig b/falcond/src/vcache_setting.zig new file mode 100644 index 0000000..dc3917c --- /dev/null +++ b/falcond/src/vcache_setting.zig @@ -0,0 +1,42 @@ +const std = @import("std"); +const fs = std.fs; + +pub const VCacheMode = enum { + cache, + freq, + none, +}; + +const vcache_path = "/sys/bus/platform/drivers/amd_x3d_vcache/AMDI0101:00/amd_x3d_mode"; + +var previous_mode: ?[]const u8 = null; +var previous_mode_buffer: [10]u8 = undefined; + +pub fn applyVCacheMode(vcache_mode: VCacheMode) !void { + const file = fs.openFileAbsolute(vcache_path, .{ .mode = .read_write }) catch |err| switch (err) { + error.FileNotFound => return, + else => return err, + }; + defer file.close(); + + if (vcache_mode == .none) { + if (previous_mode) |mode| { + try file.writeAll(mode); + previous_mode = null; + } + return; + } + + const bytes_read = try file.readAll(previous_mode_buffer[0..]); + if (bytes_read > 0) { + previous_mode = previous_mode_buffer[0..bytes_read]; + } + + try file.seekTo(0); + + try file.writeAll(switch (vcache_mode) { + .freq => "frequency", + .cache => "cache", + .none => unreachable, + }); +} diff --git a/main.sh b/main.sh new file mode 100755 index 0000000..37e034a --- /dev/null +++ b/main.sh @@ -0,0 +1,23 @@ +#! /bin/bash + +set -e + +VERSION="1.0.0" + +source ./pika-build-config.sh + +echo "$PIKA_BUILD_ARCH" > pika-build-arch + +cd ./falcond + +# Get build deps +apt-get build-dep ./ -y + +# Build package +LOGNAME=root dh_make --createorig -y -l -p falcond_"$VERSION" || echo "dh-make: Ignoring Last Error" +dpkg-buildpackage --no-sign + +# Move the debs to output +cd ../ +mkdir -p ./output +mv ./*.deb ./output/ diff --git a/pika-build-config/amd64-v3.sh b/pika-build-config/amd64-v3.sh new file mode 100755 index 0000000..cd674a1 --- /dev/null +++ b/pika-build-config/amd64-v3.sh @@ -0,0 +1,10 @@ +#! /bin/bash +export PIKA_BUILD_ARCH="amd64-v3" +export DEBIAN_FRONTEND="noninteractive" +export DEB_BUILD_MAINT_OPTIONS="optimize=+lto -march=x86-64-v3 -O3 -flto=auto" +export DEB_CFLAGS_MAINT_APPEND="-march=x86-64-v3 -O3 -flto=auto" +export DEB_CPPFLAGS_MAINT_APPEND="-march=x86-64-v3 -O3 -flto=auto" +export DEB_CXXFLAGS_MAINT_APPEND="-march=x86-64-v3 -O3 -flto=auto" +export DEB_LDFLAGS_MAINT_APPEND="-march=x86-64-v3 -O3 -flto=auto" +export DEB_BUILD_OPTIONS="nocheck notest terse" +export DPKG_GENSYMBOLS_CHECK_LEVEL=0 diff --git a/pika-build-config/i386.sh b/pika-build-config/i386.sh new file mode 100755 index 0000000..7629d66 --- /dev/null +++ b/pika-build-config/i386.sh @@ -0,0 +1,5 @@ +#! 
/bin/bash
+export PIKA_BUILD_ARCH="i386"
+export DEBIAN_FRONTEND="noninteractive"
+export DEB_BUILD_OPTIONS="nocheck notest terse"
+export DPKG_GENSYMBOLS_CHECK_LEVEL=0
diff --git a/release.sh b/release.sh
new file mode 100755
index 0000000..660f48f
--- /dev/null
+++ b/release.sh
@@ -0,0 +1,3 @@
+#! /bin/bash
+# send debs to server
+rsync -azP --include './' --include '*.deb' --exclude '*' ./output/ ferreo@direct.pika-os.com:/srv/www/cockatiel-incoming/
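
isProtonParent in profile_manager.zig reads /proc/<pid>/status for the PPid line and then checks the parent's /proc/<ppid>/cmdline for the substring "proton". When debugging why a .exe process did or did not get matched to the Proton profile, the same walk can be done by hand; the PID below is a hypothetical placeholder, not a value from the code.

  # hypothetical PID of a running *.exe process
  pid=12345
  # parent PID, as parsed from the PPid: line of /proc/<pid>/status
  ppid=$(awk '/^PPid:/ {print $2}' /proc/$pid/status)
  # falcond treats the process as a Proton game if the parent cmdline mentions "proton"
  tr '\0' ' ' < /proc/$ppid/cmdline | grep -q proton && echo "proton parent: $ppid"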
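
scx_scheds.zig drives the org.scx.Loader D-Bus service: it reads the CurrentScheduler and SchedulerMode properties, switches schedulers with SwitchScheduler (a scheduler name plus a numeric mode: 0 default, 1 power, 2 gaming, 3 latency, 4 server) and stops them with StopScheduler. Assuming the scx_loader service is running, the same calls can be issued manually with busctl to verify behaviour; scx_lavd and mode 2 below are arbitrary example values, not defaults taken from the code.

  # inspect the loader interface falcond talks to
  busctl introspect org.scx.Loader /org/scx/Loader
  # read the properties used by getCurrentScheduler / getCurrentMode
  busctl get-property org.scx.Loader /org/scx/Loader org.scx.Loader CurrentScheduler
  busctl get-property org.scx.Loader /org/scx/Loader org.scx.Loader SchedulerMode
  # switch scheduler: signature "su" = scheduler name + mode
  busctl call org.scx.Loader /org/scx/Loader org.scx.Loader SwitchScheduler su scx_lavd 2
  # stop the sched_ext scheduler again, as deactivateScheduler does
  busctl call org.scx.Loader /org/scx/Loader org.scx.Loader StopScheduler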
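
vcache_setting.zig toggles the AMD X3D preference through a single sysfs attribute and restores the previous value when a profile ends; the attribute only exists on dual-CCD X3D parts exposing the amd_x3d_vcache platform device, and falcond silently skips the step when the file is missing. A quick manual check, assuming such hardware and root access:

  vcache=/sys/bus/platform/drivers/amd_x3d_vcache/AMDI0101:00/amd_x3d_mode
  cat "$vcache"                        # current preference, typically "cache" or "frequency"
  echo frequency | sudo tee "$vcache"  # prefer the frequency-optimised CCD (falcond's "freq" mode)
  echo cache | sudo tee "$vcache"      # prefer the 3D V-Cache CCD (falcond's "cache" mode)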