diff --git a/patches/0001-cachyos-base-all.patch b/patches/0001-cachyos-base-all.patch index 4b92bf6..8abb8ad 100644 --- a/patches/0001-cachyos-base-all.patch +++ b/patches/0001-cachyos-base-all.patch @@ -1,27 +1,27 @@ -From 77d9b13b6db0afead521713204ffc4dced7ad0f2 Mon Sep 17 00:00:00 2001 +From 9829288846e128cf9d409facdfb6df3f17bf7693 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:17:13 +0100 -Subject: [PATCH 01/13] amd-cache-optimizer +Date: Mon, 18 Nov 2024 13:21:37 +0100 +Subject: [PATCH 01/12] amd-cache-optimizer Signed-off-by: Peter Jung --- - .../sysfs-bus-platform-drivers-amd_x3d_vcache | 14 ++ + .../sysfs-bus-platform-drivers-amd_x3d_vcache | 12 ++ MAINTAINERS | 8 + drivers/platform/x86/amd/Kconfig | 12 ++ drivers/platform/x86/amd/Makefile | 2 + - drivers/platform/x86/amd/x3d_vcache.c | 193 ++++++++++++++++++ - 5 files changed, 229 insertions(+) + drivers/platform/x86/amd/x3d_vcache.c | 176 ++++++++++++++++++ + 5 files changed, 210 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-bus-platform-drivers-amd_x3d_vcache create mode 100644 drivers/platform/x86/amd/x3d_vcache.c diff --git a/Documentation/ABI/testing/sysfs-bus-platform-drivers-amd_x3d_vcache b/Documentation/ABI/testing/sysfs-bus-platform-drivers-amd_x3d_vcache new file mode 100644 -index 000000000000..1aa6ed0c10d9 +index 000000000000..ac3431736f5c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-platform-drivers-amd_x3d_vcache -@@ -0,0 +1,14 @@ -+What: /sys/bus/platform/drivers/amd_x3d_vcache/AMDI0101\:00/amd_x3d_mode -+Date: October 2024 +@@ -0,0 +1,12 @@ ++What: /sys/bus/platform/drivers/amd_x3d_vcache/AMDI0101:00/amd_x3d_mode ++Date: November 2024 +KernelVersion: 6.13 +Contact: Basavaraj Natikar +Description: (RW) AMD 3D V-Cache optimizer allows users to switch CPU core @@ -32,10 +32,8 @@ index 000000000000..1aa6ed0c10d9 + those in the slower CCD. + - "cache" cores within the larger L3 CCD are prioritized before + those in the smaller L3 CCD. -+ -+ Format: %s. diff --git a/MAINTAINERS b/MAINTAINERS -index 21fdaa19229a..5dc7d5839fe9 100644 +index b878ddc99f94..3456edbb7b86 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -965,6 +965,14 @@ Q: https://patchwork.kernel.org/project/linux-rdma/list/ @@ -54,12 +52,12 @@ index 21fdaa19229a..5dc7d5839fe9 100644 M: Yazen Ghannam L: linux-edac@vger.kernel.org diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig -index f88682d36447..d73f691020d0 100644 +index f88682d36447..d77600eacb05 100644 --- a/drivers/platform/x86/amd/Kconfig +++ b/drivers/platform/x86/amd/Kconfig -@@ -6,6 +6,18 @@ - source "drivers/platform/x86/amd/pmf/Kconfig" - source "drivers/platform/x86/amd/pmc/Kconfig" +@@ -19,6 +19,18 @@ config AMD_HSMP + If you choose to compile this driver as a module the module will be + called amd_hsmp. +config AMD_3D_VCACHE + tristate "AMD 3D V-Cache Performance Optimizer Driver" @@ -73,28 +71,28 @@ index f88682d36447..d73f691020d0 100644 + If you choose to compile this driver as a module the module will be + called amd_3d_vcache. 
+ - config AMD_HSMP - tristate "AMD HSMP Driver" - depends on AMD_NB && X86_64 && ACPI + config AMD_WBRF + bool "AMD Wifi RF Band mitigations (WBRF)" + depends on ACPI diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile -index dcec0a46f8af..16e4cce02242 100644 +index dcec0a46f8af..86d73f3bd176 100644 --- a/drivers/platform/x86/amd/Makefile +++ b/drivers/platform/x86/amd/Makefile @@ -4,6 +4,8 @@ # AMD x86 Platform-Specific Drivers # -+obj-$(CONFIG_AMD_3D_VCACHE) += amd_3d_vcache.o -+amd_3d_vcache-objs := x3d_vcache.o ++obj-$(CONFIG_AMD_3D_VCACHE) += amd_3d_vcache.o ++amd_3d_vcache-objs := x3d_vcache.o obj-$(CONFIG_AMD_PMC) += pmc/ amd_hsmp-y := hsmp.o obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o diff --git a/drivers/platform/x86/amd/x3d_vcache.c b/drivers/platform/x86/amd/x3d_vcache.c new file mode 100644 -index 000000000000..679613d02b9a +index 000000000000..0f6d3c54d879 --- /dev/null +++ b/drivers/platform/x86/amd/x3d_vcache.c -@@ -0,0 +1,193 @@ +@@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD 3D V-Cache Performance Optimizer Driver @@ -105,24 +103,26 @@ index 000000000000..679613d02b9a + * Authors: Basavaraj Natikar + * Perry Yuan + * Mario Limonciello -+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include ++#include +#include +#include +#include +#include +#include ++#include ++#include ++#include + +static char *x3d_mode = "frequency"; -+module_param(x3d_mode, charp, 0444); ++module_param(x3d_mode, charp, 0); +MODULE_PARM_DESC(x3d_mode, "Initial 3D-VCache mode; 'frequency' (default) or 'cache'"); + +#define DSM_REVISION_ID 0 -+#define DSM_GET_FUNCS_SUPPORTED 0 +#define DSM_SET_X3D_MODE 1 + +static guid_t x3d_guid = GUID_INIT(0xdff8e55f, 0xbcfd, 0x46fb, 0xba, 0x0a, @@ -146,6 +146,13 @@ index 000000000000..679613d02b9a + enum amd_x3d_mode_type curr_mode; +}; + ++static int amd_x3d_get_mode(struct amd_x3d_dev *data) ++{ ++ guard(mutex)(&data->lock); ++ ++ return data->curr_mode; ++} ++ +static int amd_x3d_mode_switch(struct amd_x3d_dev *data, int new_state) +{ + union acpi_object *out, argv; @@ -154,8 +161,8 @@ index 000000000000..679613d02b9a + argv.type = ACPI_TYPE_INTEGER; + argv.integer.value = new_state; + -+ out = acpi_evaluate_dsm(data->ahandle, &x3d_guid, DSM_REVISION_ID, DSM_SET_X3D_MODE, -+ &argv); ++ out = acpi_evaluate_dsm(data->ahandle, &x3d_guid, DSM_REVISION_ID, ++ DSM_SET_X3D_MODE, &argv); + if (!out) { + dev_err(data->dev, "failed to evaluate _DSM\n"); + return -EINVAL; @@ -163,7 +170,7 @@ index 000000000000..679613d02b9a + + data->curr_mode = new_state; + -+ ACPI_FREE(out); ++ kfree(out); + + return 0; +} @@ -175,24 +182,22 @@ index 000000000000..679613d02b9a + int ret; + + ret = sysfs_match_string(amd_x3d_mode_strings, buf); -+ if (ret < 0) { -+ dev_err(dev, "no matching mode to set %s\n", buf); ++ if (ret < 0) + return ret; -+ } + + ret = amd_x3d_mode_switch(data, ret); ++ if (ret < 0) ++ return ret; + -+ return ret ? 
ret : count; ++ return count; +} + +static ssize_t amd_x3d_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct amd_x3d_dev *data = dev_get_drvdata(dev); ++ int mode = amd_x3d_get_mode(data); + -+ if (data->curr_mode > MODE_INDEX_CACHE || data->curr_mode < MODE_INDEX_FREQ) -+ return -EINVAL; -+ -+ return sysfs_emit(buf, "%s\n", amd_x3d_mode_strings[data->curr_mode]); ++ return sysfs_emit(buf, "%s\n", amd_x3d_mode_strings[mode]); +} +static DEVICE_ATTR_RW(amd_x3d_mode); + @@ -202,43 +207,24 @@ index 000000000000..679613d02b9a +}; +ATTRIBUTE_GROUPS(amd_x3d); + -+static int amd_x3d_supported(struct amd_x3d_dev *data) ++static int amd_x3d_resume_handler(struct device *dev) +{ -+ union acpi_object *out; ++ struct amd_x3d_dev *data = dev_get_drvdata(dev); ++ int ret = amd_x3d_get_mode(data); + -+ out = acpi_evaluate_dsm(data->ahandle, &x3d_guid, DSM_REVISION_ID, -+ DSM_GET_FUNCS_SUPPORTED, NULL); -+ if (!out) { -+ dev_err(data->dev, "failed to evaluate _DSM\n"); -+ return -ENODEV; -+ } -+ -+ if (out->type != ACPI_TYPE_BUFFER) { -+ dev_err(data->dev, "invalid type %d\n", out->type); -+ ACPI_FREE(out); -+ return -EINVAL; -+ } -+ -+ ACPI_FREE(out); -+ return 0; ++ return amd_x3d_mode_switch(data, ret); +} + ++static DEFINE_SIMPLE_DEV_PM_OPS(amd_x3d_pm, NULL, amd_x3d_resume_handler); ++ +static const struct acpi_device_id amd_x3d_acpi_ids[] = { + {"AMDI0101"}, + { }, +}; +MODULE_DEVICE_TABLE(acpi, amd_x3d_acpi_ids); + -+static void amd_x3d_remove(void *context) -+{ -+ struct amd_x3d_dev *data = context; -+ -+ mutex_destroy(&data->lock); -+} -+ +static int amd_x3d_probe(struct platform_device *pdev) +{ -+ const struct acpi_device_id *id; + struct amd_x3d_dev *data; + acpi_handle handle; + int ret; @@ -247,33 +233,27 @@ index 000000000000..679613d02b9a + if (!handle) + return -ENODEV; + -+ id = acpi_match_device(amd_x3d_acpi_ids, &pdev->dev); -+ if (!id) -+ dev_err_probe(&pdev->dev, -ENODEV, "unable to match ACPI ID and data\n"); ++ if (!acpi_check_dsm(handle, &x3d_guid, DSM_REVISION_ID, BIT(DSM_SET_X3D_MODE))) ++ return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &pdev->dev; ++ ++ ret = devm_mutex_init(data->dev, &data->lock); ++ if (ret) ++ return ret; ++ + data->ahandle = handle; + platform_set_drvdata(pdev, data); + -+ ret = amd_x3d_supported(data); -+ if (ret) -+ dev_err_probe(&pdev->dev, ret, "not supported on this platform\n"); -+ + ret = match_string(amd_x3d_mode_strings, ARRAY_SIZE(amd_x3d_mode_strings), x3d_mode); + if (ret < 0) + return dev_err_probe(&pdev->dev, -EINVAL, "invalid mode %s\n", x3d_mode); + -+ mutex_init(&data->lock); -+ -+ ret = amd_x3d_mode_switch(data, ret); -+ if (ret < 0) -+ return ret; -+ -+ return devm_add_action_or_reset(&pdev->dev, amd_x3d_remove, data); ++ return amd_x3d_mode_switch(data, ret); +} + +static struct platform_driver amd_3d_vcache_driver = { @@ -281,6 +261,7 @@ index 000000000000..679613d02b9a + .name = "amd_x3d_vcache", + .dev_groups = amd_x3d_groups, + .acpi_match_table = amd_x3d_acpi_ids, ++ .pm = pm_sleep_ptr(&amd_x3d_pm), + }, + .probe = amd_x3d_probe, +}; @@ -291,10 +272,10 @@ index 000000000000..679613d02b9a -- 2.47.0 -From 54c4f598ee011b1f701bdc2a924e9930fbf10962 Mon Sep 17 00:00:00 2001 +From 64f207fa646bd3a493a4e8930b5a52ed40288a54 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:17:26 +0100 -Subject: [PATCH 02/13] amd-pstate +Date: Mon, 18 Nov 2024 13:21:50 +0100 +Subject: [PATCH 02/12] amd-pstate Signed-off-by: Peter 
Jung --- @@ -1137,10 +1118,10 @@ index dd4682857c12..23698d0f4bb4 100644 -- 2.47.0 -From 6c34d83a13cc89085c20da699633ac1f6b612596 Mon Sep 17 00:00:00 2001 +From 2a829d44283c52f7d5ae3026bb693b9496b99b54 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:17:40 +0100 -Subject: [PATCH 03/13] autofdo +Date: Mon, 18 Nov 2024 13:22:13 +0100 +Subject: [PATCH 03/12] autofdo Signed-off-by: Peter Jung --- @@ -1521,7 +1502,7 @@ index 000000000000..92195958e3db + + $ make LLVM=1 CLANG_AUTOFDO_PROFILE= CLANG_PROPELLER_PROFILE_PREFIX= diff --git a/MAINTAINERS b/MAINTAINERS -index 5dc7d5839fe9..3d4709c29704 100644 +index 3456edbb7b86..97802662e8d8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3674,6 +3674,13 @@ F: kernel/audit* @@ -1553,7 +1534,7 @@ index 5dc7d5839fe9..3d4709c29704 100644 M: Petr Mladek R: Steven Rostedt diff --git a/Makefile b/Makefile -index 79192a3024bf..e619df4e09b8 100644 +index 68a8faff2543..5ccec99bf086 100644 --- a/Makefile +++ b/Makefile @@ -1018,6 +1018,8 @@ include-$(CONFIG_KMSAN) += scripts/Makefile.kmsan @@ -1632,7 +1613,7 @@ index d317a843f7ea..f1b86eb30340 100644 SCHED_TEXT LOCK_TEXT diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 16354dfa6d96..89b8fc452a7c 100644 +index 7b9a7e8f39ac..f127d0f1024e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -126,6 +126,8 @@ config X86 @@ -1645,7 +1626,7 @@ index 16354dfa6d96..89b8fc452a7c 100644 select ARCH_USE_CMPXCHG_LOCKREF if X86_CMPXCHG64 select ARCH_USE_MEMTEST diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S -index b8c5741d2fb4..cf22081601ed 100644 +index feb8102a9ca7..bc497a67d363 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -443,6 +443,10 @@ SECTIONS @@ -1883,10 +1864,10 @@ index 3d27983dc908..6f64d611faea 100644 -- 2.47.0 -From 9f4066f41c5d80b408109ea740488da2cca89fcc Mon Sep 17 00:00:00 2001 +From 26f905b0fe8e4c5aa80548f4b568fbc578a583d1 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:17:53 +0100 -Subject: [PATCH 04/13] bbr3 +Date: Mon, 18 Nov 2024 13:22:27 +0100 +Subject: [PATCH 04/12] bbr3 Signed-off-by: Peter Jung --- @@ -5269,10 +5250,10 @@ index 79064580c8c0..697270ce1ea6 100644 -- 2.47.0 -From d87383343350575ce203091b2001bde085b12fc9 Mon Sep 17 00:00:00 2001 +From f4ec451a5b3565cd1d9a64cf7775d47c207b2200 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:18:05 +0100 -Subject: [PATCH 05/13] cachy +Date: Mon, 18 Nov 2024 13:22:41 +0100 +Subject: [PATCH 05/12] cachy Signed-off-by: Peter Jung --- @@ -5311,6 +5292,7 @@ Signed-off-by: Peter Jung include/linux/wait.h | 2 + init/Kconfig | 26 + kernel/Kconfig.hz | 24 + + kernel/Kconfig.preempt | 2 +- kernel/fork.c | 14 + kernel/locking/rwsem.c | 4 +- kernel/sched/fair.c | 13 + @@ -5320,20 +5302,21 @@ Signed-off-by: Peter Jung kernel/user_namespace.c | 7 + mm/Kconfig | 2 +- mm/compaction.c | 4 + + mm/huge_memory.c | 4 + mm/page-writeback.c | 8 + mm/page_alloc.c | 4 + mm/swap.c | 5 + mm/vmpressure.c | 4 + mm/vmscan.c | 8 + net/ipv4/inet_connection_sock.c | 2 +- - 50 files changed, 5073 insertions(+), 64 deletions(-) + 52 files changed, 5078 insertions(+), 65 deletions(-) create mode 100644 drivers/media/v4l2-core/v4l2loopback.c create mode 100644 drivers/media/v4l2-core/v4l2loopback.h create mode 100644 drivers/media/v4l2-core/v4l2loopback_formats.h create mode 100644 drivers/pci/controller/intel-nvme-remap.c diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 
1666576acc0e..5b0b02e6988a 100644 +index d401577b5a6a..e6ec15a89924 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2248,6 +2248,9 @@ @@ -5363,7 +5346,7 @@ index 1666576acc0e..5b0b02e6988a 100644 Safety option to keep boot IRQs enabled. This should never be necessary. diff --git a/Makefile b/Makefile -index e619df4e09b8..7223a0d87413 100644 +index 5ccec99bf086..5c6151566fd3 100644 --- a/Makefile +++ b/Makefile @@ -801,11 +801,19 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks @@ -5825,10 +5808,10 @@ index 2a7279d80460..f5849153b385 100644 # # P6_NOPs are a relatively minor optimization that require a family >= diff --git a/arch/x86/Makefile b/arch/x86/Makefile -index cd75e78a06c1..396d1db12bca 100644 +index 5b773b34768d..74e94c9ba198 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile -@@ -181,15 +181,96 @@ else +@@ -182,15 +182,96 @@ else cflags-$(CONFIG_MK8) += -march=k8 cflags-$(CONFIG_MPSC) += -march=nocona cflags-$(CONFIG_MCORE2) += -march=core2 @@ -6172,10 +6155,10 @@ index 97c2d4f15d76..5a3af44d785a 100644 This driver adds a CPUFreq driver which utilizes a fine grain processor performance frequency control range instead of legacy diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c -index cd2ac1ba53d2..ac3647df1431 100644 +index 400337f3b572..d413b60c6001 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c -@@ -3820,6 +3820,8 @@ static int __init intel_pstate_setup(char *str) +@@ -3817,6 +3817,8 @@ static int __init intel_pstate_setup(char *str) if (!strcmp(str, "disable")) no_load = 1; @@ -6240,7 +6223,7 @@ index df17e79c45c7..e454488c1a31 100644 + endmenu diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -index 07e9ce99694f..cf966e8f61fa 100644 +index 8d97f17ffe66..27b3d03bfdcd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4473,7 +4473,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) @@ -6266,10 +6249,10 @@ index ebabfe3a512f..4d3ebcaacca1 100644 * * AMD driver supports pre-defined mathematical functions for transferring diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c -index a2cf2c066a76..285f5a045ca5 100644 +index 288be19db7c1..9d48d01d5217 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c -@@ -474,7 +474,7 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) +@@ -473,7 +473,7 @@ static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) } #endif @@ -6278,7 +6261,7 @@ index a2cf2c066a76..285f5a045ca5 100644 /** * dm_crtc_additional_color_mgmt - enable additional color properties * @crtc: DRM CRTC -@@ -556,7 +556,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { +@@ -555,7 +555,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { #if defined(CONFIG_DEBUG_FS) .late_register = amdgpu_dm_crtc_late_register, #endif @@ -6287,7 +6270,7 @@ index a2cf2c066a76..285f5a045ca5 100644 .atomic_set_property = amdgpu_dm_atomic_crtc_set_property, .atomic_get_property = amdgpu_dm_atomic_crtc_get_property, #endif -@@ -735,7 +735,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, +@@ -734,7 +734,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, drm_mode_crtc_set_gamma_size(&acrtc->base, 
MAX_COLOR_LEGACY_LUT_ENTRIES); @@ -6342,10 +6325,10 @@ index d5d6ab484e5a..dccba7bcdf97 100644 } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -index ee1bcfaae3e3..3388604f222b 100644 +index 80e60ea2d11e..51dea35848f6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -@@ -2785,7 +2785,10 @@ int smu_get_power_limit(void *handle, +@@ -2775,7 +2775,10 @@ int smu_get_power_limit(void *handle, *limit = smu->max_power_limit; break; case SMU_PPT_LIMIT_MIN: @@ -6357,7 +6340,7 @@ index ee1bcfaae3e3..3388604f222b 100644 break; default: return -EINVAL; -@@ -2809,7 +2812,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit) +@@ -2799,7 +2802,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit) if (smu->ppt_funcs->set_power_limit) return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); @@ -10976,6 +10959,19 @@ index 38ef6d06888e..0f78364efd4f 100644 default 1000 if HZ_1000 config SCHED_HRTICK +diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt +index fe782cd77388..a69475f2bca7 100644 +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -69,7 +69,7 @@ config PREEMPT + + config PREEMPT_RT + bool "Fully Preemptible Kernel (Real-Time)" +- depends on EXPERT && ARCH_SUPPORTS_RT ++ depends on ARCH_SUPPORTS_RT + select PREEMPTION + help + This option turns the kernel into a real-time kernel by replacing diff --git a/kernel/fork.c b/kernel/fork.c index 22f43721d031..8287afdd01d2 100644 --- a/kernel/fork.c @@ -11075,10 +11071,10 @@ index 2d16c8545c71..54e7c4c3e2c5 100644 #ifdef CONFIG_NUMA_BALANCING /* Restrict the NUMA promotion throughput (MB/s) for each target node. */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 6c54a57275cc..f610df2e0811 100644 +index c03b3d7b320e..c5d6012794de 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -2815,7 +2815,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); +@@ -2816,7 +2816,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); @@ -11206,6 +11202,22 @@ index a2b16b08cbbf..48d611e58ad3 100644 static int sysctl_extfrag_threshold = 500; static int __read_mostly sysctl_compact_memory; +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 5734d5d5060f..af595df5b65f 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -65,7 +65,11 @@ unsigned long transparent_hugepage_flags __read_mostly = + #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE + (1<> (20 - PAGE_SHIFT); /* Use a smaller cluster for small-memory machines */ -@@ -1105,4 +1109,5 @@ void __init swap_setup(void) +@@ -1091,4 +1095,5 @@ void __init swap_setup(void) * Right now other parts of the system means that we * _really_ don't want to cluster much more */ @@ -11331,10 +11343,10 @@ index 2b698f8419fe..fd039c41d1c8 100644 -- 2.47.0 -From cb33f67ae0f185239bebf9bd3491e5c671c72df0 Mon Sep 17 00:00:00 2001 +From 51fb944d4a6a9614b40bfd6faf5c9874cf8c714c Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:18:19 +0100 -Subject: [PATCH 06/13] crypto +Date: Mon, 18 Nov 2024 13:23:23 +0100 +Subject: [PATCH 06/12] crypto Signed-off-by: Peter Jung --- @@ -12936,48 +12948,49 @@ index bbcff1fb78cb..752812bc4991 100644 -- 2.47.0 -From 10b2f8b54a3363a982c4d021ed29a191bea5c0b3 Mon Sep 17 00:00:00 2001 +From e6de96946ecba842fbe80d4bd92801f00bb7bf76 Mon Sep 17 00:00:00 2001 From: Peter Jung 
-Date: Mon, 11 Nov 2024 09:20:20 +0100 -Subject: [PATCH 07/13] fixes +Date: Mon, 18 Nov 2024 13:24:27 +0100 +Subject: [PATCH 07/12] fixes Signed-off-by: Peter Jung --- arch/Kconfig | 4 +- - arch/x86/kernel/cpu/amd.c | 11 ++ - arch/x86/mm/tlb.c | 22 ++-- - drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5 + - drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 108 ++++++------------ - drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 11 +- - .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 20 ++-- - .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 20 ++-- - .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 21 ++-- - .../gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 17 +-- - .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 17 +-- - .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 33 +++--- - .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 21 ++-- - .../drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 24 ++-- - drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 8 -- - drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 2 - - drivers/gpu/drm/drm_edid.c | 47 +++++++- + arch/x86/kernel/alternative.c | 10 +- + arch/x86/mm/tlb.c | 20 +-- + drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 + + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 30 ++++ + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 5 +- + drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 156 +++++++++++------- + drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 15 +- + .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 146 ++++++++-------- + .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 146 ++++++++-------- + .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 145 ++++++++-------- + .../gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 41 ++--- + .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 43 ++--- + .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 143 ++++++++-------- + .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 117 +++++++------ + .../drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 144 ++++++++-------- + drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 30 ++++ + drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 5 + + drivers/gpu/drm/drm_edid.c | 47 +++++- drivers/misc/lkdtm/bugs.c | 2 +- fs/ntfs3/attrib.c | 9 +- - fs/ntfs3/bitmap.c | 62 +++------- - fs/ntfs3/file.c | 34 +++--- - fs/ntfs3/frecord.c | 104 +++-------------- + fs/ntfs3/bitmap.c | 62 ++----- + fs/ntfs3/file.c | 34 ++-- + fs/ntfs3/frecord.c | 104 ++---------- fs/ntfs3/fsntfs.c | 2 +- fs/ntfs3/ntfs_fs.h | 3 +- - fs/ntfs3/record.c | 16 ++- - fs/ntfs3/run.c | 40 +++++-- - include/linux/compiler_attributes.h | 13 --- + fs/ntfs3/record.c | 16 +- + fs/ntfs3/run.c | 40 +++-- + include/linux/compiler_attributes.h | 13 -- include/linux/compiler_types.h | 19 +++ include/linux/mm_types.h | 1 + - init/Kconfig | 8 ++ - kernel/sched/core.c | 46 +++++--- - kernel/sched/sched.h | 5 + + init/Kconfig | 8 + + kernel/workqueue.c | 22 ++- lib/overflow_kunit.c | 2 +- scripts/package/PKGBUILD | 5 + - 34 files changed, 376 insertions(+), 386 deletions(-) + 35 files changed, 865 insertions(+), 726 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 00551f340dbe..833b2344ce79 100644 @@ -13001,41 +13014,43 @@ index 00551f340dbe..833b2344ce79 100644 depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS help This value can be used to select the number of bits to use to -diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index fab5caec0b72..823f44f7bc94 100644 ---- a/arch/x86/kernel/cpu/amd.c -+++ b/arch/x86/kernel/cpu/amd.c -@@ -924,6 +924,17 @@ static void init_amd_zen4(struct cpuinfo_x86 *c) - { - if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) - msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); 
-+ -+ /* -+ * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE -+ * in some BIOS versions but they can lead to random host reboots. -+ */ -+ switch (c->x86_model) { -+ case 0x18 ... 0x1f: -+ case 0x60 ... 0x7f: -+ clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD); -+ break; -+ } +diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c +index d17518ca19b8..8b66a555d2f0 100644 +--- a/arch/x86/kernel/alternative.c ++++ b/arch/x86/kernel/alternative.c +@@ -1825,11 +1825,18 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm) + return temp_state; } - static void init_amd_zen5(struct cpuinfo_x86 *c) ++__ro_after_init struct mm_struct *poking_mm; ++__ro_after_init unsigned long poking_addr; ++ + static inline void unuse_temporary_mm(temp_mm_state_t prev_state) + { + lockdep_assert_irqs_disabled(); ++ + switch_mm_irqs_off(NULL, prev_state.mm, current); + ++ /* Clear the cpumask, to indicate no TLB flushing is needed anywhere */ ++ cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm)); ++ + /* + * Restore the breakpoints if they were disabled before the temporary mm + * was loaded. +@@ -1838,9 +1845,6 @@ static inline void unuse_temporary_mm(temp_mm_state_t prev_state) + hw_breakpoint_restore(); + } + +-__ro_after_init struct mm_struct *poking_mm; +-__ro_after_init unsigned long poking_addr; +- + static void text_poke_memcpy(void *dst, const void *src, size_t len) + { + memcpy(dst, src, len); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c -index 86593d1b787d..1aac4fa90d3d 100644 +index 86593d1b787d..9d0d34576928 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c -@@ -568,7 +568,7 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, - * mm_cpumask. The TLB shootdown code can figure out from - * cpu_tlbstate_shared.is_lazy whether or not to send an IPI. - */ -- if (WARN_ON_ONCE(prev != &init_mm && -+ if (IS_ENABLED(CONFIG_DEBUG_VM) && WARN_ON_ONCE(prev != &init_mm && - !cpumask_test_cpu(cpu, mm_cpumask(next)))) - cpumask_set_cpu(cpu, mm_cpumask(next)); - @@ -606,18 +606,15 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, cond_mitigation(tsk); @@ -13074,11 +13089,66 @@ index 86593d1b787d..1aac4fa90d3d 100644 } if (unlikely(loaded_mm == &init_mm)) +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h +index 7617963901fa..9726c4a5842a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h +@@ -273,6 +273,8 @@ extern int amdgpu_agp; + + extern int amdgpu_wbrf; + ++extern struct workqueue_struct *amdgpu_reclaim_wq; ++ + #define AMDGPU_VM_MAX_NUM_CTX 4096 + #define AMDGPU_SG_THRESHOLD (256*1024*1024) + #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index 852e6f315576..f6a6fc6a4f5c 100644 +index 852e6f315576..ebc13f056153 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -@@ -3078,6 +3078,11 @@ static int __init amdgpu_init(void) +@@ -260,6 +260,8 @@ struct amdgpu_watchdog_timer amdgpu_watchdog_timer = { + .period = 0x0, /* default to 0x0 (timeout disable) */ + }; + ++struct workqueue_struct *amdgpu_reclaim_wq; ++ + /** + * DOC: ignore_min_pcap (int) + * Ignore the minimum power cap. 
+@@ -3056,6 +3058,21 @@ static struct pci_driver amdgpu_kms_pci_driver = { + .dev_groups = amdgpu_sysfs_groups, + }; + ++static int amdgpu_wq_init(void) ++{ ++ amdgpu_reclaim_wq = ++ alloc_workqueue("amdgpu-reclaim", WQ_MEM_RECLAIM, 0); ++ if (!amdgpu_reclaim_wq) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void amdgpu_wq_fini(void) ++{ ++ destroy_workqueue(amdgpu_reclaim_wq); ++} ++ + static int __init amdgpu_init(void) + { + int r; +@@ -3063,6 +3080,10 @@ static int __init amdgpu_init(void) + if (drm_firmware_drivers_only()) + return -EINVAL; + ++ r = amdgpu_wq_init(); ++ if (r) ++ goto error_wq; ++ + r = amdgpu_sync_init(); + if (r) + goto error_sync; +@@ -3078,6 +3099,11 @@ static int __init amdgpu_init(void) /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ amdgpu_amdkfd_init(); @@ -13090,43 +13160,82 @@ index 852e6f315576..f6a6fc6a4f5c 100644 /* let modprobe override vga console setting */ return pci_register_driver(&amdgpu_kms_pci_driver); +@@ -3085,6 +3111,9 @@ static int __init amdgpu_init(void) + amdgpu_sync_fini(); + + error_sync: ++ amdgpu_wq_fini(); ++ ++error_wq: + return r; + } + +@@ -3096,6 +3125,7 @@ static void __exit amdgpu_exit(void) + amdgpu_acpi_release(); + amdgpu_sync_fini(); + amdgpu_fence_slab_fini(); ++ amdgpu_wq_fini(); + mmu_notifier_synchronize(); + amdgpu_xcp_drv_release(); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +index f1ffab5a1eae..15614e43be5a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +@@ -800,8 +800,9 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) + AMD_IP_BLOCK_TYPE_GFX, true)) + adev->gfx.gfx_off_state = true; + } else { +- schedule_delayed_work(&adev->gfx.gfx_off_delay_work, +- delay); ++ queue_delayed_work(amdgpu_reclaim_wq, ++ &adev->gfx.gfx_off_delay_work, ++ delay); + } + } + } else { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -index 3388604f222b..daa870302cc3 100644 +index 51dea35848f6..95f61b48373e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -@@ -1257,42 +1257,18 @@ static int smu_sw_init(void *handle) +@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit); + static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); + static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); + static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state); ++static void smu_power_profile_mode_get(struct smu_context *smu, ++ enum PP_SMC_POWER_PROFILE profile_mode); ++static void smu_power_profile_mode_put(struct smu_context *smu, ++ enum PP_SMC_POWER_PROFILE profile_mode); + + static int smu_sys_get_pp_feature_mask(void *handle, + char *buf) +@@ -1257,35 +1261,19 @@ static int smu_sw_init(void *handle) INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); atomic64_set(&smu->throttle_int_counter, 0); smu->watermarks_bitmap = 0; - smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; -- smu->user_dpm_profile.user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); -- smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; -- 
smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; -- smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; -- smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; -- smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; -- smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; -- smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; +- smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; +- smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; +- smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; +- smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; +- smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; +- smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; +- smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; - if (smu->is_apu || -- !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { -- smu->driver_workload_mask = -- 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; -- } else { -- smu->driver_workload_mask = -- 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; -- smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; -- } + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) +- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; ++ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + else +- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; - -- smu->workload_mask = smu->driver_workload_mask | -- smu->user_dpm_profile.user_workload_mask; - smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; - smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; - smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -13134,33 +13243,66 @@ index 3388604f222b..daa870302cc3 100644 - smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR; - smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE; - smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM; -+ !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) -+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; -+ else + smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; ++ smu_power_profile_mode_get(smu, smu->power_profile_mode); + smu->display_config = &adev->pm.pm_display_cfg; smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; -@@ -2232,24 +2208,23 @@ static int smu_enable_umd_pstate(void *handle, +@@ -2113,6 +2101,9 @@ static int smu_suspend(void *handle) + if (!ret) + adev->gfx.gfx_off_entrycount = count; + ++ /* clear this on suspend so it will get reprogrammed on resume */ ++ smu->workload_mask = 0; ++ + return 0; } - static int smu_bump_power_profile_mode(struct smu_context *smu, +@@ -2224,26 +2215,46 @@ static int smu_enable_umd_pstate(void *handle, + return 0; + } + +-static int smu_bump_power_profile_mode(struct smu_context *smu, - long *param, - uint32_t param_size) -+ long *param, -+ uint32_t param_size, -+ bool enable) ++static int smu_bump_power_profile_mode(struct smu_context *smu) { - int ret = 0; +- int ret = 0; ++ u32 workload_mask = 0; ++ int i, ret = 0; ++ ++ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { ++ if (smu->workload_refcount[i]) ++ workload_mask |= 1 << i; ++ } ++ ++ if (smu->workload_mask == workload_mask) ++ return 0; if (smu->ppt_funcs->set_power_profile_mode) - ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); -+ ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size, 
enable); ++ ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask); ++ ++ if (!ret) ++ smu->workload_mask = workload_mask; return ret; } ++static void smu_power_profile_mode_get(struct smu_context *smu, ++ enum PP_SMC_POWER_PROFILE profile_mode) ++{ ++ smu->workload_refcount[profile_mode]++; ++} ++ ++static void smu_power_profile_mode_put(struct smu_context *smu, ++ enum PP_SMC_POWER_PROFILE profile_mode) ++{ ++ if (smu->workload_refcount[profile_mode]) ++ smu->workload_refcount[profile_mode]--; ++} ++ static int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, - bool skip_display_settings, @@ -13169,10 +13311,11 @@ index 3388604f222b..daa870302cc3 100644 { int ret = 0; - int index = 0; - long workload[1]; +- long workload[1]; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); -@@ -2287,13 +2262,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, + if (!skip_display_settings) { +@@ -2280,14 +2291,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && @@ -13180,16 +13323,16 @@ index 3388604f222b..daa870302cc3 100644 - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload[0] = smu->workload_setting[index]; -+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { -+ workload[0] = smu->power_profile_mode; - +- - if (init || smu->power_profile_mode != workload[0]) - smu_bump_power_profile_mode(smu, workload, 0); -+ smu_bump_power_profile_mode(smu, workload, 0, true); - } +- } ++ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ++ smu_bump_power_profile_mode(smu); return ret; -@@ -2313,13 +2285,13 @@ static int smu_handle_task(struct smu_context *smu, + } +@@ -2306,13 +2311,13 @@ static int smu_handle_task(struct smu_context *smu, ret = smu_pre_display_config_changed(smu); if (ret) return ret; @@ -13206,7 +13349,7 @@ index 3388604f222b..daa870302cc3 100644 break; default: break; -@@ -2341,12 +2313,11 @@ static int smu_handle_dpm_task(void *handle, +@@ -2334,12 +2339,11 @@ static int smu_handle_dpm_task(void *handle, static int smu_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, @@ -13215,365 +13358,776 @@ index 3388604f222b..daa870302cc3 100644 { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - long workload[1]; +- long workload[1]; - uint32_t index; ++ int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; -@@ -2354,24 +2325,15 @@ static int smu_switch_power_profile(void *handle, +@@ -2347,21 +2351,21 @@ static int smu_switch_power_profile(void *handle, if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) return -EINVAL; - if (!en) { -- smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); +- smu->workload_mask &= ~(1 << smu->workload_prority[type]); - index = fls(smu->workload_mask); - index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; - workload[0] = smu->workload_setting[index]; - } else { -- smu->driver_workload_mask |= (1 << smu->workload_priority[type]); +- smu->workload_mask |= (1 << smu->workload_prority[type]); - index = fls(smu->workload_mask); - index = index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; - workload[0] = smu->workload_setting[index]; - } -+ /* don't disable the user's preference */ -+ if (!enable && type == smu->power_profile_mode) -+ return 0; - -- smu->workload_mask = smu->driver_workload_mask | -- smu->user_dpm_profile.user_workload_mask; -+ workload[0] = type; - +- if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && - smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) - smu_bump_power_profile_mode(smu, workload, 0); -+ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) -+ smu_bump_power_profile_mode(smu, workload, 0, enable); ++ smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { ++ if (enable) ++ smu_power_profile_mode_get(smu, type); ++ else ++ smu_power_profile_mode_put(smu, type); ++ ret = smu_bump_power_profile_mode(smu); ++ if (ret) { ++ if (enable) ++ smu_power_profile_mode_put(smu, type); ++ else ++ smu_power_profile_mode_get(smu, type); ++ return ret; ++ } ++ } return 0; } -@@ -3069,21 +3031,25 @@ static int smu_set_power_profile_mode(void *handle, +@@ -3059,12 +3063,48 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; -- int ret; -+ long workload[1]; -+ int ret = 0; ++ bool custom_changed = false; ++ int ret = 0, i; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; -- if (smu->user_dpm_profile.user_workload_mask & -- (1 << smu->workload_priority[param[param_size]])) -- return 0; -- -- smu->user_dpm_profile.user_workload_mask = -- (1 << smu->workload_priority[param[param_size]]); -- smu->workload_mask = smu->user_dpm_profile.user_workload_mask | -- smu->driver_workload_mask; -- ret = smu_bump_power_profile_mode(smu, param, param_size); -+ if (param[param_size] != smu->power_profile_mode) { +- return smu_bump_power_profile_mode(smu, param, param_size); ++ if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) { ++ if (param_size > SMU_BACKEND_MAX_CUSTOM_PARAMETERS) ++ return -EINVAL; ++ /* param_size is actually a max index, not an array size */ ++ for (i = 0; i <= param_size; i++) { ++ if (smu->custom_profile_input[i] != param[i]) { ++ custom_changed = true; ++ break; ++ } ++ } ++ } ++ ++ if ((param[param_size] != smu->power_profile_mode) || custom_changed) { ++ /* save the parameters for custom */ ++ if (custom_changed) { ++ /* param_size is actually a max index, not an array size */ ++ for (i = 0; i <= param_size; i++) ++ smu->custom_profile_input[i] = param[i]; ++ smu->custom_profile_size = param_size; ++ /* clear frontend mask so custom changes propogate */ ++ smu->workload_mask = 0; ++ } + /* clear the old user preference */ -+ workload[0] = smu->power_profile_mode; -+ ret = smu_bump_power_profile_mode(smu, workload, 0, false); -+ if (ret) -+ return ret; ++ smu_power_profile_mode_put(smu, smu->power_profile_mode); + /* set the new user preference */ -+ ret = smu_bump_power_profile_mode(smu, param, param_size, true); -+ if (!ret) ++ smu_power_profile_mode_get(smu, param[param_size]); ++ ret = smu_bump_power_profile_mode(smu); ++ if (ret) ++ smu_power_profile_mode_put(smu, param[param_size]); ++ else + /* store the user's preference */ + smu->power_profile_mode = param[param_size]; + } - - return ret; ++ ++ return ret; } + + static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h -index d60d9a12a47e..fc54b2c6ede8 100644 +index b44a185d07e8..cd71663462ff 
100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h -@@ -240,7 +240,6 @@ struct smu_user_dpm_profile { - /* user clock state information */ - uint32_t clk_mask[SMU_CLK_COUNT]; - uint32_t clk_dependency; -- uint32_t user_workload_mask; - }; +@@ -509,6 +509,8 @@ enum smu_fw_status { + */ + #define SMU_WBRF_EVENT_HANDLING_PACE 10 - #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ -@@ -557,12 +556,10 @@ struct smu_context { ++#define SMU_BACKEND_MAX_CUSTOM_PARAMETERS 11 ++ + struct smu_context { + struct amdgpu_device *adev; + struct amdgpu_irq_src irq_source; +@@ -556,11 +558,14 @@ struct smu_context { uint32_t hard_min_uclk_req_from_dal; bool disable_uclk_switch; -+ /* backend specific workload mask */ ++ /* asic agnostic workload mask */ uint32_t workload_mask; -- uint32_t driver_workload_mask; -- uint32_t workload_priority[WORKLOAD_POLICY_MAX]; +- uint32_t workload_prority[WORKLOAD_POLICY_MAX]; - uint32_t workload_setting[WORKLOAD_POLICY_MAX]; + /* default/user workload preference */ uint32_t power_profile_mode; - uint32_t default_power_profile_mode; ++ uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT]; ++ /* backend specific custom workload settings */ ++ long custom_profile_input[SMU_BACKEND_MAX_CUSTOM_PARAMETERS]; ++ bool custom_profile_size; bool pm_enabled; bool is_apu; -@@ -734,8 +731,10 @@ struct pptable_funcs { +@@ -731,9 +736,9 @@ struct pptable_funcs { + * @set_power_profile_mode: Set a power profile mode. Also used to * create/set custom power profile modes. * &input: Power profile mode parameters. - * &size: Size of &input. -+ * &enable: enable/disable the profile +- * &size: Size of &input. ++ * &workload_mask: mask of workloads to enable */ - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); -+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, -+ uint32_t size, bool enable); ++ int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask); /** * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c -index 31fe512028f4..ac7fbb815644 100644 +index c0f6b59369b7..2d56ece1861f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c -@@ -1443,7 +1443,8 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, +@@ -1441,98 +1441,96 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu, + return size; + } - static int arcturus_set_power_profile_mode(struct smu_context *smu, - long *input, +-static int arcturus_set_power_profile_mode(struct smu_context *smu, +- long *input, - uint32_t size) -+ uint32_t size, -+ bool enable) ++static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu, ++ long *input, ++ uint32_t size) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type = 0; -@@ -1455,8 +1456,9 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, +- int workload_type = 0; +- uint32_t profile_mode = input[size]; +- int ret = 0; ++ int ret; + +- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { +- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); ++ if (size != 10) return -EINVAL; ++ ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, ++ WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor), ++ false); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] 
Failed to get activity monitor!", __func__); ++ return ret; } ++ switch (input[0]) { ++ case 0: /* Gfxclk */ ++ activity_monitor.Gfx_FPS = input[1]; ++ activity_monitor.Gfx_UseRlcBusy = input[2]; ++ activity_monitor.Gfx_MinActiveFreqType = input[3]; ++ activity_monitor.Gfx_MinActiveFreq = input[4]; ++ activity_monitor.Gfx_BoosterFreqType = input[5]; ++ activity_monitor.Gfx_BoosterFreq = input[6]; ++ activity_monitor.Gfx_PD_Data_limit_c = input[7]; ++ activity_monitor.Gfx_PD_Data_error_coeff = input[8]; ++ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; ++ break; ++ case 1: /* Uclk */ ++ activity_monitor.Mem_FPS = input[1]; ++ activity_monitor.Mem_UseRlcBusy = input[2]; ++ activity_monitor.Mem_MinActiveFreqType = input[3]; ++ activity_monitor.Mem_MinActiveFreq = input[4]; ++ activity_monitor.Mem_BoosterFreqType = input[5]; ++ activity_monitor.Mem_BoosterFreq = input[6]; ++ activity_monitor.Mem_PD_Data_limit_c = input[7]; ++ activity_monitor.Mem_PD_Data_error_coeff = input[8]; ++ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; ++ break; ++ default: ++ return -EINVAL; ++ } + - if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && - (smu->smc_fw_version >= 0x360d00)) { -+ if (enable && -+ (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && -+ (smu->smc_fw_version >= 0x360d00)) { - if (size != 10) - return -EINVAL; +- if (size != 10) +- return -EINVAL; ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, ++ WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor), ++ true); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ return ret; ++ } -@@ -1520,18 +1522,18 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, - return -EINVAL; +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, +- WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor), +- false); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); +- return ret; +- } ++ return ret; ++} + +- switch (input[0]) { +- case 0: /* Gfxclk */ +- activity_monitor.Gfx_FPS = input[1]; +- activity_monitor.Gfx_UseRlcBusy = input[2]; +- activity_monitor.Gfx_MinActiveFreqType = input[3]; +- activity_monitor.Gfx_MinActiveFreq = input[4]; +- activity_monitor.Gfx_BoosterFreqType = input[5]; +- activity_monitor.Gfx_BoosterFreq = input[6]; +- activity_monitor.Gfx_PD_Data_limit_c = input[7]; +- activity_monitor.Gfx_PD_Data_error_coeff = input[8]; +- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; +- break; +- case 1: /* Uclk */ +- activity_monitor.Mem_FPS = input[1]; +- activity_monitor.Mem_UseRlcBusy = input[2]; +- activity_monitor.Mem_MinActiveFreqType = input[3]; +- activity_monitor.Mem_MinActiveFreq = input[4]; +- activity_monitor.Mem_BoosterFreqType = input[5]; +- activity_monitor.Mem_BoosterFreq = input[6]; +- activity_monitor.Mem_PD_Data_limit_c = input[7]; +- activity_monitor.Mem_PD_Data_error_coeff = input[8]; +- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; +- break; +- default: +- return -EINVAL; +- } ++static int arcturus_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) ++{ ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int ret; + +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, +- WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor), +- true); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); +- return ret; +- } +- } ++ smu_cmn_get_backend_workload_mask(smu, 
workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); + +- /* +- * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT +- * Not all profile modes are supported on arcturus. +- */ +- workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, +- profile_mode); +- if (workload_type < 0) { +- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode); +- return -EINVAL; ++ if (custom_enabled) { ++ ret = arcturus_set_power_profile_mode_coeff(smu, ++ smu->custom_profile_input, ++ smu->custom_profile_size); ++ if (ret) ++ return ret; } -+ if (enable) -+ smu->workload_mask |= (1 << workload_type); -+ else -+ smu->workload_mask &= ~(1 << workload_type); ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - smu->workload_mask, - NULL); -- if (ret) { -+ if (ret) - dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); -- return ret; -- } -- -- smu_cmn_assign_power_profile(smu); +- SMU_MSG_SetWorkloadMask, +- 1 << workload_type, +- NULL); ++ SMU_MSG_SetWorkloadMask, ++ backend_workload_mask, ++ NULL); + if (ret) { +- dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); ++ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", ++ workload_mask); + return ret; + } +- smu->power_profile_mode = profile_mode; +- - return 0; + return ret; } static int arcturus_set_performance_level(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c -index 12223f507977..656df9fce471 100644 +index 16af1a329621..72e30a3d0242 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c -@@ -2004,19 +2004,19 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) +@@ -2004,87 +2004,99 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) return size; } -static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) -+static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, -+ uint32_t size, bool enable) ++static int navi10_set_power_profile_mode_coeff(struct smu_context *smu, ++ long *input, ++ uint32_t size) { DpmActivityMonitorCoeffInt_t activity_monitor; - int workload_type, ret = 0; -+ uint32_t profile_mode = input[size]; +- int workload_type, ret = 0; ++ int ret; - smu->power_profile_mode = input[size]; -- ++ if (size != 10) ++ return -EINVAL; + - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); -+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { -+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor), false); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); ++ return ret; ++ } ++ ++ switch (input[0]) { ++ case 0: /* Gfxclk */ ++ activity_monitor.Gfx_FPS = input[1]; ++ activity_monitor.Gfx_MinFreqStep = input[2]; ++ activity_monitor.Gfx_MinActiveFreqType = input[3]; ++ activity_monitor.Gfx_MinActiveFreq = input[4]; ++ activity_monitor.Gfx_BoosterFreqType = input[5]; ++ activity_monitor.Gfx_BoosterFreq = input[6]; ++ activity_monitor.Gfx_PD_Data_limit_c = input[7]; ++ activity_monitor.Gfx_PD_Data_error_coeff = input[8]; ++ activity_monitor.Gfx_PD_Data_error_rate_coeff = 
input[9]; ++ break; ++ case 1: /* Socclk */ ++ activity_monitor.Soc_FPS = input[1]; ++ activity_monitor.Soc_MinFreqStep = input[2]; ++ activity_monitor.Soc_MinActiveFreqType = input[3]; ++ activity_monitor.Soc_MinActiveFreq = input[4]; ++ activity_monitor.Soc_BoosterFreqType = input[5]; ++ activity_monitor.Soc_BoosterFreq = input[6]; ++ activity_monitor.Soc_PD_Data_limit_c = input[7]; ++ activity_monitor.Soc_PD_Data_error_coeff = input[8]; ++ activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; ++ break; ++ case 2: /* Memclk */ ++ activity_monitor.Mem_FPS = input[1]; ++ activity_monitor.Mem_MinFreqStep = input[2]; ++ activity_monitor.Mem_MinActiveFreqType = input[3]; ++ activity_monitor.Mem_MinActiveFreq = input[4]; ++ activity_monitor.Mem_BoosterFreqType = input[5]; ++ activity_monitor.Mem_BoosterFreq = input[6]; ++ activity_monitor.Mem_PD_Data_limit_c = input[7]; ++ activity_monitor.Mem_PD_Data_error_coeff = input[8]; ++ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; ++ break; ++ default: return -EINVAL; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { -+ if (enable && profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 10) - return -EINVAL; +- if (size != 10) +- return -EINVAL; ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor), true); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ return ret; ++ } -@@ -2078,16 +2078,18 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor), false); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); +- return ret; +- } ++ return ret; ++} + +- switch (input[0]) { +- case 0: /* Gfxclk */ +- activity_monitor.Gfx_FPS = input[1]; +- activity_monitor.Gfx_MinFreqStep = input[2]; +- activity_monitor.Gfx_MinActiveFreqType = input[3]; +- activity_monitor.Gfx_MinActiveFreq = input[4]; +- activity_monitor.Gfx_BoosterFreqType = input[5]; +- activity_monitor.Gfx_BoosterFreq = input[6]; +- activity_monitor.Gfx_PD_Data_limit_c = input[7]; +- activity_monitor.Gfx_PD_Data_error_coeff = input[8]; +- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; +- break; +- case 1: /* Socclk */ +- activity_monitor.Soc_FPS = input[1]; +- activity_monitor.Soc_MinFreqStep = input[2]; +- activity_monitor.Soc_MinActiveFreqType = input[3]; +- activity_monitor.Soc_MinActiveFreq = input[4]; +- activity_monitor.Soc_BoosterFreqType = input[5]; +- activity_monitor.Soc_BoosterFreq = input[6]; +- activity_monitor.Soc_PD_Data_limit_c = input[7]; +- activity_monitor.Soc_PD_Data_error_coeff = input[8]; +- activity_monitor.Soc_PD_Data_error_rate_coeff = input[9]; +- break; +- case 2: /* Memclk */ +- activity_monitor.Mem_FPS = input[1]; +- activity_monitor.Mem_MinFreqStep = input[2]; +- activity_monitor.Mem_MinActiveFreqType = input[3]; +- activity_monitor.Mem_MinActiveFreq = input[4]; +- activity_monitor.Mem_BoosterFreqType = input[5]; +- activity_monitor.Mem_BoosterFreq = input[6]; +- activity_monitor.Mem_PD_Data_limit_c = input[7]; +- activity_monitor.Mem_PD_Data_error_coeff = input[8]; +- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; +- break; +- default: +- return -EINVAL; +- 
} ++static int navi10_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) ++{ ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int ret; + +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor), true); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ smu_cmn_get_backend_workload_mask(smu, workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); ++ ++ if (custom_enabled) { ++ ret = navi10_set_power_profile_mode_coeff(smu, ++ smu->custom_profile_input, ++ smu->custom_profile_size); ++ if (ret) + return ret; +- } + } + +- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ +- workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); -+ profile_mode); - if (workload_type < 0) - return -EINVAL; - -+ if (enable) -+ smu->workload_mask |= (1 << workload_type); -+ else -+ smu->workload_mask &= ~(1 << workload_type); +- if (workload_type < 0) +- return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - smu->workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); -- else -- smu_cmn_assign_power_profile(smu); +- 1 << workload_type, NULL); +- if (ret) +- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); ++ backend_workload_mask, NULL); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", ++ workload_mask); ++ return ret; ++ } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -index 3b7b2ec8319a..289cba0f741e 100644 +index 9c3c48297cba..4945a3dda73e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c -@@ -1706,22 +1706,23 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * +@@ -1706,90 +1706,101 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char * return size; } -static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) -+static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, -+ long *input, uint32_t size, -+ bool enable) ++static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu, ++ long *input, uint32_t size) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); -+ uint32_t profile_mode = input[size]; - int workload_type, ret = 0; +- int workload_type, ret = 0; ++ int ret; ++ ++ if (size != 10) ++ return -EINVAL; - smu->power_profile_mode = input[size]; -- ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), false); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); ++ return ret; ++ } + - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); -+ if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { -+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); ++ switch (input[0]) { ++ case 0: /* Gfxclk */ ++ activity_monitor->Gfx_FPS = input[1]; ++ activity_monitor->Gfx_MinFreqStep = input[2]; ++ 
activity_monitor->Gfx_MinActiveFreqType = input[3]; ++ activity_monitor->Gfx_MinActiveFreq = input[4]; ++ activity_monitor->Gfx_BoosterFreqType = input[5]; ++ activity_monitor->Gfx_BoosterFreq = input[6]; ++ activity_monitor->Gfx_PD_Data_limit_c = input[7]; ++ activity_monitor->Gfx_PD_Data_error_coeff = input[8]; ++ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9]; ++ break; ++ case 1: /* Socclk */ ++ activity_monitor->Fclk_FPS = input[1]; ++ activity_monitor->Fclk_MinFreqStep = input[2]; ++ activity_monitor->Fclk_MinActiveFreqType = input[3]; ++ activity_monitor->Fclk_MinActiveFreq = input[4]; ++ activity_monitor->Fclk_BoosterFreqType = input[5]; ++ activity_monitor->Fclk_BoosterFreq = input[6]; ++ activity_monitor->Fclk_PD_Data_limit_c = input[7]; ++ activity_monitor->Fclk_PD_Data_error_coeff = input[8]; ++ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; ++ break; ++ case 2: /* Memclk */ ++ activity_monitor->Mem_FPS = input[1]; ++ activity_monitor->Mem_MinFreqStep = input[2]; ++ activity_monitor->Mem_MinActiveFreqType = input[3]; ++ activity_monitor->Mem_MinActiveFreq = input[4]; ++ activity_monitor->Mem_BoosterFreqType = input[5]; ++ activity_monitor->Mem_BoosterFreq = input[6]; ++ activity_monitor->Mem_PD_Data_limit_c = input[7]; ++ activity_monitor->Mem_PD_Data_error_coeff = input[8]; ++ activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; ++ break; ++ default: return -EINVAL; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { -+ if (enable && profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 10) - return -EINVAL; +- if (size != 10) +- return -EINVAL; ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), true); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ return ret; ++ } -@@ -1783,16 +1784,18 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), false); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); +- return ret; +- } ++ return ret; ++} + +- switch (input[0]) { +- case 0: /* Gfxclk */ +- activity_monitor->Gfx_FPS = input[1]; +- activity_monitor->Gfx_MinFreqStep = input[2]; +- activity_monitor->Gfx_MinActiveFreqType = input[3]; +- activity_monitor->Gfx_MinActiveFreq = input[4]; +- activity_monitor->Gfx_BoosterFreqType = input[5]; +- activity_monitor->Gfx_BoosterFreq = input[6]; +- activity_monitor->Gfx_PD_Data_limit_c = input[7]; +- activity_monitor->Gfx_PD_Data_error_coeff = input[8]; +- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9]; +- break; +- case 1: /* Socclk */ +- activity_monitor->Fclk_FPS = input[1]; +- activity_monitor->Fclk_MinFreqStep = input[2]; +- activity_monitor->Fclk_MinActiveFreqType = input[3]; +- activity_monitor->Fclk_MinActiveFreq = input[4]; +- activity_monitor->Fclk_BoosterFreqType = input[5]; +- activity_monitor->Fclk_BoosterFreq = input[6]; +- activity_monitor->Fclk_PD_Data_limit_c = input[7]; +- activity_monitor->Fclk_PD_Data_error_coeff = input[8]; +- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9]; +- break; +- case 2: /* Memclk */ +- activity_monitor->Mem_FPS = input[1]; +- 
activity_monitor->Mem_MinFreqStep = input[2]; +- activity_monitor->Mem_MinActiveFreqType = input[3]; +- activity_monitor->Mem_MinActiveFreq = input[4]; +- activity_monitor->Mem_BoosterFreqType = input[5]; +- activity_monitor->Mem_BoosterFreq = input[6]; +- activity_monitor->Mem_PD_Data_limit_c = input[7]; +- activity_monitor->Mem_PD_Data_error_coeff = input[8]; +- activity_monitor->Mem_PD_Data_error_rate_coeff = input[9]; +- break; +- default: +- return -EINVAL; +- } ++static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) ++{ ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int ret; + +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), true); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ smu_cmn_get_backend_workload_mask(smu, workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); ++ ++ if (custom_enabled) { ++ ret = sienna_cichlid_set_power_profile_mode_coeff(smu, ++ smu->custom_profile_input, ++ smu->custom_profile_size); ++ if (ret) + return ret; +- } + } + +- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ +- workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); -+ profile_mode); - if (workload_type < 0) - return -EINVAL; - -+ if (enable) -+ smu->workload_mask |= (1 << workload_type); -+ else -+ smu->workload_mask &= ~(1 << workload_type); +- if (workload_type < 0) +- return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - smu->workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); -- else -- smu_cmn_assign_power_profile(smu); +- 1 << workload_type, NULL); +- if (ret) +- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); ++ backend_workload_mask, NULL); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", ++ workload_mask); ++ return ret; ++ } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c -index 952ee22cbc90..a123ae7809ec 100644 +index 1fe020f1f4db..85e2f926087b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c -@@ -1054,7 +1054,8 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, +@@ -1054,42 +1054,27 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu, return size; } -static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) -+static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, -+ uint32_t size, bool enable) ++static int vangogh_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) { - int workload_type, ret; - uint32_t profile_mode = input[size]; -@@ -1065,7 +1066,7 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, - } +- int workload_type, ret; +- uint32_t profile_mode = input[size]; ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int ret; - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || -- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) -+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ -@@ -1078,18 +1079,18 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long 
*input, - return -EINVAL; - } - -+ if (enable) -+ smu->workload_mask |= (1 << workload_type); -+ else -+ smu->workload_mask &= ~(1 << workload_type); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - smu->workload_mask, - NULL); -- if (ret) { -+ if (ret) - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", - workload_type); -- return ret; +- if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { +- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); +- return -EINVAL; - } - -- smu_cmn_assign_power_profile(smu); +- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || +- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) +- return 0; +- +- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ +- workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, +- profile_mode); +- if (workload_type < 0) { +- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n", +- profile_mode); +- return -EINVAL; +- } ++ smu_cmn_get_backend_workload_mask(smu, workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, +- 1 << workload_type, +- NULL); ++ backend_workload_mask, ++ NULL); + if (ret) { +- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", +- workload_type); ++ dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n", ++ workload_mask); + return ret; + } + +- smu->power_profile_mode = profile_mode; +- - return 0; + return ret; } static int vangogh_set_soft_freq_limited_range(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c -index 62316a6707ef..25779abc5447 100644 +index cc0504b063fa..70dd631c46dc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c -@@ -862,7 +862,8 @@ static int renoir_force_clk_levels(struct smu_context *smu, +@@ -862,44 +862,27 @@ static int renoir_force_clk_levels(struct smu_context *smu, return ret; } -static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) -+static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, -+ uint32_t size, bool enable) ++static int renoir_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) { - int workload_type, ret; - uint32_t profile_mode = input[size]; -@@ -873,7 +874,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u - } +- int workload_type, ret; +- uint32_t profile_mode = input[size]; ++ int ret; ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; - if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || -- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) -+ profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) - return 0; - - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ -@@ -889,17 +890,17 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u - return -EINVAL; - } - -+ if (enable) -+ smu->workload_mask |= (1 << workload_type); -+ else -+ smu->workload_mask &= ~(1 << workload_type); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - smu->workload_mask, - NULL); -- if (ret) { -+ if (ret) - dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); -- return ret; +- if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { +- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); +- 
return -EINVAL;
- }
-
- if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
- profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
- return 0;
-
- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
- workload_type = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_WORKLOAD,
- profile_mode);
- if (workload_type < 0) {
- /*
- * TODO: If some case need switch to powersave/default power mode
- * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
- */
- dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
- return -EINVAL;
- }
++ smu_cmn_get_backend_workload_mask(smu, workload_mask,
++ &backend_workload_mask,
++ &custom_enabled);

 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
- 1 << workload_type,
- NULL);
++ backend_workload_mask,
++ NULL);
 if (ret) {
- dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
++ dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
++ workload_mask);
 return ret;
 }

- smu->power_profile_mode = profile_mode;
-
- return 0;
+ return ret;
@@ -13581,237 +14135,552 @@ index 62316a6707ef..25779abc5447 100644
 static int renoir_set_peak_clock_by_device(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
-index 5dd7ceca64fe..6861267b68fb 100644
+index d53e162dcd8d..dc08f8fd0f31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2477,82 +2477,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
 return size;
 }
 
-static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
- long *input,
- uint32_t size)
+static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
+ long *input,
+ uint32_t size)
 {
 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
 DpmActivityMonitorCoeffInt_t *activity_monitor =
 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
- int workload_type, ret = 0;
- u32 workload_mask, selected_workload_mask;
-
- smu->power_profile_mode = input[size];
++ int ret;
+
- if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
- dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
++ if (size != 9)
 return -EINVAL;
- }
-
- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size != 9)
- return -EINVAL;
 
- ret = smu_cmn_update_table(smu,
- SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor_external),
- false);
- if (ret) {
- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
- return ret;
- }
++ ret = smu_cmn_update_table(smu,
++ SMU_TABLE_ACTIVITY_MONITOR_COEFF,
++ WORKLOAD_PPLIB_CUSTOM_BIT,
++ (void *)(&activity_monitor_external),
++ false);
++ if (ret) {
++ dev_err(smu->adev->dev, "[%s] Failed to get activity 
monitor!", __func__); ++ return ret; ++ } + +- switch (input[0]) { +- case 0: /* Gfxclk */ +- activity_monitor->Gfx_FPS = input[1]; +- activity_monitor->Gfx_MinActiveFreqType = input[2]; +- activity_monitor->Gfx_MinActiveFreq = input[3]; +- activity_monitor->Gfx_BoosterFreqType = input[4]; +- activity_monitor->Gfx_BoosterFreq = input[5]; +- activity_monitor->Gfx_PD_Data_limit_c = input[6]; +- activity_monitor->Gfx_PD_Data_error_coeff = input[7]; +- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; +- break; +- case 1: /* Fclk */ +- activity_monitor->Fclk_FPS = input[1]; +- activity_monitor->Fclk_MinActiveFreqType = input[2]; +- activity_monitor->Fclk_MinActiveFreq = input[3]; +- activity_monitor->Fclk_BoosterFreqType = input[4]; +- activity_monitor->Fclk_BoosterFreq = input[5]; +- activity_monitor->Fclk_PD_Data_limit_c = input[6]; +- activity_monitor->Fclk_PD_Data_error_coeff = input[7]; +- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; +- break; +- default: +- return -EINVAL; +- } ++ switch (input[0]) { ++ case 0: /* Gfxclk */ ++ activity_monitor->Gfx_FPS = input[1]; ++ activity_monitor->Gfx_MinActiveFreqType = input[2]; ++ activity_monitor->Gfx_MinActiveFreq = input[3]; ++ activity_monitor->Gfx_BoosterFreqType = input[4]; ++ activity_monitor->Gfx_BoosterFreq = input[5]; ++ activity_monitor->Gfx_PD_Data_limit_c = input[6]; ++ activity_monitor->Gfx_PD_Data_error_coeff = input[7]; ++ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; ++ break; ++ case 1: /* Fclk */ ++ activity_monitor->Fclk_FPS = input[1]; ++ activity_monitor->Fclk_MinActiveFreqType = input[2]; ++ activity_monitor->Fclk_MinActiveFreq = input[3]; ++ activity_monitor->Fclk_BoosterFreqType = input[4]; ++ activity_monitor->Fclk_BoosterFreq = input[5]; ++ activity_monitor->Fclk_PD_Data_limit_c = input[6]; ++ activity_monitor->Fclk_PD_Data_error_coeff = input[7]; ++ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; ++ break; ++ default: ++ return -EINVAL; ++ } + +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, +- WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), +- true); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); +- return ret; +- } ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, ++ WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), ++ true); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ return ret; } -- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { -+ if (enable && profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 9) - return -EINVAL; - -@@ -2547,13 +2547,18 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, +- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ +- workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); -+ profile_mode); ++ return ret; ++} - if (workload_type < 0) - return -EINVAL; +- if (workload_type < 0) +- return -EINVAL; ++static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) ++{ ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int workload_type, ret; - workload_mask = 1 << workload_type; +- selected_workload_mask = workload_mask = 1 << workload_type; ++ 
smu_cmn_get_backend_workload_mask(smu, workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); -+ if (enable) -+ smu->workload_mask |= workload_mask; -+ else -+ smu->workload_mask &= ~workload_mask; -+ /* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */ if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) && - ((smu->adev->pm.fw_version == 0x004e6601) || -@@ -2564,25 +2569,13 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, +@@ -2564,15 +2558,26 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, CMN2ASIC_MAPPING_WORKLOAD, PP_SMC_POWER_PROFILE_POWERSAVING); if (workload_type >= 0) - workload_mask |= 1 << workload_type; -+ smu->workload_mask |= 1 << workload_type; ++ backend_workload_mask |= 1 << workload_type; ++ } ++ ++ if (custom_enabled) { ++ ret = smu_v13_0_0_set_power_profile_mode_coeff(smu, ++ smu->custom_profile_input, ++ smu->custom_profile_size); ++ if (ret) ++ return ret; } -- smu->workload_mask |= workload_mask; ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetWorkloadMask, - smu->workload_mask, - NULL); -- if (!ret) { -- smu_cmn_assign_power_profile(smu); -- if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { -- workload_type = smu_cmn_to_asic_specific_index(smu, -- CMN2ASIC_MAPPING_WORKLOAD, -- PP_SMC_POWER_PROFILE_FULLSCREEN3D); -- smu->power_profile_mode = smu->workload_mask & (1 << workload_type) -- ? PP_SMC_POWER_PROFILE_FULLSCREEN3D -- : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; -- } -- } +- SMU_MSG_SetWorkloadMask, +- workload_mask, +- NULL); +- if (!ret) +- smu->workload_mask = selected_workload_mask; ++ SMU_MSG_SetWorkloadMask, ++ backend_workload_mask, ++ NULL); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", ++ workload_mask); ++ return ret; ++ } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c -index 9d0b19419de0..bf1f8e63e228 100644 +index b891a5e0a396..a10e66a691ec 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c -@@ -2434,22 +2434,23 @@ do { \ +@@ -2434,78 +2434,87 @@ do { \ return result; } -static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) -+static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, -+ long *input, uint32_t size, -+ bool enable) ++static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu, ++ long *input, uint32_t size) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); -+ uint32_t profile_mode = input[size]; - int workload_type, ret = 0; +- int workload_type, ret = 0; ++ int ret; - smu->power_profile_mode = input[size]; -- ++ if (size != 8) ++ return -EINVAL; + - if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); -+ if (profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { -+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), false); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); ++ return ret; ++ } ++ ++ switch (input[0]) { ++ case 0: /* Gfxclk */ ++ 
activity_monitor->Gfx_ActiveHystLimit = input[1]; ++ activity_monitor->Gfx_IdleHystLimit = input[2]; ++ activity_monitor->Gfx_FPS = input[3]; ++ activity_monitor->Gfx_MinActiveFreqType = input[4]; ++ activity_monitor->Gfx_BoosterFreqType = input[5]; ++ activity_monitor->Gfx_MinActiveFreq = input[6]; ++ activity_monitor->Gfx_BoosterFreq = input[7]; ++ break; ++ case 1: /* Fclk */ ++ activity_monitor->Fclk_ActiveHystLimit = input[1]; ++ activity_monitor->Fclk_IdleHystLimit = input[2]; ++ activity_monitor->Fclk_FPS = input[3]; ++ activity_monitor->Fclk_MinActiveFreqType = input[4]; ++ activity_monitor->Fclk_BoosterFreqType = input[5]; ++ activity_monitor->Fclk_MinActiveFreq = input[6]; ++ activity_monitor->Fclk_BoosterFreq = input[7]; ++ break; ++ default: return -EINVAL; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { -+ if (enable && profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 8) - return -EINVAL; +- if (size != 8) +- return -EINVAL; ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), true); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ return ret; ++ } -@@ -2496,17 +2497,19 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), false); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); +- return ret; +- } ++ return ret; ++} + +- switch (input[0]) { +- case 0: /* Gfxclk */ +- activity_monitor->Gfx_ActiveHystLimit = input[1]; +- activity_monitor->Gfx_IdleHystLimit = input[2]; +- activity_monitor->Gfx_FPS = input[3]; +- activity_monitor->Gfx_MinActiveFreqType = input[4]; +- activity_monitor->Gfx_BoosterFreqType = input[5]; +- activity_monitor->Gfx_MinActiveFreq = input[6]; +- activity_monitor->Gfx_BoosterFreq = input[7]; +- break; +- case 1: /* Fclk */ +- activity_monitor->Fclk_ActiveHystLimit = input[1]; +- activity_monitor->Fclk_IdleHystLimit = input[2]; +- activity_monitor->Fclk_FPS = input[3]; +- activity_monitor->Fclk_MinActiveFreqType = input[4]; +- activity_monitor->Fclk_BoosterFreqType = input[5]; +- activity_monitor->Fclk_MinActiveFreq = input[6]; +- activity_monitor->Fclk_BoosterFreq = input[7]; +- break; +- default: +- return -EINVAL; +- } ++static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) ++{ ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int ret; + +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), true); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ smu_cmn_get_backend_workload_mask(smu, workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); ++ ++ if (custom_enabled) { ++ ret = smu_v13_0_7_set_power_profile_mode_coeff(smu, ++ smu->custom_profile_input, ++ smu->custom_profile_size); ++ if (ret) + return ret; +- } + } + +- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ +- workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, - smu->power_profile_mode); -+ profile_mode); - if (workload_type < 0) - return -EINVAL; - -+ if 
(enable) -+ smu->workload_mask |= (1 << workload_type); -+ else -+ smu->workload_mask &= ~(1 << workload_type); +- if (workload_type < 0) +- return -EINVAL; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - smu->workload_mask, NULL); +- 1 << workload_type, NULL); ++ backend_workload_mask, NULL); - if (ret) - dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); +- if (ret) +- dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); - else -- smu_cmn_assign_power_profile(smu); +- smu->workload_mask = (1 << workload_type); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", ++ workload_mask); ++ return ret; ++ } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c -index 1aa13d32ceb2..e9c75caaebd7 100644 +index 1e16a281f2dc..aa147105a742 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c -@@ -1731,21 +1731,22 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, +@@ -1729,90 +1729,98 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, + return size; + } - static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, - long *input, +-static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, +- long *input, - uint32_t size) -+ uint32_t size, -+ bool enable) ++static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu, ++ long *input, ++ uint32_t size) { DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); -+ uint32_t profile_mode = input[size]; - int workload_type, ret = 0; - uint32_t current_profile_mode = smu->power_profile_mode; +- int workload_type, ret = 0; +- uint32_t current_profile_mode = smu->power_profile_mode; - smu->power_profile_mode = input[size]; ++ int ret; - if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { - dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); -+ if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { -+ dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); ++ if (size != 9) return -EINVAL; ++ ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, ++ WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), ++ false); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); ++ return ret; } - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { -+ if (enable && profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { - if (size != 9) - return -EINVAL; +- if (size != 9) +- return -EINVAL; ++ switch (input[0]) { ++ case 0: /* Gfxclk */ ++ activity_monitor->Gfx_FPS = input[1]; ++ activity_monitor->Gfx_MinActiveFreqType = input[2]; ++ activity_monitor->Gfx_MinActiveFreq = input[3]; ++ activity_monitor->Gfx_BoosterFreqType = input[4]; ++ activity_monitor->Gfx_BoosterFreq = input[5]; ++ activity_monitor->Gfx_PD_Data_limit_c = input[6]; ++ activity_monitor->Gfx_PD_Data_error_coeff = input[7]; ++ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; ++ break; ++ case 1: /* Fclk */ ++ activity_monitor->Fclk_FPS = input[1]; ++ activity_monitor->Fclk_MinActiveFreqType = input[2]; ++ activity_monitor->Fclk_MinActiveFreq = input[3]; ++ activity_monitor->Fclk_BoosterFreqType = input[4]; ++ 
activity_monitor->Fclk_BoosterFreq = input[5]; ++ activity_monitor->Fclk_PD_Data_limit_c = input[6]; ++ activity_monitor->Fclk_PD_Data_error_coeff = input[7]; ++ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; ++ break; ++ default: ++ return -EINVAL; ++ } -@@ -1795,7 +1796,7 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, - } - } +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, +- WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), +- false); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); +- return ret; +- } ++ ret = smu_cmn_update_table(smu, ++ SMU_TABLE_ACTIVITY_MONITOR_COEFF, ++ WORKLOAD_PPLIB_CUSTOM_BIT, ++ (void *)(&activity_monitor_external), ++ true); ++ if (ret) { ++ dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); ++ return ret; ++ } + +- switch (input[0]) { +- case 0: /* Gfxclk */ +- activity_monitor->Gfx_FPS = input[1]; +- activity_monitor->Gfx_MinActiveFreqType = input[2]; +- activity_monitor->Gfx_MinActiveFreq = input[3]; +- activity_monitor->Gfx_BoosterFreqType = input[4]; +- activity_monitor->Gfx_BoosterFreq = input[5]; +- activity_monitor->Gfx_PD_Data_limit_c = input[6]; +- activity_monitor->Gfx_PD_Data_error_coeff = input[7]; +- activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; +- break; +- case 1: /* Fclk */ +- activity_monitor->Fclk_FPS = input[1]; +- activity_monitor->Fclk_MinActiveFreqType = input[2]; +- activity_monitor->Fclk_MinActiveFreq = input[3]; +- activity_monitor->Fclk_BoosterFreqType = input[4]; +- activity_monitor->Fclk_BoosterFreq = input[5]; +- activity_monitor->Fclk_PD_Data_limit_c = input[6]; +- activity_monitor->Fclk_PD_Data_error_coeff = input[7]; +- activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; +- break; +- default: +- return -EINVAL; +- } ++ return ret; ++} + +- ret = smu_cmn_update_table(smu, +- SMU_TABLE_ACTIVITY_MONITOR_COEFF, +- WORKLOAD_PPLIB_CUSTOM_BIT, +- (void *)(&activity_monitor_external), +- true); +- if (ret) { +- dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); +- return ret; +- } +- } ++static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, ++ u32 workload_mask) ++{ ++ u32 backend_workload_mask = 0; ++ bool custom_enabled = false; ++ int ret; ++ ++ smu_cmn_get_backend_workload_mask(smu, workload_mask, ++ &backend_workload_mask, ++ &custom_enabled); - if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) -+ if (profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) ++ /* disable deep sleep if compute is enabled */ ++ if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE)) smu_v14_0_deep_sleep_control(smu, false); - else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) - smu_v14_0_deep_sleep_control(smu, true); -@@ -1803,15 +1804,16 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, - /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = smu_cmn_to_asic_specific_index(smu, - CMN2ASIC_MAPPING_WORKLOAD, -- smu->power_profile_mode); -+ profile_mode); - if (workload_type < 0) - return -EINVAL; - -+ if (enable) -+ smu->workload_mask |= (1 << workload_type); +- else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) + else -+ smu->workload_mask &= ~(1 << workload_type); - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, -- smu->workload_mask, NULL); -- + smu_v14_0_deep_sleep_control(smu, true); + +- /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ +- 
workload_type = smu_cmn_to_asic_specific_index(smu, +- CMN2ASIC_MAPPING_WORKLOAD, +- smu->power_profile_mode); +- if (workload_type < 0) +- return -EINVAL; ++ if (custom_enabled) { ++ ret = smu_v14_0_2_set_power_profile_mode_coeff(smu, ++ smu->custom_profile_input, ++ smu->custom_profile_size); ++ if (ret) ++ return ret; ++ } + +- ret = smu_cmn_send_smc_msg_with_param(smu, +- SMU_MSG_SetWorkloadMask, +- 1 << workload_type, +- NULL); - if (!ret) -- smu_cmn_assign_power_profile(smu); -+ smu->workload_mask, NULL); +- smu->workload_mask = 1 << workload_type; ++ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, ++ backend_workload_mask, NULL); ++ if (ret) { ++ dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n", ++ workload_mask); ++ return ret; ++ } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c -index bdfc5e617333..91ad434bcdae 100644 +index 91ad434bcdae..79406463a65a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c -@@ -1138,14 +1138,6 @@ int smu_cmn_set_mp1_state(struct smu_context *smu, - return ret; - } - --void smu_cmn_assign_power_profile(struct smu_context *smu) --{ -- uint32_t index; -- index = fls(smu->workload_mask); -- index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; -- smu->power_profile_mode = smu->workload_setting[index]; --} -- - bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev) +@@ -1215,3 +1215,33 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy) { - struct pci_dev *p = NULL; + policy->desc = &xgmi_plpd_policy_desc; + } ++ ++void smu_cmn_get_backend_workload_mask(struct smu_context *smu, ++ u32 workload_mask, ++ u32 *backend_workload_mask, ++ bool *custom_enabled) ++{ ++ int workload_type; ++ u32 profile_mode; ++ ++ *custom_enabled = false; ++ *backend_workload_mask = 0; ++ ++ for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) { ++ if (!(workload_mask & (1 << profile_mode))) ++ continue; ++ ++ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ ++ workload_type = smu_cmn_to_asic_specific_index(smu, ++ CMN2ASIC_MAPPING_WORKLOAD, ++ profile_mode); ++ ++ if (workload_type < 0) ++ continue; ++ ++ *backend_workload_mask |= 1 << workload_type; ++ ++ if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) ++ *custom_enabled = true; ++ } ++} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h -index 8a801e389659..1de685defe85 100644 +index 1de685defe85..8d40c02efa00 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h -@@ -130,8 +130,6 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev); - int smu_cmn_set_mp1_state(struct smu_context *smu, - enum pp_mp1_state mp1_state); +@@ -147,5 +147,10 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev); + void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy); + void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy); --void smu_cmn_assign_power_profile(struct smu_context *smu); -- - /* - * Helper function to make sysfs_emit_at() happy. Align buf to - * the current page boundary and record the offset. 
++void smu_cmn_get_backend_workload_mask(struct smu_context *smu, ++ u32 workload_mask, ++ u32 *backend_workload_mask, ++ bool *custom_enabled); ++ + #endif + #endif diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 855beafb76ff..ad78059ee954 100644 --- a/drivers/gpu/drm/drm_edid.c @@ -14664,83 +15533,55 @@ index 38dbd16da6a9..504e8a7c4e2a 100644 config PAHOLE_VERSION int default $(shell,$(srctree)/scripts/pahole-version.sh $(PAHOLE)) -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 719e0ed1e976..b35752fdbcc0 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -3734,28 +3734,38 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, - */ - static int ttwu_runnable(struct task_struct *p, int wake_flags) +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 9949ffad8df0..8b07576814a5 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -3833,16 +3833,28 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, { -- struct rq_flags rf; -- struct rq *rq; -- int ret = 0; -+ CLASS(__task_rq_lock, rq_guard)(p); -+ struct rq *rq = rq_guard.rq; + bool wait = false; + struct pool_workqueue *pwq; ++ struct worker_pool *current_pool = NULL; -- rq = __task_rq_lock(p, &rf); -- if (task_on_rq_queued(p)) { -- update_rq_clock(rq); -- if (p->se.sched_delayed) -- enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED); -- if (!task_on_cpu(rq, p)) { -- /* -- * When on_rq && !on_cpu the task is preempted, see if -- * it should preempt the task that is current now. -- */ -- wakeup_preempt(rq, p, wake_flags); -+ if (!task_on_rq_queued(p)) -+ return 0; -+ -+ update_rq_clock(rq); -+ if (p->se.sched_delayed) { -+ int queue_flags = ENQUEUE_DELAYED | ENQUEUE_NOCLOCK; -+ -+ /* -+ * Since sched_delayed means we cannot be current anywhere, -+ * dequeue it here and have it fall through to the -+ * select_task_rq() case further along the ttwu() path. -+ */ -+ if (rq->nr_running > 1 && p->nr_cpus_allowed > 1) { -+ dequeue_task(rq, p, DEQUEUE_SLEEP | queue_flags); -+ return 0; - } -- ttwu_do_wakeup(p); -- ret = 1; -+ -+ enqueue_task(rq, p, queue_flags); + if (flush_color >= 0) { + WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); + atomic_set(&wq->nr_pwqs_to_flush, 1); } -- __task_rq_unlock(rq, &rf); -+ if (!task_on_cpu(rq, p)) { -+ /* -+ * When on_rq && !on_cpu the task is preempted, see if -+ * it should preempt the task that is current now. -+ */ -+ wakeup_preempt(rq, p, wake_flags); -+ } -+ ttwu_do_wakeup(p); -- return ret; -+ return 1; - } ++ /* ++ * For unbound workqueue, pwqs will map to only a few pools. ++ * Most of the time, pwqs within the same pool will be linked ++ * sequentially to wq->pwqs by cpu index. So in the majority ++ * of pwq iters, the pool is the same, only doing lock/unlock ++ * if the pool has changed. This can largely reduce expensive ++ * lock operations. 
++ */
 for_each_pwq(pwq, wq) {
- struct worker_pool *pool = pwq->pool;
-
- raw_spin_lock_irq(&pool->lock);
++ if (current_pool != pwq->pool) {
++ if (likely(current_pool))
++ raw_spin_unlock_irq(&current_pool->lock);
++ current_pool = pwq->pool;
++ raw_spin_lock_irq(&current_pool->lock);
++ }

 if (flush_color >= 0) {
 WARN_ON_ONCE(pwq->flush_color != -1);
@@ -3859,9 +3871,11 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 pwq->work_color = work_color;
 }

- raw_spin_unlock_irq(&pool->lock);
 }

++ if (current_pool)
++ raw_spin_unlock_irq(&current_pool->lock);
+
 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
 complete(&wq->first_flusher->done);

diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 2abc78367dd1..5222c6393f11 100644
...
index f83493838cf9..4010899652b8 100644
...
--
2.47.0

-From 9001aa3709fdcb60967ed205910b873f14eed07b Mon Sep 17 00:00:00 2001
+From b1cd5f38df8ffaf4555bbb9bd1f5f0da04f0f181 Mon Sep 17 00:00:00 2001
 From: Peter Jung
-Date: Mon, 11 Nov 2024 09:20:32 +0100
-Subject: [PATCH 08/13] ksm
+Date: Mon, 18 Nov 2024 13:24:49 +0100
+Subject: [PATCH 08/12] ntsync
 
 Signed-off-by: Peter Jung
 ---
- arch/alpha/kernel/syscalls/syscall.tbl | 3 +
- arch/arm/tools/syscall.tbl | 3 +
- arch/m68k/kernel/syscalls/syscall.tbl | 3 +
- arch/microblaze/kernel/syscalls/syscall.tbl | 3 +
- arch/mips/kernel/syscalls/syscall_n32.tbl | 3 +
- arch/mips/kernel/syscalls/syscall_n64.tbl | 3 +
- arch/mips/kernel/syscalls/syscall_o32.tbl | 3 +
- arch/parisc/kernel/syscalls/syscall.tbl | 3 +
- arch/powerpc/kernel/syscalls/syscall.tbl | 3 +
- arch/s390/kernel/syscalls/syscall.tbl | 3 +
- arch/sh/kernel/syscalls/syscall.tbl | 3 +
- arch/sparc/kernel/syscalls/syscall.tbl | 3 +
- arch/x86/entry/syscalls/syscall_32.tbl | 3 +
- arch/x86/entry/syscalls/syscall_64.tbl | 3 +
- arch/xtensa/kernel/syscalls/syscall.tbl | 3 +
- include/linux/syscalls.h | 3 +
- include/uapi/asm-generic/unistd.h | 9 +-
- kernel/sys.c | 138 ++++++++++++++++++
- kernel/sys_ni.c | 3 +
- scripts/syscall.tbl | 3 +
- .../arch/powerpc/entry/syscalls/syscall.tbl | 3 +
- .../perf/arch/s390/entry/syscalls/syscall.tbl | 3 +
- 22 files changed, 206 insertions(+), 1 deletion(-)
-
-diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
-index 74720667fe09..e6a11f3c0a2e 100644
---- a/arch/alpha/kernel/syscalls/syscall.tbl
-+++ b/arch/alpha/kernel/syscalls/syscall.tbl
-@@ -502,3 +502,6 @@
- 570 common lsm_set_self_attr sys_lsm_set_self_attr
- 571 common lsm_list_modules sys_lsm_list_modules
- 572 common mseal sys_mseal
-+573 common process_ksm_enable sys_process_ksm_enable
-+574 common process_ksm_disable sys_process_ksm_disable
-+575 common process_ksm_status sys_process_ksm_status
-diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
-index 23c98203c40f..10a3099decbe 100644
---- a/arch/arm/tools/syscall.tbl
-+++ 
b/arch/arm/tools/syscall.tbl -@@ -477,3 +477,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl -index 22a3cbd4c602..12d2c7594bf0 100644 ---- a/arch/m68k/kernel/syscalls/syscall.tbl -+++ b/arch/m68k/kernel/syscalls/syscall.tbl -@@ -462,3 +462,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl -index 2b81a6bd78b2..e2a93c856eed 100644 ---- a/arch/microblaze/kernel/syscalls/syscall.tbl -+++ b/arch/microblaze/kernel/syscalls/syscall.tbl -@@ -468,3 +468,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl -index 953f5b7dc723..b921fbf56fa6 100644 ---- a/arch/mips/kernel/syscalls/syscall_n32.tbl -+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl -@@ -401,3 +401,6 @@ - 460 n32 lsm_set_self_attr sys_lsm_set_self_attr - 461 n32 lsm_list_modules sys_lsm_list_modules - 462 n32 mseal sys_mseal -+463 n32 process_ksm_enable sys_process_ksm_enable -+464 n32 process_ksm_disable sys_process_ksm_disable -+465 n32 process_ksm_status sys_process_ksm_status -diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl -index 1464c6be6eb3..8d7f9ddd66f4 100644 ---- a/arch/mips/kernel/syscalls/syscall_n64.tbl -+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl -@@ -377,3 +377,6 @@ - 460 n64 lsm_set_self_attr sys_lsm_set_self_attr - 461 n64 lsm_list_modules sys_lsm_list_modules - 462 n64 mseal sys_mseal -+463 n64 process_ksm_enable sys_process_ksm_enable -+464 n64 process_ksm_disable sys_process_ksm_disable -+465 n64 process_ksm_status sys_process_ksm_status -diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl -index 2439a2491cff..9d6142739954 100644 ---- a/arch/mips/kernel/syscalls/syscall_o32.tbl -+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl -@@ -450,3 +450,6 @@ - 460 o32 lsm_set_self_attr sys_lsm_set_self_attr - 461 o32 lsm_list_modules sys_lsm_list_modules - 462 o32 mseal sys_mseal -+463 o32 process_ksm_enable sys_process_ksm_enable -+464 o32 process_ksm_disable sys_process_ksm_disable -+465 o32 process_ksm_status sys_process_ksm_status -diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl -index 66dc406b12e4..9d46476fd908 100644 ---- a/arch/parisc/kernel/syscalls/syscall.tbl -+++ b/arch/parisc/kernel/syscalls/syscall.tbl -@@ -461,3 +461,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common 
process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl -index ebae8415dfbb..16f71bc2f6f0 100644 ---- a/arch/powerpc/kernel/syscalls/syscall.tbl -+++ b/arch/powerpc/kernel/syscalls/syscall.tbl -@@ -553,3 +553,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl -index 01071182763e..7394bad8178e 100644 ---- a/arch/s390/kernel/syscalls/syscall.tbl -+++ b/arch/s390/kernel/syscalls/syscall.tbl -@@ -465,3 +465,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status sys_process_ksm_status -diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl -index c55fd7696d40..b9fc31221b87 100644 ---- a/arch/sh/kernel/syscalls/syscall.tbl -+++ b/arch/sh/kernel/syscalls/syscall.tbl -@@ -466,3 +466,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl -index cfdfb3707c16..0d79fd772854 100644 ---- a/arch/sparc/kernel/syscalls/syscall.tbl -+++ b/arch/sparc/kernel/syscalls/syscall.tbl -@@ -508,3 +508,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl -index 534c74b14fab..c546a30575f1 100644 ---- a/arch/x86/entry/syscalls/syscall_32.tbl -+++ b/arch/x86/entry/syscalls/syscall_32.tbl -@@ -468,3 +468,6 @@ - 460 i386 lsm_set_self_attr sys_lsm_set_self_attr - 461 i386 lsm_list_modules sys_lsm_list_modules - 462 i386 mseal sys_mseal -+463 i386 process_ksm_enable sys_process_ksm_enable -+464 i386 process_ksm_disable sys_process_ksm_disable -+465 i386 process_ksm_status sys_process_ksm_status -diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl -index 7093ee21c0d1..0fcd10ba8dfe 100644 ---- a/arch/x86/entry/syscalls/syscall_64.tbl -+++ b/arch/x86/entry/syscalls/syscall_64.tbl -@@ -386,6 +386,9 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status - - # - # Due to a historical design error, certain syscalls are 
numbered differently -diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl -index 67083fc1b2f5..c1aecee4ad9b 100644 ---- a/arch/xtensa/kernel/syscalls/syscall.tbl -+++ b/arch/xtensa/kernel/syscalls/syscall.tbl -@@ -433,3 +433,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h -index 5758104921e6..cc9c4fac2412 100644 ---- a/include/linux/syscalls.h -+++ b/include/linux/syscalls.h -@@ -818,6 +818,9 @@ asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); - asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, - size_t vlen, int behavior, unsigned int flags); - asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); -+asmlinkage long sys_process_ksm_enable(int pidfd, unsigned int flags); -+asmlinkage long sys_process_ksm_disable(int pidfd, unsigned int flags); -+asmlinkage long sys_process_ksm_status(int pidfd, unsigned int flags); - asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, - unsigned long prot, unsigned long pgoff, - unsigned long flags); -diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h -index 5bf6148cac2b..613e559ad6e0 100644 ---- a/include/uapi/asm-generic/unistd.h -+++ b/include/uapi/asm-generic/unistd.h -@@ -841,8 +841,15 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) - #define __NR_mseal 462 - __SYSCALL(__NR_mseal, sys_mseal) - -+#define __NR_process_ksm_enable 463 -+__SYSCALL(__NR_process_ksm_enable, sys_process_ksm_enable) -+#define __NR_process_ksm_disable 464 -+__SYSCALL(__NR_process_ksm_disable, sys_process_ksm_disable) -+#define __NR_process_ksm_status 465 -+__SYSCALL(__NR_process_ksm_status, sys_process_ksm_status) -+ - #undef __NR_syscalls --#define __NR_syscalls 463 -+#define __NR_syscalls 466 - - /* - * 32 bit systems traditionally used different -diff --git a/kernel/sys.c b/kernel/sys.c -index 4da31f28fda8..fcd3aeaddd05 100644 ---- a/kernel/sys.c -+++ b/kernel/sys.c -@@ -2791,6 +2791,144 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, - return error; - } - -+#ifdef CONFIG_KSM -+enum pkc_action { -+ PKSM_ENABLE = 0, -+ PKSM_DISABLE, -+ PKSM_STATUS, -+}; -+ -+static long do_process_ksm_control(int pidfd, enum pkc_action action) -+{ -+ long ret; -+ struct task_struct *task; -+ struct mm_struct *mm; -+ unsigned int f_flags; -+ -+ task = pidfd_get_task(pidfd, &f_flags); -+ if (IS_ERR(task)) { -+ ret = PTR_ERR(task); -+ goto out; -+ } -+ -+ /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ -+ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); -+ if (IS_ERR_OR_NULL(mm)) { -+ ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; -+ goto release_task; -+ } -+ -+ /* Require CAP_SYS_NICE for influencing process performance. 
*/ -+ if (!capable(CAP_SYS_NICE)) { -+ ret = -EPERM; -+ goto release_mm; -+ } -+ -+ if (mmap_write_lock_killable(mm)) { -+ ret = -EINTR; -+ goto release_mm; -+ } -+ -+ switch (action) { -+ case PKSM_ENABLE: -+ ret = ksm_enable_merge_any(mm); -+ break; -+ case PKSM_DISABLE: -+ ret = ksm_disable_merge_any(mm); -+ break; -+ case PKSM_STATUS: -+ ret = !!test_bit(MMF_VM_MERGE_ANY, &mm->flags); -+ break; -+ } -+ -+ mmap_write_unlock(mm); -+ -+release_mm: -+ mmput(mm); -+release_task: -+ put_task_struct(task); -+out: -+ return ret; -+} -+#endif /* CONFIG_KSM */ -+ -+SYSCALL_DEFINE2(process_ksm_enable, int, pidfd, unsigned int, flags) -+{ -+#ifdef CONFIG_KSM -+ if (flags != 0) -+ return -EINVAL; -+ -+ return do_process_ksm_control(pidfd, PKSM_ENABLE); -+#else /* CONFIG_KSM */ -+ return -ENOSYS; -+#endif /* CONFIG_KSM */ -+} -+ -+SYSCALL_DEFINE2(process_ksm_disable, int, pidfd, unsigned int, flags) -+{ -+#ifdef CONFIG_KSM -+ if (flags != 0) -+ return -EINVAL; -+ -+ return do_process_ksm_control(pidfd, PKSM_DISABLE); -+#else /* CONFIG_KSM */ -+ return -ENOSYS; -+#endif /* CONFIG_KSM */ -+} -+ -+SYSCALL_DEFINE2(process_ksm_status, int, pidfd, unsigned int, flags) -+{ -+#ifdef CONFIG_KSM -+ if (flags != 0) -+ return -EINVAL; -+ -+ return do_process_ksm_control(pidfd, PKSM_STATUS); -+#else /* CONFIG_KSM */ -+ return -ENOSYS; -+#endif /* CONFIG_KSM */ -+} -+ -+#ifdef CONFIG_KSM -+static ssize_t process_ksm_enable_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%u\n", __NR_process_ksm_enable); -+} -+static struct kobj_attribute process_ksm_enable_attr = __ATTR_RO(process_ksm_enable); -+ -+static ssize_t process_ksm_disable_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%u\n", __NR_process_ksm_disable); -+} -+static struct kobj_attribute process_ksm_disable_attr = __ATTR_RO(process_ksm_disable); -+ -+static ssize_t process_ksm_status_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%u\n", __NR_process_ksm_status); -+} -+static struct kobj_attribute process_ksm_status_attr = __ATTR_RO(process_ksm_status); -+ -+static struct attribute *process_ksm_sysfs_attrs[] = { -+ &process_ksm_enable_attr.attr, -+ &process_ksm_disable_attr.attr, -+ &process_ksm_status_attr.attr, -+ NULL, -+}; -+ -+static const struct attribute_group process_ksm_sysfs_attr_group = { -+ .attrs = process_ksm_sysfs_attrs, -+ .name = "process_ksm", -+}; -+ -+static int __init process_ksm_sysfs_init(void) -+{ -+ return sysfs_create_group(kernel_kobj, &process_ksm_sysfs_attr_group); -+} -+subsys_initcall(process_ksm_sysfs_init); -+#endif /* CONFIG_KSM */ -+ - SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, - struct getcpu_cache __user *, unused) - { -diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c -index c00a86931f8c..d82213d68522 100644 ---- a/kernel/sys_ni.c -+++ b/kernel/sys_ni.c -@@ -186,6 +186,9 @@ COND_SYSCALL(mincore); - COND_SYSCALL(madvise); - COND_SYSCALL(process_madvise); - COND_SYSCALL(process_mrelease); -+COND_SYSCALL(process_ksm_enable); -+COND_SYSCALL(process_ksm_disable); -+COND_SYSCALL(process_ksm_status); - COND_SYSCALL(remap_file_pages); - COND_SYSCALL(mbind); - COND_SYSCALL(get_mempolicy); -diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl -index 845e24eb372e..227d9cc12365 100644 ---- a/scripts/syscall.tbl -+++ b/scripts/syscall.tbl -@@ -403,3 +403,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules 
sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl -index ebae8415dfbb..16f71bc2f6f0 100644 ---- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl -+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl -@@ -553,3 +553,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status -diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl -index 01071182763e..7394bad8178e 100644 ---- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl -+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl -@@ -465,3 +465,6 @@ - 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr - 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules - 462 common mseal sys_mseal sys_mseal -+463 common process_ksm_enable sys_process_ksm_enable sys_process_ksm_enable -+464 common process_ksm_disable sys_process_ksm_disable sys_process_ksm_disable -+465 common process_ksm_status sys_process_ksm_status sys_process_ksm_status --- -2.47.0 - -From fe2e45be58ad904ff3ab40356c10d73e057c18fd Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 11 Nov 2024 09:20:44 +0100 -Subject: [PATCH 09/13] ntsync +Date: Mon, 18 Nov 2024 13:24:49 +0100 +Subject: [PATCH 08/12] ntsync Signed-off-by: Peter Jung --- @@ -15648,7 +16056,7 @@ index 000000000000..767844637a7d + ``objs`` and in ``alert``. If this is attempted, the function fails + with ``EINVAL``. 
diff --git a/MAINTAINERS b/MAINTAINERS -index 3d4709c29704..3ca514d82269 100644 +index 97802662e8d8..889e074c143b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -16501,6 +16501,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git @@ -18295,9960 +18703,10 @@ index 000000000000..5fa2c9a0768c -- 2.47.0 -From b71dfd054ca2932d63b75136a3191c01e74a374c Mon Sep 17 00:00:00 2001 +From ebc7783bf7cb1cd5026930a23092d7fbacdd22e5 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:20:57 +0100 -Subject: [PATCH 10/13] openvpn-dco - -Signed-off-by: Peter Jung ---- - Documentation/netlink/specs/ovpn.yaml | 362 +++ - MAINTAINERS | 11 + - drivers/net/Kconfig | 14 + - drivers/net/Makefile | 1 + - drivers/net/ovpn/Makefile | 22 + - drivers/net/ovpn/bind.c | 54 + - drivers/net/ovpn/bind.h | 117 + - drivers/net/ovpn/crypto.c | 214 ++ - drivers/net/ovpn/crypto.h | 145 + - drivers/net/ovpn/crypto_aead.c | 386 +++ - drivers/net/ovpn/crypto_aead.h | 33 + - drivers/net/ovpn/io.c | 462 ++++ - drivers/net/ovpn/io.h | 25 + - drivers/net/ovpn/main.c | 337 +++ - drivers/net/ovpn/main.h | 24 + - drivers/net/ovpn/netlink-gen.c | 212 ++ - drivers/net/ovpn/netlink-gen.h | 41 + - drivers/net/ovpn/netlink.c | 1135 ++++++++ - drivers/net/ovpn/netlink.h | 18 + - drivers/net/ovpn/ovpnstruct.h | 61 + - drivers/net/ovpn/packet.h | 40 + - drivers/net/ovpn/peer.c | 1201 +++++++++ - drivers/net/ovpn/peer.h | 165 ++ - drivers/net/ovpn/pktid.c | 130 + - drivers/net/ovpn/pktid.h | 87 + - drivers/net/ovpn/proto.h | 104 + - drivers/net/ovpn/skb.h | 56 + - drivers/net/ovpn/socket.c | 178 ++ - drivers/net/ovpn/socket.h | 55 + - drivers/net/ovpn/stats.c | 21 + - drivers/net/ovpn/stats.h | 47 + - drivers/net/ovpn/tcp.c | 506 ++++ - drivers/net/ovpn/tcp.h | 44 + - drivers/net/ovpn/udp.c | 406 +++ - drivers/net/ovpn/udp.h | 26 + - include/net/netlink.h | 1 + - include/uapi/linux/if_link.h | 15 + - include/uapi/linux/ovpn.h | 109 + - include/uapi/linux/udp.h | 1 + - tools/net/ynl/ynl-gen-c.py | 2 + - tools/testing/selftests/Makefile | 1 + - tools/testing/selftests/net/ovpn/.gitignore | 2 + - tools/testing/selftests/net/ovpn/Makefile | 17 + - tools/testing/selftests/net/ovpn/config | 10 + - tools/testing/selftests/net/ovpn/data64.key | 5 + - tools/testing/selftests/net/ovpn/ovpn-cli.c | 2370 +++++++++++++++++ - .../testing/selftests/net/ovpn/tcp_peers.txt | 5 + - .../selftests/net/ovpn/test-chachapoly.sh | 9 + - .../testing/selftests/net/ovpn/test-float.sh | 9 + - tools/testing/selftests/net/ovpn/test-tcp.sh | 9 + - tools/testing/selftests/net/ovpn/test.sh | 183 ++ - .../testing/selftests/net/ovpn/udp_peers.txt | 5 + - 52 files changed, 9493 insertions(+) - create mode 100644 Documentation/netlink/specs/ovpn.yaml - create mode 100644 drivers/net/ovpn/Makefile - create mode 100644 drivers/net/ovpn/bind.c - create mode 100644 drivers/net/ovpn/bind.h - create mode 100644 drivers/net/ovpn/crypto.c - create mode 100644 drivers/net/ovpn/crypto.h - create mode 100644 drivers/net/ovpn/crypto_aead.c - create mode 100644 drivers/net/ovpn/crypto_aead.h - create mode 100644 drivers/net/ovpn/io.c - create mode 100644 drivers/net/ovpn/io.h - create mode 100644 drivers/net/ovpn/main.c - create mode 100644 drivers/net/ovpn/main.h - create mode 100644 drivers/net/ovpn/netlink-gen.c - create mode 100644 drivers/net/ovpn/netlink-gen.h - create mode 100644 drivers/net/ovpn/netlink.c - create mode 100644 drivers/net/ovpn/netlink.h - create mode 100644 drivers/net/ovpn/ovpnstruct.h - create mode 100644 drivers/net/ovpn/packet.h - create 
mode 100644 drivers/net/ovpn/peer.c - create mode 100644 drivers/net/ovpn/peer.h - create mode 100644 drivers/net/ovpn/pktid.c - create mode 100644 drivers/net/ovpn/pktid.h - create mode 100644 drivers/net/ovpn/proto.h - create mode 100644 drivers/net/ovpn/skb.h - create mode 100644 drivers/net/ovpn/socket.c - create mode 100644 drivers/net/ovpn/socket.h - create mode 100644 drivers/net/ovpn/stats.c - create mode 100644 drivers/net/ovpn/stats.h - create mode 100644 drivers/net/ovpn/tcp.c - create mode 100644 drivers/net/ovpn/tcp.h - create mode 100644 drivers/net/ovpn/udp.c - create mode 100644 drivers/net/ovpn/udp.h - create mode 100644 include/uapi/linux/ovpn.h - create mode 100644 tools/testing/selftests/net/ovpn/.gitignore - create mode 100644 tools/testing/selftests/net/ovpn/Makefile - create mode 100644 tools/testing/selftests/net/ovpn/config - create mode 100644 tools/testing/selftests/net/ovpn/data64.key - create mode 100644 tools/testing/selftests/net/ovpn/ovpn-cli.c - create mode 100644 tools/testing/selftests/net/ovpn/tcp_peers.txt - create mode 100755 tools/testing/selftests/net/ovpn/test-chachapoly.sh - create mode 100755 tools/testing/selftests/net/ovpn/test-float.sh - create mode 100755 tools/testing/selftests/net/ovpn/test-tcp.sh - create mode 100755 tools/testing/selftests/net/ovpn/test.sh - create mode 100644 tools/testing/selftests/net/ovpn/udp_peers.txt - -diff --git a/Documentation/netlink/specs/ovpn.yaml b/Documentation/netlink/specs/ovpn.yaml -new file mode 100644 -index 000000000000..79339c25d607 ---- /dev/null -+++ b/Documentation/netlink/specs/ovpn.yaml -@@ -0,0 +1,362 @@ -+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) -+# -+# Author: Antonio Quartulli -+# -+# Copyright (c) 2024, OpenVPN Inc. -+# -+ -+name: ovpn -+ -+protocol: genetlink -+ -+doc: Netlink protocol to control OpenVPN network devices -+ -+definitions: -+ - -+ type: const -+ name: nonce-tail-size -+ value: 8 -+ - -+ type: enum -+ name: cipher-alg -+ entries: [ none, aes-gcm, chacha20-poly1305 ] -+ - -+ type: enum -+ name: del-peer-reason -+ entries: [ teardown, userspace, expired, transport-error, transport-disconnect ] -+ - -+ type: enum -+ name: key-slot -+ entries: [ primary, secondary ] -+ -+attribute-sets: -+ - -+ name: peer -+ attributes: -+ - -+ name: id -+ type: u32 -+ doc: | -+ The unique ID of the peer. 
To be used to identify peers during -+ operations -+ checks: -+ max: 0xFFFFFF -+ - -+ name: remote-ipv4 -+ type: u32 -+ doc: The remote IPv4 address of the peer -+ byte-order: big-endian -+ display-hint: ipv4 -+ - -+ name: remote-ipv6 -+ type: binary -+ doc: The remote IPv6 address of the peer -+ display-hint: ipv6 -+ checks: -+ exact-len: 16 -+ - -+ name: remote-ipv6-scope-id -+ type: u32 -+ doc: The scope id of the remote IPv6 address of the peer (RFC2553) -+ - -+ name: remote-port -+ type: u16 -+ doc: The remote port of the peer -+ byte-order: big-endian -+ checks: -+ min: 1 -+ - -+ name: socket -+ type: u32 -+ doc: The socket to be used to communicate with the peer -+ - -+ name: vpn-ipv4 -+ type: u32 -+ doc: The IPv4 address assigned to the peer by the server -+ byte-order: big-endian -+ display-hint: ipv4 -+ - -+ name: vpn-ipv6 -+ type: binary -+ doc: The IPv6 address assigned to the peer by the server -+ display-hint: ipv6 -+ checks: -+ exact-len: 16 -+ - -+ name: local-ipv4 -+ type: u32 -+ doc: The local IPv4 to be used to send packets to the peer (UDP only) -+ byte-order: big-endian -+ display-hint: ipv4 -+ - -+ name: local-ipv6 -+ type: binary -+ doc: The local IPv6 to be used to send packets to the peer (UDP only) -+ display-hint: ipv6 -+ checks: -+ exact-len: 16 -+ - -+ name: local-port -+ type: u16 -+ doc: The local port to be used to send packets to the peer (UDP only) -+ byte-order: big-endian -+ checks: -+ min: 1 -+ - -+ name: keepalive-interval -+ type: u32 -+ doc: | -+ The number of seconds after which a keep alive message is sent to the -+ peer -+ - -+ name: keepalive-timeout -+ type: u32 -+ doc: | -+ The number of seconds from the last activity after which the peer is -+ assumed dead -+ - -+ name: del-reason -+ type: u32 -+ doc: The reason why a peer was deleted -+ enum: del-peer-reason -+ - -+ name: vpn-rx-bytes -+ type: uint -+ doc: Number of bytes received over the tunnel -+ - -+ name: vpn-tx-bytes -+ type: uint -+ doc: Number of bytes transmitted over the tunnel -+ - -+ name: vpn-rx-packets -+ type: uint -+ doc: Number of packets received over the tunnel -+ - -+ name: vpn-tx-packets -+ type: uint -+ doc: Number of packets transmitted over the tunnel -+ - -+ name: link-rx-bytes -+ type: uint -+ doc: Number of bytes received at the transport level -+ - -+ name: link-tx-bytes -+ type: uint -+ doc: Number of bytes transmitted at the transport level -+ - -+ name: link-rx-packets -+ type: u32 -+ doc: Number of packets received at the transport level -+ - -+ name: link-tx-packets -+ type: u32 -+ doc: Number of packets transmitted at the transport level -+ - -+ name: keyconf -+ attributes: -+ - -+ name: peer-id -+ type: u32 -+ doc: | -+ The unique ID of the peer. To be used to identify peers during -+ key operations -+ checks: -+ max: 0xFFFFFF -+ - -+ name: slot -+ type: u32 -+ doc: The slot where the key should be stored -+ enum: key-slot -+ - -+ name: key-id -+ doc: | -+ The unique ID of the key. 
Used to fetch the correct key upon
-+          decryption
-+        type: u32
-+        checks:
-+          max: 7
-+      -
-+        name: cipher-alg
-+        type: u32
-+        doc: The cipher to be used when communicating with the peer
-+        enum: cipher-alg
-+      -
-+        name: encrypt-dir
-+        type: nest
-+        doc: Key material for encrypt direction
-+        nested-attributes: keydir
-+      -
-+        name: decrypt-dir
-+        type: nest
-+        doc: Key material for decrypt direction
-+        nested-attributes: keydir
-+  -
-+    name: keydir
-+    attributes:
-+      -
-+        name: cipher-key
-+        type: binary
-+        doc: The actual key to be used by the cipher
-+        checks:
-+          max-len: 256
-+      -
-+        name: nonce-tail
-+        type: binary
-+        doc: |
-+          Random nonce to be concatenated to the packet ID, in order to
-+          obtain the actual cipher IV
-+        checks:
-+          exact-len: nonce-tail-size
-+  -
-+    name: ovpn
-+    attributes:
-+      -
-+        name: ifindex
-+        type: u32
-+        doc: Index of the ovpn interface to operate on
-+      -
-+        name: ifname
-+        type: string
-+        doc: Name of the ovpn interface
-+      -
-+        name: peer
-+        type: nest
-+        doc: |
-+          The peer object containing the attributes of interest for the specific
-+          operation
-+        nested-attributes: peer
-+      -
-+        name: keyconf
-+        type: nest
-+        doc: Peer specific cipher configuration
-+        nested-attributes: keyconf
-+
-+operations:
-+  list:
-+    -
-+      name: peer-new
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Add a remote peer
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - peer
-+    -
-+      name: peer-set
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Modify a remote peer
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - peer
-+    -
-+      name: peer-get
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Retrieve data about existing remote peers (or a specific one)
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - peer
-+        reply:
-+          attributes:
-+            - peer
-+      dump:
-+        request:
-+          attributes:
-+            - ifindex
-+        reply:
-+          attributes:
-+            - peer
-+    -
-+      name: peer-del
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Delete existing remote peer
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - peer
-+    -
-+      name: peer-del-ntf
-+      doc: Notification about a peer being deleted
-+      notify: peer-get
-+      mcgrp: peers
-+
-+    -
-+      name: key-new
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Add a cipher key for a specific peer
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - keyconf
-+    -
-+      name: key-get
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Retrieve non-sensitive data about peer key and cipher
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - keyconf
-+        reply:
-+          attributes:
-+            - keyconf
-+    -
-+      name: key-swap
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Swap primary and secondary session keys for a specific peer
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - keyconf
-+    -
-+      name: key-swap-ntf
-+      notify: key-get
-+      doc: |
-+        Notification about key having exhausted its IV space and requiring
-+        renegotiation
-+      mcgrp: peers
-+    -
-+      name: key-del
-+      attribute-set: ovpn
-+      flags: [ admin-perm ]
-+      doc: Delete cipher key for a specific peer
-+      do:
-+        pre: ovpn-nl-pre-doit
-+        post: ovpn-nl-post-doit
-+        request:
-+          attributes:
-+            - ifindex
-+            - keyconf
-+
-+mcast-groups:
-+  list:
-+    -
-+      name: 
peers -diff --git a/MAINTAINERS b/MAINTAINERS -index 3ca514d82269..f509050e63ed 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -17362,6 +17362,17 @@ F: arch/openrisc/ - F: drivers/irqchip/irq-ompic.c - F: drivers/irqchip/irq-or1k-* - -+OPENVPN DATA CHANNEL OFFLOAD -+M: Antonio Quartulli -+L: openvpn-devel@lists.sourceforge.net (moderated for non-subscribers) -+L: netdev@vger.kernel.org -+S: Supported -+T: git https://github.com/OpenVPN/linux-kernel-ovpn.git -+F: Documentation/netlink/specs/ovpn.yaml -+F: drivers/net/ovpn/ -+F: include/uapi/linux/ovpn.h -+F: tools/testing/selftests/net/ovpn/ -+ - OPENVSWITCH - M: Pravin B Shelar - L: netdev@vger.kernel.org -diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig -index 9920b3a68ed1..ddc65bc1e218 100644 ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -115,6 +115,20 @@ config WIREGUARD_DEBUG - - Say N here unless you know what you're doing. - -+config OVPN -+ tristate "OpenVPN data channel offload" -+ depends on NET && INET -+ select STREAM_PARSER -+ select NET_UDP_TUNNEL -+ select DST_CACHE -+ select CRYPTO -+ select CRYPTO_AES -+ select CRYPTO_GCM -+ select CRYPTO_CHACHA20POLY1305 -+ help -+ This module enhances the performance of the OpenVPN userspace software -+ by offloading the data channel processing to kernelspace. -+ - config EQUALIZER - tristate "EQL (serial line load balancing) support" - help -diff --git a/drivers/net/Makefile b/drivers/net/Makefile -index 13743d0e83b5..5152b3330e28 100644 ---- a/drivers/net/Makefile -+++ b/drivers/net/Makefile -@@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/ - obj-$(CONFIG_IPVTAP) += ipvlan/ - obj-$(CONFIG_DUMMY) += dummy.o - obj-$(CONFIG_WIREGUARD) += wireguard/ -+obj-$(CONFIG_OVPN) += ovpn/ - obj-$(CONFIG_EQUALIZER) += eql.o - obj-$(CONFIG_IFB) += ifb.o - obj-$(CONFIG_MACSEC) += macsec.o -diff --git a/drivers/net/ovpn/Makefile b/drivers/net/ovpn/Makefile -new file mode 100644 -index 000000000000..f4d4bd87c851 ---- /dev/null -+++ b/drivers/net/ovpn/Makefile -@@ -0,0 +1,22 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# -+# ovpn -- OpenVPN data channel offload in kernel space -+# -+# Copyright (C) 2020-2024 OpenVPN, Inc. -+# -+# Author: Antonio Quartulli -+ -+obj-$(CONFIG_OVPN) := ovpn.o -+ovpn-y += bind.o -+ovpn-y += crypto.o -+ovpn-y += crypto_aead.o -+ovpn-y += main.o -+ovpn-y += io.o -+ovpn-y += netlink.o -+ovpn-y += netlink-gen.o -+ovpn-y += peer.o -+ovpn-y += pktid.o -+ovpn-y += socket.o -+ovpn-y += stats.o -+ovpn-y += tcp.o -+ovpn-y += udp.o -diff --git a/drivers/net/ovpn/bind.c b/drivers/net/ovpn/bind.c -new file mode 100644 -index 000000000000..d17d078c5730 ---- /dev/null -+++ b/drivers/net/ovpn/bind.c -@@ -0,0 +1,54 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2012-2024 OpenVPN, Inc. 
-+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "bind.h" -+#include "peer.h" -+ -+/** -+ * ovpn_bind_from_sockaddr - retrieve binding matching sockaddr -+ * @ss: the sockaddr to match -+ * -+ * Return: the bind matching the passed sockaddr if found, NULL otherwise -+ */ -+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss) -+{ -+ struct ovpn_bind *bind; -+ size_t sa_len; -+ -+ if (ss->ss_family == AF_INET) -+ sa_len = sizeof(struct sockaddr_in); -+ else if (ss->ss_family == AF_INET6) -+ sa_len = sizeof(struct sockaddr_in6); -+ else -+ return ERR_PTR(-EAFNOSUPPORT); -+ -+ bind = kzalloc(sizeof(*bind), GFP_ATOMIC); -+ if (unlikely(!bind)) -+ return ERR_PTR(-ENOMEM); -+ -+ memcpy(&bind->remote, ss, sa_len); -+ -+ return bind; -+} -+ -+/** -+ * ovpn_bind_reset - assign new binding to peer -+ * @peer: the peer whose binding has to be replaced -+ * @new: the new bind to assign -+ */ -+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new) -+ __must_hold(&peer->lock) -+{ -+ kfree_rcu(rcu_replace_pointer(peer->bind, new, -+ lockdep_is_held(&peer->lock)), rcu); -+} -diff --git a/drivers/net/ovpn/bind.h b/drivers/net/ovpn/bind.h -new file mode 100644 -index 000000000000..859213d5040d ---- /dev/null -+++ b/drivers/net/ovpn/bind.h -@@ -0,0 +1,117 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2012-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_OVPNBIND_H_ -+#define _NET_OVPN_OVPNBIND_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct ovpn_peer; -+ -+/** -+ * union ovpn_sockaddr - basic transport layer address -+ * @in4: IPv4 address -+ * @in6: IPv6 address -+ */ -+union ovpn_sockaddr { -+ struct sockaddr_in in4; -+ struct sockaddr_in6 in6; -+}; -+ -+/** -+ * struct ovpn_bind - remote peer binding -+ * @remote: the remote peer sockaddress -+ * @local: local endpoint used to talk to the peer -+ * @local.ipv4: local IPv4 used to talk to the peer -+ * @local.ipv6: local IPv6 used to talk to the peer -+ * @rcu: used to schedule RCU cleanup job -+ */ -+struct ovpn_bind { -+ union ovpn_sockaddr remote; /* remote sockaddr */ -+ -+ union { -+ struct in_addr ipv4; -+ struct in6_addr ipv6; -+ } local; -+ -+ struct rcu_head rcu; -+}; -+ -+/** -+ * skb_protocol_to_family - translate skb->protocol to AF_INET or AF_INET6 -+ * @skb: the packet sk_buff to inspect -+ * -+ * Return: AF_INET, AF_INET6 or 0 in case of unknown protocol -+ */ -+static inline unsigned short skb_protocol_to_family(const struct sk_buff *skb) -+{ -+ switch (skb->protocol) { -+ case htons(ETH_P_IP): -+ return AF_INET; -+ case htons(ETH_P_IPV6): -+ return AF_INET6; -+ default: -+ return 0; -+ } -+} -+ -+/** -+ * ovpn_bind_skb_src_match - match packet source with binding -+ * @bind: the binding to match -+ * @skb: the packet to match -+ * -+ * Return: true if the packet source matches the remote peer sockaddr -+ * in the binding -+ */ -+static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind, -+ const struct sk_buff *skb) -+{ -+ const unsigned short family = skb_protocol_to_family(skb); -+ const union ovpn_sockaddr *remote; -+ -+ if (unlikely(!bind)) -+ return false; -+ -+ remote = &bind->remote; -+ -+ if (unlikely(remote->in4.sin_family != family)) -+ return false; -+ -+ switch (family) { -+ case AF_INET: -+ if (unlikely(remote->in4.sin_addr.s_addr != ip_hdr(skb)->saddr)) -+ 
return false; -+ -+ if (unlikely(remote->in4.sin_port != udp_hdr(skb)->source)) -+ return false; -+ break; -+ case AF_INET6: -+ if (unlikely(!ipv6_addr_equal(&remote->in6.sin6_addr, -+ &ipv6_hdr(skb)->saddr))) -+ return false; -+ -+ if (unlikely(remote->in6.sin6_port != udp_hdr(skb)->source)) -+ return false; -+ break; -+ default: -+ return false; -+ } -+ -+ return true; -+} -+ -+struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *sa); -+void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind); -+ -+#endif /* _NET_OVPN_OVPNBIND_H_ */ -diff --git a/drivers/net/ovpn/crypto.c b/drivers/net/ovpn/crypto.c -new file mode 100644 -index 000000000000..a2346bc630be ---- /dev/null -+++ b/drivers/net/ovpn/crypto.c -@@ -0,0 +1,214 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "packet.h" -+#include "pktid.h" -+#include "crypto_aead.h" -+#include "crypto.h" -+ -+static void ovpn_ks_destroy_rcu(struct rcu_head *head) -+{ -+ struct ovpn_crypto_key_slot *ks; -+ -+ ks = container_of(head, struct ovpn_crypto_key_slot, rcu); -+ ovpn_aead_crypto_key_slot_destroy(ks); -+} -+ -+void ovpn_crypto_key_slot_release(struct kref *kref) -+{ -+ struct ovpn_crypto_key_slot *ks; -+ -+ ks = container_of(kref, struct ovpn_crypto_key_slot, refcount); -+ call_rcu(&ks->rcu, ovpn_ks_destroy_rcu); -+} -+ -+/* can only be invoked when all peer references have been dropped (i.e. RCU -+ * release routine) -+ */ -+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs) -+{ -+ struct ovpn_crypto_key_slot *ks; -+ -+ ks = rcu_access_pointer(cs->slots[0]); -+ if (ks) { -+ RCU_INIT_POINTER(cs->slots[0], NULL); -+ ovpn_crypto_key_slot_put(ks); -+ } -+ -+ ks = rcu_access_pointer(cs->slots[1]); -+ if (ks) { -+ RCU_INIT_POINTER(cs->slots[1], NULL); -+ ovpn_crypto_key_slot_put(ks); -+ } -+} -+ -+/* removes the key matching the specified id from the crypto context */ -+void ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id) -+{ -+ struct ovpn_crypto_key_slot *ks = NULL; -+ -+ spin_lock_bh(&cs->lock); -+ if (rcu_access_pointer(cs->slots[0])->key_id == key_id) { -+ ks = rcu_replace_pointer(cs->slots[0], NULL, -+ lockdep_is_held(&cs->lock)); -+ } else if (rcu_access_pointer(cs->slots[1])->key_id == key_id) { -+ ks = rcu_replace_pointer(cs->slots[1], NULL, -+ lockdep_is_held(&cs->lock)); -+ } -+ spin_unlock_bh(&cs->lock); -+ -+ if (ks) -+ ovpn_crypto_key_slot_put(ks); -+} -+ -+/* Reset the ovpn_crypto_state object in a way that is atomic -+ * to RCU readers. 
-+ */ -+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, -+ const struct ovpn_peer_key_reset *pkr) -+{ -+ struct ovpn_crypto_key_slot *old = NULL, *new; -+ u8 idx; -+ -+ if (pkr->slot != OVPN_KEY_SLOT_PRIMARY && -+ pkr->slot != OVPN_KEY_SLOT_SECONDARY) -+ return -EINVAL; -+ -+ new = ovpn_aead_crypto_key_slot_new(&pkr->key); -+ if (IS_ERR(new)) -+ return PTR_ERR(new); -+ -+ spin_lock_bh(&cs->lock); -+ idx = cs->primary_idx; -+ switch (pkr->slot) { -+ case OVPN_KEY_SLOT_PRIMARY: -+ old = rcu_replace_pointer(cs->slots[idx], new, -+ lockdep_is_held(&cs->lock)); -+ break; -+ case OVPN_KEY_SLOT_SECONDARY: -+ old = rcu_replace_pointer(cs->slots[!idx], new, -+ lockdep_is_held(&cs->lock)); -+ break; -+ } -+ spin_unlock_bh(&cs->lock); -+ -+ if (old) -+ ovpn_crypto_key_slot_put(old); -+ -+ return 0; -+} -+ -+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, -+ enum ovpn_key_slot slot) -+{ -+ struct ovpn_crypto_key_slot *ks = NULL; -+ u8 idx; -+ -+ if (slot != OVPN_KEY_SLOT_PRIMARY && -+ slot != OVPN_KEY_SLOT_SECONDARY) { -+ pr_warn("Invalid slot to release: %u\n", slot); -+ return; -+ } -+ -+ spin_lock_bh(&cs->lock); -+ idx = cs->primary_idx; -+ switch (slot) { -+ case OVPN_KEY_SLOT_PRIMARY: -+ ks = rcu_replace_pointer(cs->slots[idx], NULL, -+ lockdep_is_held(&cs->lock)); -+ break; -+ case OVPN_KEY_SLOT_SECONDARY: -+ ks = rcu_replace_pointer(cs->slots[!idx], NULL, -+ lockdep_is_held(&cs->lock)); -+ break; -+ } -+ spin_unlock_bh(&cs->lock); -+ -+ if (!ks) { -+ pr_debug("Key slot already released: %u\n", slot); -+ return; -+ } -+ -+ pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id); -+ ovpn_crypto_key_slot_put(ks); -+} -+ -+/* this swap is not atomic, but there will be a very short time frame where the -+ * old_secondary key won't be available. This should not be a big deal as most -+ * likely both peers are already using the new primary at this point. -+ */ -+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs) -+{ -+ const struct ovpn_crypto_key_slot *old_primary, *old_secondary; -+ u8 idx; -+ -+ spin_lock_bh(&cs->lock); -+ idx = cs->primary_idx; -+ old_primary = rcu_dereference_protected(cs->slots[idx], -+ lockdep_is_held(&cs->lock)); -+ old_secondary = rcu_dereference_protected(cs->slots[!idx], -+ lockdep_is_held(&cs->lock)); -+ /* perform real swap by switching the index of the primary key */ -+ cs->primary_idx = !cs->primary_idx; -+ -+ pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n", -+ old_primary ? old_primary->key_id : -1, -+ old_secondary ? 
old_secondary->key_id : -1);
-+
-+	spin_unlock_bh(&cs->lock);
-+}
-+
-+/**
-+ * ovpn_crypto_config_get - populate keyconf object with non-sensitive key data
-+ * @cs: the crypto state to extract the key data from
-+ * @slot: the specific slot to inspect
-+ * @keyconf: the output object to populate
-+ *
-+ * Return: 0 on success or a negative error code otherwise
-+ */
-+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
-+			   enum ovpn_key_slot slot,
-+			   struct ovpn_key_config *keyconf)
-+{
-+	struct ovpn_crypto_key_slot *ks;
-+	int idx;
-+
-+	switch (slot) {
-+	case OVPN_KEY_SLOT_PRIMARY:
-+		idx = cs->primary_idx;
-+		break;
-+	case OVPN_KEY_SLOT_SECONDARY:
-+		idx = !cs->primary_idx;
-+		break;
-+	default:
-+		return -EINVAL;
-+	}
-+
-+	rcu_read_lock();
-+	ks = rcu_dereference(cs->slots[idx]);
-+	if (!ks || !ovpn_crypto_key_slot_hold(ks)) {
-+		rcu_read_unlock();
-+		return -ENOENT;
-+	}
-+	rcu_read_unlock();
-+
-+	keyconf->cipher_alg = ovpn_aead_crypto_alg(ks);
-+	keyconf->key_id = ks->key_id;
-+
-+	ovpn_crypto_key_slot_put(ks);
-+
-+	return 0;
-+}
-diff --git a/drivers/net/ovpn/crypto.h b/drivers/net/ovpn/crypto.h
-new file mode 100644
-index 000000000000..b7a7be752d54
---- /dev/null
-+++ b/drivers/net/ovpn/crypto.h
-@@ -0,0 +1,145 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/* OpenVPN data channel offload
-+ *
-+ * Copyright (C) 2020-2024 OpenVPN, Inc.
-+ *
-+ * Author: James Yonan
-+ *	    Antonio Quartulli
-+ */
-+
-+#ifndef _NET_OVPN_OVPNCRYPTO_H_
-+#define _NET_OVPN_OVPNCRYPTO_H_
-+
-+#include "packet.h"
-+#include "pktid.h"
-+
-+/* info needed for both encrypt and decrypt directions */
-+struct ovpn_key_direction {
-+	const u8 *cipher_key;
-+	size_t cipher_key_size;
-+	const u8 *nonce_tail; /* only needed for GCM modes */
-+	size_t nonce_tail_size; /* only needed for GCM modes */
-+};
-+
-+/* all info for a particular symmetric key (primary or secondary) */
-+struct ovpn_key_config {
-+	enum ovpn_cipher_alg cipher_alg;
-+	u8 key_id;
-+	struct ovpn_key_direction encrypt;
-+	struct ovpn_key_direction decrypt;
-+};
-+
-+/* used to pass settings from netlink to the crypto engine */
-+struct ovpn_peer_key_reset {
-+	enum ovpn_key_slot slot;
-+	struct ovpn_key_config key;
-+};
-+
-+struct ovpn_crypto_key_slot {
-+	u8 key_id;
-+
-+	struct crypto_aead *encrypt;
-+	struct crypto_aead *decrypt;
-+	struct ovpn_nonce_tail nonce_tail_xmit;
-+	struct ovpn_nonce_tail nonce_tail_recv;
-+
-+	struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp;
-+	struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp;
-+	struct kref refcount;
-+	struct rcu_head rcu;
-+};
-+
-+struct ovpn_crypto_state {
-+	struct ovpn_crypto_key_slot __rcu *slots[2];
-+	u8 primary_idx;
-+
-+	/* protects primary and secondary slots */
-+	spinlock_t lock;
-+};
-+
-+static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks)
-+{
-+	return kref_get_unless_zero(&ks->refcount);
-+}
-+
-+static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs)
-+{
-+	RCU_INIT_POINTER(cs->slots[0], NULL);
-+	RCU_INIT_POINTER(cs->slots[1], NULL);
-+	cs->primary_idx = 0;
-+	spin_lock_init(&cs->lock);
-+}
-+
-+static inline struct ovpn_crypto_key_slot *
-+ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id)
-+{
-+	struct ovpn_crypto_key_slot *ks;
-+	u8 idx;
-+
-+	if (unlikely(!cs))
-+		return NULL;
-+
-+	rcu_read_lock();
-+	idx = cs->primary_idx;
-+	ks = rcu_dereference(cs->slots[idx]);
-+	if (ks && ks->key_id == key_id) {
-+		if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
-+			ks = NULL;
-+ goto out; -+ } -+ -+ ks = rcu_dereference(cs->slots[idx ^ 1]); -+ if (ks && ks->key_id == key_id) { -+ if (unlikely(!ovpn_crypto_key_slot_hold(ks))) -+ ks = NULL; -+ goto out; -+ } -+ -+ /* when both key slots are occupied but no matching key ID is found, ks -+ * has to be reset to NULL to avoid carrying a stale pointer -+ */ -+ ks = NULL; -+out: -+ rcu_read_unlock(); -+ -+ return ks; -+} -+ -+static inline struct ovpn_crypto_key_slot * -+ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs) -+{ -+ struct ovpn_crypto_key_slot *ks; -+ -+ rcu_read_lock(); -+ ks = rcu_dereference(cs->slots[cs->primary_idx]); -+ if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks))) -+ ks = NULL; -+ rcu_read_unlock(); -+ -+ return ks; -+} -+ -+void ovpn_crypto_key_slot_release(struct kref *kref); -+ -+static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks) -+{ -+ kref_put(&ks->refcount, ovpn_crypto_key_slot_release); -+} -+ -+int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, -+ const struct ovpn_peer_key_reset *pkr); -+ -+void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, -+ enum ovpn_key_slot slot); -+ -+void ovpn_crypto_state_release(struct ovpn_crypto_state *cs); -+ -+void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs); -+ -+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs, -+ enum ovpn_key_slot slot, -+ struct ovpn_key_config *keyconf); -+ -+void ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id); -+ -+#endif /* _NET_OVPN_OVPNCRYPTO_H_ */ -diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c -new file mode 100644 -index 000000000000..25e4e4a453b2 ---- /dev/null -+++ b/drivers/net/ovpn/crypto_aead.c -@@ -0,0 +1,386 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "io.h" -+#include "packet.h" -+#include "pktid.h" -+#include "crypto_aead.h" -+#include "crypto.h" -+#include "peer.h" -+#include "proto.h" -+#include "skb.h" -+ -+#define AUTH_TAG_SIZE 16 -+ -+#define ALG_NAME_AES "gcm(aes)" -+#define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)" -+ -+static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks) -+{ -+ return OVPN_OP_SIZE_V2 + /* OP header size */ -+ 4 + /* Packet ID */ -+ crypto_aead_authsize(ks->encrypt); /* Auth Tag */ -+} -+ -+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, -+ struct sk_buff *skb) -+{ -+ const unsigned int tag_size = crypto_aead_authsize(ks->encrypt); -+ const unsigned int head_size = ovpn_aead_encap_overhead(ks); -+ struct aead_request *req; -+ struct sk_buff *trailer; -+ struct scatterlist *sg; -+ u8 iv[NONCE_SIZE]; -+ int nfrags, ret; -+ u32 pktid, op; -+ -+ ovpn_skb_cb(skb)->orig_len = skb->len; -+ ovpn_skb_cb(skb)->peer = peer; -+ ovpn_skb_cb(skb)->ks = ks; -+ -+ /* Sample AEAD header format: -+ * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a... -+ * [ OP32 ] [seq # ] [ auth tag ] [ payload ... 
] -+ * [4-byte -+ * IV head] -+ */ -+ -+ /* check that there's enough headroom in the skb for packet -+ * encapsulation, after adding network header and encryption overhead -+ */ -+ if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM + head_size))) -+ return -ENOBUFS; -+ -+ /* get number of skb frags and ensure that packet data is writable */ -+ nfrags = skb_cow_data(skb, 0, &trailer); -+ if (unlikely(nfrags < 0)) -+ return nfrags; -+ -+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) -+ return -ENOSPC; -+ -+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) * -+ (nfrags + 2), GFP_ATOMIC); -+ if (unlikely(!ovpn_skb_cb(skb)->sg)) -+ return -ENOMEM; -+ -+ sg = ovpn_skb_cb(skb)->sg; -+ -+ /* sg table: -+ * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+NONCE_WIRE_SIZE), -+ * 1, 2, 3, ..., n: payload, -+ * n+1: auth_tag (len=tag_size) -+ */ -+ sg_init_table(sg, nfrags + 2); -+ -+ /* build scatterlist to encrypt packet payload */ -+ ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len); -+ if (unlikely(nfrags != ret)) { -+ ret = -EINVAL; -+ goto free_sg; -+ } -+ -+ /* append auth_tag onto scatterlist */ -+ __skb_push(skb, tag_size); -+ sg_set_buf(sg + nfrags + 1, skb->data, tag_size); -+ -+ /* obtain packet ID, which is used both as a first -+ * 4 bytes of nonce and last 4 bytes of associated data. -+ */ -+ ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid); -+ if (unlikely(ret < 0)) -+ goto free_sg; -+ -+ /* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes -+ * nonce -+ */ -+ ovpn_pktid_aead_write(pktid, &ks->nonce_tail_xmit, iv); -+ -+ /* make space for packet id and push it to the front */ -+ __skb_push(skb, NONCE_WIRE_SIZE); -+ memcpy(skb->data, iv, NONCE_WIRE_SIZE); -+ -+ /* add packet op as head of additional data */ -+ op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id); -+ __skb_push(skb, OVPN_OP_SIZE_V2); -+ BUILD_BUG_ON(sizeof(op) != OVPN_OP_SIZE_V2); -+ *((__force __be32 *)skb->data) = htonl(op); -+ -+ /* AEAD Additional data */ -+ sg_set_buf(sg, skb->data, OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE); -+ -+ req = aead_request_alloc(ks->encrypt, GFP_ATOMIC); -+ if (unlikely(!req)) { -+ ret = -ENOMEM; -+ goto free_sg; -+ } -+ -+ ovpn_skb_cb(skb)->req = req; -+ -+ /* setup async crypto operation */ -+ aead_request_set_tfm(req, ks->encrypt); -+ aead_request_set_callback(req, 0, ovpn_encrypt_post, skb); -+ aead_request_set_crypt(req, sg, sg, skb->len - head_size, iv); -+ aead_request_set_ad(req, OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE); -+ -+ /* encrypt it */ -+ return crypto_aead_encrypt(req); -+free_sg: -+ kfree(ovpn_skb_cb(skb)->sg); -+ ovpn_skb_cb(skb)->sg = NULL; -+ return ret; -+} -+ -+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, -+ struct sk_buff *skb) -+{ -+ const unsigned int tag_size = crypto_aead_authsize(ks->decrypt); -+ int ret, payload_len, nfrags; -+ unsigned int payload_offset; -+ struct aead_request *req; -+ struct sk_buff *trailer; -+ struct scatterlist *sg; -+ unsigned int sg_len; -+ u8 iv[NONCE_SIZE]; -+ -+ payload_offset = OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE + tag_size; -+ payload_len = skb->len - payload_offset; -+ -+ ovpn_skb_cb(skb)->orig_len = skb->len; -+ ovpn_skb_cb(skb)->payload_offset = payload_offset; -+ ovpn_skb_cb(skb)->peer = peer; -+ ovpn_skb_cb(skb)->ks = ks; -+ -+ /* sanity check on packet size, payload size must be >= 0 */ -+ if (unlikely(payload_len < 0)) -+ return -EINVAL; -+ -+ /* Prepare the skb data buffer to be accessed up until the auth tag. 
-+ * This is required because this area is directly mapped into the sg -+ * list. -+ */ -+ if (unlikely(!pskb_may_pull(skb, payload_offset))) -+ return -ENODATA; -+ -+ /* get number of skb frags and ensure that packet data is writable */ -+ nfrags = skb_cow_data(skb, 0, &trailer); -+ if (unlikely(nfrags < 0)) -+ return nfrags; -+ -+ if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) -+ return -ENOSPC; -+ -+ ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) * -+ (nfrags + 2), GFP_ATOMIC); -+ if (unlikely(!ovpn_skb_cb(skb)->sg)) -+ return -ENOMEM; -+ -+ sg = ovpn_skb_cb(skb)->sg; -+ -+ /* sg table: -+ * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+NONCE_WIRE_SIZE), -+ * 1, 2, 3, ..., n: payload, -+ * n+1: auth_tag (len=tag_size) -+ */ -+ sg_init_table(sg, nfrags + 2); -+ -+ /* packet op is head of additional data */ -+ sg_len = OVPN_OP_SIZE_V2 + NONCE_WIRE_SIZE; -+ sg_set_buf(sg, skb->data, sg_len); -+ -+ /* build scatterlist to decrypt packet payload */ -+ ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len); -+ if (unlikely(nfrags != ret)) { -+ ret = -EINVAL; -+ goto free_sg; -+ } -+ -+ /* append auth_tag onto scatterlist */ -+ sg_set_buf(sg + nfrags + 1, skb->data + sg_len, tag_size); -+ -+ /* copy nonce into IV buffer */ -+ memcpy(iv, skb->data + OVPN_OP_SIZE_V2, NONCE_WIRE_SIZE); -+ memcpy(iv + NONCE_WIRE_SIZE, ks->nonce_tail_recv.u8, -+ sizeof(struct ovpn_nonce_tail)); -+ -+ req = aead_request_alloc(ks->decrypt, GFP_ATOMIC); -+ if (unlikely(!req)) { -+ ret = -ENOMEM; -+ goto free_sg; -+ } -+ -+ ovpn_skb_cb(skb)->req = req; -+ -+ /* setup async crypto operation */ -+ aead_request_set_tfm(req, ks->decrypt); -+ aead_request_set_callback(req, 0, ovpn_decrypt_post, skb); -+ aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv); -+ -+ aead_request_set_ad(req, NONCE_WIRE_SIZE + OVPN_OP_SIZE_V2); -+ -+ /* decrypt it */ -+ return crypto_aead_decrypt(req); -+free_sg: -+ kfree(ovpn_skb_cb(skb)->sg); -+ ovpn_skb_cb(skb)->sg = NULL; -+ return ret; -+} -+ -+/* Initialize a struct crypto_aead object */ -+struct crypto_aead *ovpn_aead_init(const char *title, const char *alg_name, -+ const unsigned char *key, -+ unsigned int keylen) -+{ -+ struct crypto_aead *aead; -+ int ret; -+ -+ aead = crypto_alloc_aead(alg_name, 0, 0); -+ if (IS_ERR(aead)) { -+ ret = PTR_ERR(aead); -+ pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret); -+ aead = NULL; -+ goto error; -+ } -+ -+ ret = crypto_aead_setkey(aead, key, keylen); -+ if (ret) { -+ pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title, -+ keylen, ret); -+ goto error; -+ } -+ -+ ret = crypto_aead_setauthsize(aead, AUTH_TAG_SIZE); -+ if (ret) { -+ pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title, -+ ret); -+ goto error; -+ } -+ -+ /* basic AEAD assumption */ -+ if (crypto_aead_ivsize(aead) != NONCE_SIZE) { -+ pr_err("%s IV size must be %d\n", title, NONCE_SIZE); -+ ret = -EINVAL; -+ goto error; -+ } -+ -+ pr_debug("********* Cipher %s (%s)\n", alg_name, title); -+ pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead)); -+ pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead)); -+ pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead)); -+ pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead)); -+ pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead)); -+ -+ return aead; -+ -+error: -+ crypto_free_aead(aead); -+ return ERR_PTR(ret); -+} -+ -+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks) -+{ -+ if (!ks) -+ return; -+ -+ crypto_free_aead(ks->encrypt); -+ 
crypto_free_aead(ks->decrypt); -+ kfree(ks); -+} -+ -+struct ovpn_crypto_key_slot * -+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc) -+{ -+ struct ovpn_crypto_key_slot *ks = NULL; -+ const char *alg_name; -+ int ret; -+ -+ /* validate crypto alg */ -+ switch (kc->cipher_alg) { -+ case OVPN_CIPHER_ALG_AES_GCM: -+ alg_name = ALG_NAME_AES; -+ break; -+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305: -+ alg_name = ALG_NAME_CHACHAPOLY; -+ break; -+ default: -+ return ERR_PTR(-EOPNOTSUPP); -+ } -+ -+ if (sizeof(struct ovpn_nonce_tail) != kc->encrypt.nonce_tail_size || -+ sizeof(struct ovpn_nonce_tail) != kc->decrypt.nonce_tail_size) -+ return ERR_PTR(-EINVAL); -+ -+ /* build the key slot */ -+ ks = kmalloc(sizeof(*ks), GFP_KERNEL); -+ if (!ks) -+ return ERR_PTR(-ENOMEM); -+ -+ ks->encrypt = NULL; -+ ks->decrypt = NULL; -+ kref_init(&ks->refcount); -+ ks->key_id = kc->key_id; -+ -+ ks->encrypt = ovpn_aead_init("encrypt", alg_name, -+ kc->encrypt.cipher_key, -+ kc->encrypt.cipher_key_size); -+ if (IS_ERR(ks->encrypt)) { -+ ret = PTR_ERR(ks->encrypt); -+ ks->encrypt = NULL; -+ goto destroy_ks; -+ } -+ -+ ks->decrypt = ovpn_aead_init("decrypt", alg_name, -+ kc->decrypt.cipher_key, -+ kc->decrypt.cipher_key_size); -+ if (IS_ERR(ks->decrypt)) { -+ ret = PTR_ERR(ks->decrypt); -+ ks->decrypt = NULL; -+ goto destroy_ks; -+ } -+ -+ memcpy(ks->nonce_tail_xmit.u8, kc->encrypt.nonce_tail, -+ sizeof(struct ovpn_nonce_tail)); -+ memcpy(ks->nonce_tail_recv.u8, kc->decrypt.nonce_tail, -+ sizeof(struct ovpn_nonce_tail)); -+ -+ /* init packet ID generation/validation */ -+ ovpn_pktid_xmit_init(&ks->pid_xmit); -+ ovpn_pktid_recv_init(&ks->pid_recv); -+ -+ return ks; -+ -+destroy_ks: -+ ovpn_aead_crypto_key_slot_destroy(ks); -+ return ERR_PTR(ret); -+} -+ -+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks) -+{ -+ const char *alg_name; -+ -+ if (!ks->encrypt) -+ return OVPN_CIPHER_ALG_NONE; -+ -+ alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt)); -+ -+ if (!strcmp(alg_name, ALG_NAME_AES)) -+ return OVPN_CIPHER_ALG_AES_GCM; -+ else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY)) -+ return OVPN_CIPHER_ALG_CHACHA20_POLY1305; -+ else -+ return OVPN_CIPHER_ALG_NONE; -+} -diff --git a/drivers/net/ovpn/crypto_aead.h b/drivers/net/ovpn/crypto_aead.h -new file mode 100644 -index 000000000000..fb65be82436e ---- /dev/null -+++ b/drivers/net/ovpn/crypto_aead.h -@@ -0,0 +1,33 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. 
-+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_OVPNAEAD_H_ -+#define _NET_OVPN_OVPNAEAD_H_ -+ -+#include "crypto.h" -+ -+#include -+#include -+ -+struct crypto_aead *ovpn_aead_init(const char *title, const char *alg_name, -+ const unsigned char *key, -+ unsigned int keylen); -+ -+int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, -+ struct sk_buff *skb); -+int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, -+ struct sk_buff *skb); -+ -+struct ovpn_crypto_key_slot * -+ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc); -+void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks); -+ -+enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks); -+ -+#endif /* _NET_OVPN_OVPNAEAD_H_ */ -diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c -new file mode 100644 -index 000000000000..c04791a508e5 ---- /dev/null -+++ b/drivers/net/ovpn/io.c -@@ -0,0 +1,462 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "peer.h" -+#include "io.h" -+#include "bind.h" -+#include "crypto.h" -+#include "crypto_aead.h" -+#include "netlink.h" -+#include "proto.h" -+#include "tcp.h" -+#include "udp.h" -+#include "skb.h" -+#include "socket.h" -+ -+const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = { -+ 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb, -+ 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48 -+}; -+ -+/** -+ * ovpn_is_keepalive - check if skb contains a keepalive message -+ * @skb: packet to check -+ * -+ * Assumes that the first byte of skb->data is defined. -+ * -+ * Return: true if skb contains a keepalive or false otherwise -+ */ -+static bool ovpn_is_keepalive(struct sk_buff *skb) -+{ -+ if (*skb->data != ovpn_keepalive_message[0]) -+ return false; -+ -+ if (skb->len != OVPN_KEEPALIVE_SIZE) -+ return false; -+ -+ if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE)) -+ return false; -+ -+ return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE); -+} -+ -+/* Called after decrypt to write the IP packet to the device. -+ * This method is expected to manage/free the skb. 
-+ */
-+static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
-+{
-+	unsigned int pkt_len;
-+
-+	/* we can't guarantee the packet wasn't corrupted before entering the
-+	 * VPN, therefore we give other layers a chance to check that
-+	 */
-+	skb->ip_summed = CHECKSUM_NONE;
-+
-+	/* skb hash for transport packet no longer valid after decapsulation */
-+	skb_clear_hash(skb);
-+
-+	/* post-decrypt scrub -- prepare to inject encapsulated packet onto the
-+	 * interface, based on __skb_tunnel_rx() in dst.h
-+	 */
-+	skb->dev = peer->ovpn->dev;
-+	skb_set_queue_mapping(skb, 0);
-+	skb_scrub_packet(skb, true);
-+
-+	skb_reset_network_header(skb);
-+	skb_reset_transport_header(skb);
-+	skb_probe_transport_header(skb);
-+	skb_reset_inner_headers(skb);
-+
-+	memset(skb->cb, 0, sizeof(skb->cb));
-+
-+	/* cause packet to be "received" by the interface */
-+	pkt_len = skb->len;
-+	if (likely(gro_cells_receive(&peer->ovpn->gro_cells,
-+				     skb) == NET_RX_SUCCESS))
-+		/* update RX stats with the size of decrypted packet */
-+		dev_sw_netstats_rx_add(peer->ovpn->dev, pkt_len);
-+}
-+
-+void ovpn_decrypt_post(void *data, int ret)
-+{
-+	struct ovpn_crypto_key_slot *ks;
-+	unsigned int payload_offset = 0;
-+	struct sk_buff *skb = data;
-+	struct ovpn_peer *peer;
-+	unsigned int orig_len;
-+	__be16 proto;
-+	__be32 *pid;
-+
-+	/* crypto is happening asynchronously. this function will be called
-+	 * again later by the crypto callback with a proper return code
-+	 */
-+	if (unlikely(ret == -EINPROGRESS))
-+		return;
-+
-+	payload_offset = ovpn_skb_cb(skb)->payload_offset;
-+	ks = ovpn_skb_cb(skb)->ks;
-+	peer = ovpn_skb_cb(skb)->peer;
-+	orig_len = ovpn_skb_cb(skb)->orig_len;
-+
-+	/* crypto is done, cleanup skb CB and its members */
-+
-+	if (likely(ovpn_skb_cb(skb)->sg))
-+		kfree(ovpn_skb_cb(skb)->sg);
-+
-+	if (likely(ovpn_skb_cb(skb)->req))
-+		aead_request_free(ovpn_skb_cb(skb)->req);
-+
-+	if (unlikely(ret < 0))
-+		goto drop;
-+
-+	/* PID sits after the op */
-+	pid = (__force __be32 *)(skb->data + OVPN_OP_SIZE_V2);
-+	ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
-+	if (unlikely(ret < 0)) {
-+		net_err_ratelimited("%s: PKT ID RX error: %d\n",
-+				    peer->ovpn->dev->name, ret);
-+		goto drop;
-+	}
-+
-+	/* keep track of last received authenticated packet for keepalive */
-+	peer->last_recv = ktime_get_real_seconds();
-+
-+	if (peer->sock->sock->sk->sk_protocol == IPPROTO_UDP) {
-+		/* check if this peer changed its IP address and update
-+		 * state
-+		 */
-+		ovpn_peer_float(peer, skb);
-+		/* update source endpoint for this peer */
-+		ovpn_peer_update_local_endpoint(peer, skb);
-+	}
-+
-+	/* point to encapsulated IP packet */
-+	__skb_pull(skb, payload_offset);
-+
-+	/* check if this is a valid data packet that has to be delivered to the
-+	 * ovpn interface
-+	 */
-+	skb_reset_network_header(skb);
-+	proto = ovpn_ip_check_protocol(skb);
-+	if (unlikely(!proto)) {
-+		/* check if null packet */
-+		if (unlikely(!pskb_may_pull(skb, 1))) {
-+			net_info_ratelimited("%s: NULL packet received from peer %u\n",
-+					     peer->ovpn->dev->name, peer->id);
-+			goto drop;
-+		}
-+
-+		if (ovpn_is_keepalive(skb)) {
-+			net_dbg_ratelimited("%s: ping received from peer %u\n",
-+					    peer->ovpn->dev->name, peer->id);
-+			goto drop;
-+		}
-+
-+		net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
-+				     peer->ovpn->dev->name, peer->id);
-+		goto drop;
-+	}
-+	skb->protocol = proto;
-+
-+	/* perform Reverse Path Filtering (RPF) */
-+	if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
-+		if (skb_protocol_to_family(skb) == AF_INET6)
-+			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
-+					    peer->ovpn->dev->name, peer->id,
-+					    &ipv6_hdr(skb)->saddr);
-+		else
-+			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
-+					    peer->ovpn->dev->name, peer->id,
-+					    &ip_hdr(skb)->saddr);
-+		goto drop;
-+	}
-+
-+	/* increment RX stats */
-+	ovpn_peer_stats_increment_rx(&peer->vpn_stats, skb->len);
-+	ovpn_peer_stats_increment_rx(&peer->link_stats, orig_len);
-+
-+	ovpn_netdev_write(peer, skb);
-+	/* skb is passed to upper layer - don't free it */
-+	skb = NULL;
-+drop:
-+	if (unlikely(skb))
-+		dev_core_stats_rx_dropped_inc(peer->ovpn->dev);
-+	if (likely(peer))
-+		ovpn_peer_put(peer);
-+	if (likely(ks))
-+		ovpn_crypto_key_slot_put(ks);
-+	kfree_skb(skb);
-+}
-+
-+/* pick next packet from RX queue, decrypt and forward it to the device */
-+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
-+{
-+	struct ovpn_crypto_key_slot *ks;
-+	u8 key_id;
-+
-+	/* get the key slot matching the key ID in the received packet */
-+	key_id = ovpn_key_id_from_skb(skb);
-+	ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
-+	if (unlikely(!ks)) {
-+		net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
-+				     peer->ovpn->dev->name, peer->id, key_id);
-+		dev_core_stats_rx_dropped_inc(peer->ovpn->dev);
-+		kfree_skb(skb);
-+		return;
-+	}
-+
-+	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
-+	ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
-+}
-+
-+void ovpn_encrypt_post(void *data, int ret)
-+{
-+	struct ovpn_crypto_key_slot *ks;
-+	struct sk_buff *skb = data;
-+	struct ovpn_peer *peer;
-+	unsigned int orig_len;
-+
-+	/* encryption is happening asynchronously. This function will be
-+	 * called later by the crypto callback with a proper return value
-+	 */
-+	if (unlikely(ret == -EINPROGRESS))
-+		return;
-+
-+	ks = ovpn_skb_cb(skb)->ks;
-+	peer = ovpn_skb_cb(skb)->peer;
-+	orig_len = ovpn_skb_cb(skb)->orig_len;
-+
-+	/* crypto is done, cleanup skb CB and its members */
-+
-+	if (likely(ovpn_skb_cb(skb)->sg))
-+		kfree(ovpn_skb_cb(skb)->sg);
-+
-+	if (likely(ovpn_skb_cb(skb)->req))
-+		aead_request_free(ovpn_skb_cb(skb)->req);
-+
-+	if (unlikely(ret == -ERANGE)) {
-+		/* we ran out of IVs and we must kill the key as it can't be
-+		 * used anymore
-+		 */
-+		netdev_warn(peer->ovpn->dev,
-+			    "killing key %u for peer %u\n", ks->key_id,
-+			    peer->id);
-+		ovpn_crypto_kill_key(&peer->crypto, ks->key_id);
-+		/* let userspace know so that a new key must be negotiated */
-+		ovpn_nl_key_swap_notify(peer, ks->key_id);
-+		goto err;
-+	}
-+
-+	if (unlikely(ret < 0))
-+		goto err;
-+
-+	skb_mark_not_on_list(skb);
-+	ovpn_peer_stats_increment_tx(&peer->link_stats, skb->len);
-+	ovpn_peer_stats_increment_tx(&peer->vpn_stats, orig_len);
-+
-+	switch (peer->sock->sock->sk->sk_protocol) {
-+	case IPPROTO_UDP:
-+		ovpn_udp_send_skb(peer->ovpn, peer, skb);
-+		break;
-+	case IPPROTO_TCP:
-+		ovpn_tcp_send_skb(peer, skb);
-+		break;
-+	default:
-+		/* no transport configured yet */
-+		goto err;
-+	}
-+
-+	/* keep track of last sent packet for keepalive */
-+	peer->last_sent = ktime_get_real_seconds();
-+
-+	/* skb passed down the stack - don't free it */
-+	skb = NULL;
-+err:
-+	if (unlikely(skb))
-+		dev_core_stats_tx_dropped_inc(peer->ovpn->dev);
-+	if (likely(peer))
-+		ovpn_peer_put(peer);
-+	if (likely(ks))
-+		ovpn_crypto_key_slot_put(ks);
-+	kfree_skb(skb);
-+}
-+
-+static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
-+{
-+	struct 
ovpn_crypto_key_slot *ks; -+ -+ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && -+ skb_checksum_help(skb))) { -+ net_warn_ratelimited("%s: cannot compute checksum for outgoing packet\n", -+ peer->ovpn->dev->name); -+ return false; -+ } -+ -+ /* get primary key to be used for encrypting data */ -+ ks = ovpn_crypto_key_slot_primary(&peer->crypto); -+ if (unlikely(!ks)) { -+ net_warn_ratelimited("%s: error while retrieving primary key slot for peer %u\n", -+ peer->ovpn->dev->name, peer->id); -+ return false; -+ } -+ -+ /* take a reference to the peer because the crypto code may run async. -+ * ovpn_encrypt_post() will release it upon completion -+ */ -+ if (unlikely(!ovpn_peer_hold(peer))) { -+ DEBUG_NET_WARN_ON_ONCE(1); -+ return false; -+ } -+ -+ memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb)); -+ ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb)); -+ return true; -+} -+ -+/* send skb to connected peer, if any */ -+static void ovpn_send(struct ovpn_struct *ovpn, struct sk_buff *skb, -+ struct ovpn_peer *peer) -+{ -+ struct sk_buff *curr, *next; -+ -+ if (likely(!peer)) -+ /* retrieve peer serving the destination IP of this packet */ -+ peer = ovpn_peer_get_by_dst(ovpn, skb); -+ if (unlikely(!peer)) { -+ net_dbg_ratelimited("%s: no peer to send data to\n", -+ ovpn->dev->name); -+ dev_core_stats_tx_dropped_inc(ovpn->dev); -+ goto drop; -+ } -+ -+ /* this might be a GSO-segmented skb list: process each skb -+ * independently -+ */ -+ skb_list_walk_safe(skb, curr, next) -+ if (unlikely(!ovpn_encrypt_one(peer, curr))) { -+ dev_core_stats_tx_dropped_inc(ovpn->dev); -+ kfree_skb(curr); -+ } -+ -+ /* skb passed over, no need to free */ -+ skb = NULL; -+drop: -+ if (likely(peer)) -+ ovpn_peer_put(peer); -+ kfree_skb_list(skb); -+} -+ -+/* Send user data to the network -+ */ -+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct ovpn_struct *ovpn = netdev_priv(dev); -+ struct sk_buff *segments, *curr, *next; -+ struct sk_buff_head skb_list; -+ __be16 proto; -+ int ret; -+ -+ /* reset netfilter state */ -+ nf_reset_ct(skb); -+ -+ /* verify IP header size in network packet */ -+ proto = ovpn_ip_check_protocol(skb); -+ if (unlikely(!proto || skb->protocol != proto)) { -+ net_err_ratelimited("%s: dropping malformed payload packet\n", -+ dev->name); -+ dev_core_stats_tx_dropped_inc(ovpn->dev); -+ goto drop; -+ } -+ -+ if (skb_is_gso(skb)) { -+ segments = skb_gso_segment(skb, 0); -+ if (IS_ERR(segments)) { -+ ret = PTR_ERR(segments); -+ net_err_ratelimited("%s: cannot segment packet: %d\n", -+ dev->name, ret); -+ dev_core_stats_tx_dropped_inc(ovpn->dev); -+ goto drop; -+ } -+ -+ consume_skb(skb); -+ skb = segments; -+ } -+ -+ /* from this moment on, "skb" might be a list */ -+ -+ __skb_queue_head_init(&skb_list); -+ skb_list_walk_safe(skb, curr, next) { -+ skb_mark_not_on_list(curr); -+ -+ curr = skb_share_check(curr, GFP_ATOMIC); -+ if (unlikely(!curr)) { -+ net_err_ratelimited("%s: skb_share_check failed\n", -+ dev->name); -+ dev_core_stats_tx_dropped_inc(ovpn->dev); -+ continue; -+ } -+ -+ __skb_queue_tail(&skb_list, curr); -+ } -+ skb_list.prev->next = NULL; -+ -+ ovpn_send(ovpn, skb_list.next, NULL); -+ -+ return NETDEV_TX_OK; -+ -+drop: -+ skb_tx_error(skb); -+ kfree_skb_list(skb); -+ return NET_XMIT_DROP; -+} -+ -+/** -+ * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer -+ * @peer: peer to send the message to -+ * @data: message content -+ * @len: message length -+ * -+ * Assumes that caller holds a reference to peer -+ */ -+void 
ovpn_xmit_special(struct ovpn_peer *peer, const void *data, -+ const unsigned int len) -+{ -+ struct ovpn_struct *ovpn; -+ struct sk_buff *skb; -+ -+ ovpn = peer->ovpn; -+ if (unlikely(!ovpn)) -+ return; -+ -+ skb = alloc_skb(256 + len, GFP_ATOMIC); -+ if (unlikely(!skb)) -+ return; -+ -+ skb_reserve(skb, 128); -+ skb->priority = TC_PRIO_BESTEFFORT; -+ __skb_put_data(skb, data, len); -+ -+ /* increase reference counter when passing peer to sending queue */ -+ if (!ovpn_peer_hold(peer)) { -+ netdev_dbg(ovpn->dev, "%s: cannot hold peer reference for sending special packet\n", -+ __func__); -+ kfree_skb(skb); -+ return; -+ } -+ -+ ovpn_send(ovpn, skb, peer); -+} -diff --git a/drivers/net/ovpn/io.h b/drivers/net/ovpn/io.h -new file mode 100644 -index 000000000000..eb224114152c ---- /dev/null -+++ b/drivers/net/ovpn/io.h -@@ -0,0 +1,25 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_OVPN_H_ -+#define _NET_OVPN_OVPN_H_ -+ -+#define OVPN_KEEPALIVE_SIZE 16 -+extern const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE]; -+ -+netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev); -+ -+void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb); -+void ovpn_xmit_special(struct ovpn_peer *peer, const void *data, -+ const unsigned int len); -+ -+void ovpn_encrypt_post(void *data, int ret); -+void ovpn_decrypt_post(void *data, int ret); -+ -+#endif /* _NET_OVPN_OVPN_H_ */ -diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c -new file mode 100644 -index 000000000000..9dcf51ae1497 ---- /dev/null -+++ b/drivers/net/ovpn/main.c -@@ -0,0 +1,337 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ * James Yonan -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "netlink.h" -+#include "io.h" -+#include "packet.h" -+#include "peer.h" -+#include "tcp.h" -+ -+/* Driver info */ -+#define DRV_DESCRIPTION "OpenVPN data channel offload (ovpn)" -+#define DRV_COPYRIGHT "(C) 2020-2024 OpenVPN, Inc." -+ -+static void ovpn_struct_free(struct net_device *net) -+{ -+ struct ovpn_struct *ovpn = netdev_priv(net); -+ -+ kfree(ovpn->peers); -+} -+ -+static int ovpn_net_init(struct net_device *dev) -+{ -+ struct ovpn_struct *ovpn = netdev_priv(dev); -+ -+ return gro_cells_init(&ovpn->gro_cells, dev); -+} -+ -+static void ovpn_net_uninit(struct net_device *dev) -+{ -+ struct ovpn_struct *ovpn = netdev_priv(dev); -+ -+ gro_cells_destroy(&ovpn->gro_cells); -+} -+ -+static int ovpn_net_open(struct net_device *dev) -+{ -+ /* ovpn keeps the carrier always on to avoid losing IP or route -+ * configuration upon disconnection. This way it can prevent leaks -+ * of traffic outside of the VPN tunnel. -+ * The user may override this behaviour by tearing down the interface -+ * manually. 
-+ */ -+ netif_carrier_on(dev); -+ netif_tx_start_all_queues(dev); -+ return 0; -+} -+ -+static int ovpn_net_stop(struct net_device *dev) -+{ -+ netif_tx_stop_all_queues(dev); -+ return 0; -+} -+ -+static const struct net_device_ops ovpn_netdev_ops = { -+ .ndo_init = ovpn_net_init, -+ .ndo_uninit = ovpn_net_uninit, -+ .ndo_open = ovpn_net_open, -+ .ndo_stop = ovpn_net_stop, -+ .ndo_start_xmit = ovpn_net_xmit, -+}; -+ -+static const struct device_type ovpn_type = { -+ .name = OVPN_FAMILY_NAME, -+}; -+ -+static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = { -+ [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, OVPN_MODE_P2P, -+ OVPN_MODE_MP), -+}; -+ -+/** -+ * ovpn_dev_is_valid - check if the netdevice is of type 'ovpn' -+ * @dev: the interface to check -+ * -+ * Return: whether the netdevice is of type 'ovpn' -+ */ -+bool ovpn_dev_is_valid(const struct net_device *dev) -+{ -+ return dev->netdev_ops->ndo_start_xmit == ovpn_net_xmit; -+} -+ -+static void ovpn_get_drvinfo(struct net_device *dev, -+ struct ethtool_drvinfo *info) -+{ -+ strscpy(info->driver, OVPN_FAMILY_NAME, sizeof(info->driver)); -+ strscpy(info->bus_info, "ovpn", sizeof(info->bus_info)); -+} -+ -+static const struct ethtool_ops ovpn_ethtool_ops = { -+ .get_drvinfo = ovpn_get_drvinfo, -+ .get_link = ethtool_op_get_link, -+ .get_ts_info = ethtool_op_get_ts_info, -+}; -+ -+static void ovpn_setup(struct net_device *dev) -+{ -+ /* compute the overhead considering AEAD encryption */ -+ const int overhead = sizeof(u32) + NONCE_WIRE_SIZE + 16 + -+ sizeof(struct udphdr) + -+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); -+ -+ netdev_features_t feat = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | -+ NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | -+ NETIF_F_HIGHDMA; -+ -+ dev->needs_free_netdev = true; -+ -+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; -+ -+ dev->ethtool_ops = &ovpn_ethtool_ops; -+ dev->netdev_ops = &ovpn_netdev_ops; -+ -+ dev->priv_destructor = ovpn_struct_free; -+ -+ dev->hard_header_len = 0; -+ dev->addr_len = 0; -+ dev->mtu = ETH_DATA_LEN - overhead; -+ dev->min_mtu = IPV4_MIN_MTU; -+ dev->max_mtu = IP_MAX_MTU - overhead; -+ -+ dev->type = ARPHRD_NONE; -+ dev->flags = IFF_POINTOPOINT | IFF_NOARP; -+ dev->priv_flags |= IFF_NO_QUEUE; -+ -+ dev->lltx = true; -+ dev->features |= feat; -+ dev->hw_features |= feat; -+ dev->hw_enc_features |= feat; -+ -+ dev->needed_headroom = OVPN_HEAD_ROOM; -+ dev->needed_tailroom = OVPN_MAX_PADDING; -+ -+ SET_NETDEV_DEVTYPE(dev, &ovpn_type); -+} -+ -+static int ovpn_mp_alloc(struct ovpn_struct *ovpn) -+{ -+ struct in_device *dev_v4; -+ int i; -+ -+ if (ovpn->mode != OVPN_MODE_MP) -+ return 0; -+ -+ dev_v4 = __in_dev_get_rtnl(ovpn->dev); -+ if (dev_v4) { -+ /* disable redirects as Linux gets confused by ovpn -+ * handling same-LAN routing. -+ * This happens because a multipeer interface is used as -+ * relay point between hosts in the same subnet, while -+ * in a classic LAN this would not be needed because the -+ * two hosts would be able to talk directly. 
-+ */ -+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); -+ IPV4_DEVCONF_ALL(dev_net(ovpn->dev), SEND_REDIRECTS) = false; -+ } -+ -+ /* the peer container is fairly large, therefore we allocate it only in -+ * MP mode -+ */ -+ ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL); -+ if (!ovpn->peers) -+ return -ENOMEM; -+ -+ spin_lock_init(&ovpn->peers->lock); -+ -+ for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) { -+ INIT_HLIST_HEAD(&ovpn->peers->by_id[i]); -+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr[i], i); -+ INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i], i); -+ } -+ -+ return 0; -+} -+ -+static int ovpn_newlink(struct net *src_net, struct net_device *dev, -+ struct nlattr *tb[], struct nlattr *data[], -+ struct netlink_ext_ack *extack) -+{ -+ struct ovpn_struct *ovpn = netdev_priv(dev); -+ enum ovpn_mode mode = OVPN_MODE_P2P; -+ int err; -+ -+ if (data && data[IFLA_OVPN_MODE]) { -+ mode = nla_get_u8(data[IFLA_OVPN_MODE]); -+ netdev_dbg(dev, "setting device mode: %u\n", mode); -+ } -+ -+ ovpn->dev = dev; -+ ovpn->mode = mode; -+ spin_lock_init(&ovpn->lock); -+ INIT_DELAYED_WORK(&ovpn->keepalive_work, ovpn_peer_keepalive_work); -+ -+ err = ovpn_mp_alloc(ovpn); -+ if (err < 0) -+ return err; -+ -+ /* turn carrier explicitly off after registration, this way state is -+ * clearly defined -+ */ -+ netif_carrier_off(dev); -+ -+ return register_netdevice(dev); -+} -+ -+static struct rtnl_link_ops ovpn_link_ops = { -+ .kind = OVPN_FAMILY_NAME, -+ .netns_refund = false, -+ .priv_size = sizeof(struct ovpn_struct), -+ .setup = ovpn_setup, -+ .policy = ovpn_policy, -+ .maxtype = IFLA_OVPN_MAX, -+ .newlink = ovpn_newlink, -+ .dellink = unregister_netdevice_queue, -+}; -+ -+static int ovpn_netdev_notifier_call(struct notifier_block *nb, -+ unsigned long state, void *ptr) -+{ -+ struct net_device *dev = netdev_notifier_info_to_dev(ptr); -+ struct ovpn_struct *ovpn; -+ -+ if (!ovpn_dev_is_valid(dev)) -+ return NOTIFY_DONE; -+ -+ ovpn = netdev_priv(dev); -+ -+ switch (state) { -+ case NETDEV_REGISTER: -+ ovpn->registered = true; -+ break; -+ case NETDEV_UNREGISTER: -+ /* twiddle thumbs on netns device moves */ -+ if (dev->reg_state != NETREG_UNREGISTERING) -+ break; -+ -+ /* can be delivered multiple times, so check registered flag, -+ * then destroy the interface -+ */ -+ if (!ovpn->registered) -+ return NOTIFY_DONE; -+ -+ netif_carrier_off(dev); -+ ovpn->registered = false; -+ -+ cancel_delayed_work_sync(&ovpn->keepalive_work); -+ -+ switch (ovpn->mode) { -+ case OVPN_MODE_P2P: -+ ovpn_peer_release_p2p(ovpn); -+ break; -+ case OVPN_MODE_MP: -+ ovpn_peers_free(ovpn); -+ break; -+ } -+ break; -+ case NETDEV_POST_INIT: -+ case NETDEV_GOING_DOWN: -+ case NETDEV_DOWN: -+ case NETDEV_UP: -+ case NETDEV_PRE_UP: -+ break; -+ default: -+ return NOTIFY_DONE; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block ovpn_netdev_notifier = { -+ .notifier_call = ovpn_netdev_notifier_call, -+}; -+ -+static int __init ovpn_init(void) -+{ -+ int err = register_netdevice_notifier(&ovpn_netdev_notifier); -+ -+ if (err) { -+ pr_err("ovpn: can't register netdevice notifier: %d\n", err); -+ return err; -+ } -+ -+ err = rtnl_link_register(&ovpn_link_ops); -+ if (err) { -+ pr_err("ovpn: can't register rtnl link ops: %d\n", err); -+ goto unreg_netdev; -+ } -+ -+ err = ovpn_nl_register(); -+ if (err) { -+ pr_err("ovpn: can't register netlink family: %d\n", err); -+ goto unreg_rtnl; -+ } -+ -+ ovpn_tcp_init(); -+ -+ return 0; -+ -+unreg_rtnl: -+ rtnl_link_unregister(&ovpn_link_ops); 
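The unwind ladder that ovpn_init() uses above (and which continues just below) is the usual kernel pattern: facilities are registered in order (netdevice notifier, rtnl link ops, generic netlink family) and, on failure, only what already succeeded is torn down, in reverse order. A minimal standalone sketch of the same goto-unwind idiom; register_a()/register_b() are hypothetical stand-ins, not functions from this patch:

#include <stdio.h>

static int register_a(void) { return 0; }		/* always succeeds */
static void unregister_a(void) { puts("a unwound"); }
static int register_b(void) { return -1; }		/* simulate failure */

static int demo_init(void)
{
	int err;

	err = register_a();
	if (err)
		return err;		/* nothing to undo yet */

	err = register_b();
	if (err)
		goto unreg_a;		/* undo a, and only a */

	return 0;

unreg_a:
	unregister_a();
	return err;
}

int main(void)
{
	printf("demo_init: %d\n", demo_init());
	return 0;
}

Tearing down in reverse registration order guarantees that nothing can still dispatch into a facility that has already been unregistered.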
-+unreg_netdev: -+ unregister_netdevice_notifier(&ovpn_netdev_notifier); -+ return err; -+} -+ -+static __exit void ovpn_cleanup(void) -+{ -+ ovpn_nl_unregister(); -+ rtnl_link_unregister(&ovpn_link_ops); -+ unregister_netdevice_notifier(&ovpn_netdev_notifier); -+ -+ rcu_barrier(); -+} -+ -+module_init(ovpn_init); -+module_exit(ovpn_cleanup); -+ -+MODULE_DESCRIPTION(DRV_DESCRIPTION); -+MODULE_AUTHOR(DRV_COPYRIGHT); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/net/ovpn/main.h b/drivers/net/ovpn/main.h -new file mode 100644 -index 000000000000..28e5c44816e1 ---- /dev/null -+++ b/drivers/net/ovpn/main.h -@@ -0,0 +1,24 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_MAIN_H_ -+#define _NET_OVPN_MAIN_H_ -+ -+bool ovpn_dev_is_valid(const struct net_device *dev); -+ -+#define SKB_HEADER_LEN \ -+ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ -+ sizeof(struct udphdr) + NET_SKB_PAD) -+ -+#define OVPN_HEAD_ROOM ALIGN(16 + SKB_HEADER_LEN, 4) -+#define OVPN_MAX_PADDING 16 -+ -+#define OVPN_QUEUE_LEN 1024 -+ -+#endif /* _NET_OVPN_MAIN_H_ */ -diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c -new file mode 100644 -index 000000000000..6a43eab9a136 ---- /dev/null -+++ b/drivers/net/ovpn/netlink-gen.c -@@ -0,0 +1,212 @@ -+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) -+/* Do not edit directly, auto-generated from: */ -+/* Documentation/netlink/specs/ovpn.yaml */ -+/* YNL-GEN kernel source */ -+ -+#include -+#include -+ -+#include "netlink-gen.h" -+ -+#include -+ -+/* Integer value ranges */ -+static const struct netlink_range_validation ovpn_a_peer_id_range = { -+ .max = 16777215ULL, -+}; -+ -+static const struct netlink_range_validation ovpn_a_keyconf_peer_id_range = { -+ .max = 16777215ULL, -+}; -+ -+/* Common nested types */ -+const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] = { -+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range), -+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1), -+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7), -+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2), -+ [OVPN_A_KEYCONF_ENCRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy), -+ [OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy), -+}; -+ -+const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = { -+ [OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256), -+ [OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE), -+}; -+ -+const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = { -+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range), -+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16), -+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_U16, 1), -+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16), -+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16), -+ [OVPN_A_PEER_LOCAL_PORT] = NLA_POLICY_MIN(NLA_U16, 1), -+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, }, -+ 
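These auto-generated policy tables are declarative validation rules: the generic netlink core checks every attribute against its entry before any ovpn handler runs, so the handlers can call nla_get_*() without re-validating types or bounds. A rough standalone sketch of the bounds check implied by NLA_POLICY_FULL_RANGE(); the names and the hardcoded -ERANGE value are illustrative, not kernel API:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t min, max; };

/* reject out-of-range values before a command handler ever sees them */
static int validate_u32(uint32_t value, const struct range *r)
{
	if (value < r->min || value > r->max)
		return -34;	/* -ERANGE */
	return 0;
}

int main(void)
{
	const struct range peer_id_range = { 0, 16777215 };	/* 2^24 - 1, as in the table */

	printf("%d\n", validate_u32(42, &peer_id_range));	/* 0: accepted */
	printf("%d\n", validate_u32(1 << 24, &peer_id_range));	/* -34: rejected */
	return 0;
}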
[OVPN_A_PEER_DEL_REASON] = NLA_POLICY_MAX(NLA_U32, 4), -+ [OVPN_A_PEER_VPN_RX_BYTES] = { .type = NLA_UINT, }, -+ [OVPN_A_PEER_VPN_TX_BYTES] = { .type = NLA_UINT, }, -+ [OVPN_A_PEER_VPN_RX_PACKETS] = { .type = NLA_UINT, }, -+ [OVPN_A_PEER_VPN_TX_PACKETS] = { .type = NLA_UINT, }, -+ [OVPN_A_PEER_LINK_RX_BYTES] = { .type = NLA_UINT, }, -+ [OVPN_A_PEER_LINK_TX_BYTES] = { .type = NLA_UINT, }, -+ [OVPN_A_PEER_LINK_RX_PACKETS] = { .type = NLA_U32, }, -+ [OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_U32, }, -+}; -+ -+/* OVPN_CMD_PEER_NEW - do */ -+static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), -+}; -+ -+/* OVPN_CMD_PEER_SET - do */ -+static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), -+}; -+ -+/* OVPN_CMD_PEER_GET - do */ -+static const struct nla_policy ovpn_peer_get_do_nl_policy[OVPN_A_PEER + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), -+}; -+ -+/* OVPN_CMD_PEER_GET - dump */ -+static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+}; -+ -+/* OVPN_CMD_PEER_DEL - do */ -+static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), -+}; -+ -+/* OVPN_CMD_KEY_NEW - do */ -+static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), -+}; -+ -+/* OVPN_CMD_KEY_GET - do */ -+static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), -+}; -+ -+/* OVPN_CMD_KEY_SWAP - do */ -+static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), -+}; -+ -+/* OVPN_CMD_KEY_DEL - do */ -+static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = { -+ [OVPN_A_IFINDEX] = { .type = NLA_U32, }, -+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), -+}; -+ -+/* Ops table for ovpn */ -+static const struct genl_split_ops ovpn_nl_ops[] = { -+ { -+ .cmd = OVPN_CMD_PEER_NEW, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_peer_new_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_peer_new_nl_policy, -+ .maxattr = OVPN_A_PEER, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_PEER_SET, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_peer_set_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_peer_set_nl_policy, -+ .maxattr = OVPN_A_PEER, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_PEER_GET, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_peer_get_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_peer_get_do_nl_policy, -+ .maxattr = OVPN_A_PEER, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_PEER_GET, -+ .dumpit = ovpn_nl_peer_get_dumpit, -+ .policy = ovpn_peer_get_dump_nl_policy, -+ .maxattr = OVPN_A_IFINDEX, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, -+ }, -+ { -+ .cmd = 
OVPN_CMD_PEER_DEL, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_peer_del_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_peer_del_nl_policy, -+ .maxattr = OVPN_A_PEER, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_KEY_NEW, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_key_new_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_key_new_nl_policy, -+ .maxattr = OVPN_A_KEYCONF, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_KEY_GET, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_key_get_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_key_get_nl_policy, -+ .maxattr = OVPN_A_KEYCONF, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_KEY_SWAP, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_key_swap_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_key_swap_nl_policy, -+ .maxattr = OVPN_A_KEYCONF, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+ { -+ .cmd = OVPN_CMD_KEY_DEL, -+ .pre_doit = ovpn_nl_pre_doit, -+ .doit = ovpn_nl_key_del_doit, -+ .post_doit = ovpn_nl_post_doit, -+ .policy = ovpn_key_del_nl_policy, -+ .maxattr = OVPN_A_KEYCONF, -+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, -+ }, -+}; -+ -+static const struct genl_multicast_group ovpn_nl_mcgrps[] = { -+ [OVPN_NLGRP_PEERS] = { "peers", }, -+}; -+ -+struct genl_family ovpn_nl_family __ro_after_init = { -+ .name = OVPN_FAMILY_NAME, -+ .version = OVPN_FAMILY_VERSION, -+ .netnsok = true, -+ .parallel_ops = true, -+ .module = THIS_MODULE, -+ .split_ops = ovpn_nl_ops, -+ .n_split_ops = ARRAY_SIZE(ovpn_nl_ops), -+ .mcgrps = ovpn_nl_mcgrps, -+ .n_mcgrps = ARRAY_SIZE(ovpn_nl_mcgrps), -+}; -diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h -new file mode 100644 -index 000000000000..66a4e4a0a055 ---- /dev/null -+++ b/drivers/net/ovpn/netlink-gen.h -@@ -0,0 +1,41 @@ -+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ -+/* Do not edit directly, auto-generated from: */ -+/* Documentation/netlink/specs/ovpn.yaml */ -+/* YNL-GEN kernel header */ -+ -+#ifndef _LINUX_OVPN_GEN_H -+#define _LINUX_OVPN_GEN_H -+ -+#include -+#include -+ -+#include -+ -+/* Common nested types */ -+extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1]; -+extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1]; -+extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1]; -+ -+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, -+ struct genl_info *info); -+void -+ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, -+ struct genl_info *info); -+ -+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb); -+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info); -+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info); -+ -+enum { -+ OVPN_NLGRP_PEERS, -+}; -+ -+extern struct genl_family ovpn_nl_family; -+ -+#endif /* _LINUX_OVPN_GEN_H */ -diff --git 
a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c -new file mode 100644 -index 000000000000..4d7d835cb47f ---- /dev/null -+++ b/drivers/net/ovpn/netlink.c -@@ -0,0 +1,1135 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "io.h" -+#include "netlink.h" -+#include "netlink-gen.h" -+#include "bind.h" -+#include "crypto.h" -+#include "packet.h" -+#include "peer.h" -+#include "socket.h" -+ -+MODULE_ALIAS_GENL_FAMILY(OVPN_FAMILY_NAME); -+ -+/** -+ * ovpn_get_dev_from_attrs - retrieve the ovpn private data from the netdevice -+ * a netlink message is targeting -+ * @net: network namespace where to look for the interface -+ * @info: generic netlink info from the user request -+ * -+ * Return: the ovpn private data, if found, or an error otherwise -+ */ -+static struct ovpn_struct * -+ovpn_get_dev_from_attrs(struct net *net, const struct genl_info *info) -+{ -+ struct ovpn_struct *ovpn; -+ struct net_device *dev; -+ int ifindex; -+ -+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_IFINDEX)) -+ return ERR_PTR(-EINVAL); -+ -+ ifindex = nla_get_u32(info->attrs[OVPN_A_IFINDEX]); -+ -+ rcu_read_lock(); -+ dev = dev_get_by_index_rcu(net, ifindex); -+ if (!dev) { -+ rcu_read_unlock(); -+ NL_SET_ERR_MSG_MOD(info->extack, -+ "ifindex does not match any interface"); -+ return ERR_PTR(-ENODEV); -+ } -+ -+ if (!ovpn_dev_is_valid(dev)) { -+ rcu_read_unlock(); -+ NL_SET_ERR_MSG_MOD(info->extack, -+ "specified interface is not ovpn"); -+ NL_SET_BAD_ATTR(info->extack, info->attrs[OVPN_A_IFINDEX]); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ ovpn = netdev_priv(dev); -+ netdev_hold(dev, &ovpn->dev_tracker, GFP_KERNEL); -+ rcu_read_unlock(); -+ -+ return ovpn; -+} -+ -+int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, -+ struct genl_info *info) -+{ -+ struct ovpn_struct *ovpn = ovpn_get_dev_from_attrs(genl_info_net(info), -+ info); -+ -+ if (IS_ERR(ovpn)) -+ return PTR_ERR(ovpn); -+ -+ info->user_ptr[0] = ovpn; -+ -+ return 0; -+} -+ -+void ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, -+ struct genl_info *info) -+{ -+ struct ovpn_struct *ovpn = info->user_ptr[0]; -+ -+ if (ovpn) -+ netdev_put(ovpn->dev, &ovpn->dev_tracker); -+} -+ -+static int ovpn_nl_attr_sockaddr_remote(struct nlattr **attrs, -+ struct sockaddr_storage *ss) -+{ -+ struct sockaddr_in6 *sin6; -+ struct sockaddr_in *sin; -+ struct in6_addr *in6; -+ __be16 port = 0; -+ __be32 *in; -+ int af; -+ -+ ss->ss_family = AF_UNSPEC; -+ -+ if (attrs[OVPN_A_PEER_REMOTE_PORT]) -+ port = nla_get_be16(attrs[OVPN_A_PEER_REMOTE_PORT]); -+ -+ if (attrs[OVPN_A_PEER_REMOTE_IPV4]) { -+ af = AF_INET; -+ ss->ss_family = AF_INET; -+ in = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV4]); -+ } else if (attrs[OVPN_A_PEER_REMOTE_IPV6]) { -+ af = AF_INET6; -+ ss->ss_family = AF_INET6; -+ in6 = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV6]); -+ } else { -+ return AF_UNSPEC; -+ } -+ -+ switch (ss->ss_family) { -+ case AF_INET6: -+ /* If this is a regular IPv6 just break and move on, -+ * otherwise switch to AF_INET and extract the IPv4 accordingly -+ */ -+ if (!ipv6_addr_v4mapped(in6)) { -+ sin6 = (struct sockaddr_in6 *)ss; -+ sin6->sin6_port = port; -+ memcpy(&sin6->sin6_addr, in6, sizeof(*in6)); -+ break; -+ } -+ -+ /* v4-mapped-v6 address */ -+ ss->ss_family = AF_INET; -+ in = &in6->s6_addr32[3]; -+ fallthrough; -+ case 
AF_INET:
-+		sin = (struct sockaddr_in *)ss;
-+		sin->sin_port = port;
-+		sin->sin_addr.s_addr = *in;
-+		break;
-+	}
-+
-+	/* don't return ss->ss_family as it may have changed in case of
-+	 * v4-mapped-v6 address
-+	 */
-+	return af;
-+}
-+
-+static u8 *ovpn_nl_attr_local_ip(struct nlattr **attrs)
-+{
-+	u8 *addr6;
-+
-+	if (!attrs[OVPN_A_PEER_LOCAL_IPV4] && !attrs[OVPN_A_PEER_LOCAL_IPV6])
-+		return NULL;
-+
-+	if (attrs[OVPN_A_PEER_LOCAL_IPV4])
-+		return nla_data(attrs[OVPN_A_PEER_LOCAL_IPV4]);
-+
-+	addr6 = nla_data(attrs[OVPN_A_PEER_LOCAL_IPV6]);
-+	/* this is an IPv4-mapped IPv6 address, therefore extract the actual
-+	 * v4 address from the last 4 bytes
-+	 */
-+	if (ipv6_addr_v4mapped((struct in6_addr *)addr6))
-+		return addr6 + 12;
-+
-+	return addr6;
-+}
-+
-+static int ovpn_nl_peer_precheck(struct ovpn_struct *ovpn,
-+				 struct genl_info *info,
-+				 struct nlattr **attrs)
-+{
-+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
-+			      OVPN_A_PEER_ID))
-+		return -EINVAL;
-+
-+	if (attrs[OVPN_A_PEER_REMOTE_IPV4] && attrs[OVPN_A_PEER_REMOTE_IPV6]) {
-+		NL_SET_ERR_MSG_MOD(info->extack,
-+				   "cannot specify both remote IPv4 and IPv6 addresses");
-+		return -EINVAL;
-+	}
-+
-+	if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
-+	    !attrs[OVPN_A_PEER_REMOTE_IPV6] && attrs[OVPN_A_PEER_REMOTE_PORT]) {
-+		NL_SET_ERR_MSG_MOD(info->extack,
-+				   "cannot specify remote port without IP address");
-+		return -EINVAL;
-+	}
-+
-+	if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
-+	    attrs[OVPN_A_PEER_LOCAL_IPV4]) {
-+		NL_SET_ERR_MSG_MOD(info->extack,
-+				   "cannot specify local IPv4 address without remote");
-+		return -EINVAL;
-+	}
-+
-+	if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
-+	    attrs[OVPN_A_PEER_LOCAL_IPV6]) {
-+		NL_SET_ERR_MSG_MOD(info->extack,
-+				   "cannot specify local IPv6 address without remote");
-+		return -EINVAL;
-+	}
-+
-+	if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
-+	    attrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
-+		NL_SET_ERR_MSG_MOD(info->extack,
-+				   "cannot specify scope id without remote IPv6 address");
-+		return -EINVAL;
-+	}
-+
-+	/* VPN IPs are needed only in MP mode for selecting the right peer */
-+	if (ovpn->mode == OVPN_MODE_P2P && (attrs[OVPN_A_PEER_VPN_IPV4] ||
-+					    attrs[OVPN_A_PEER_VPN_IPV6])) {
-+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
-+				       "VPN IP unexpected in P2P mode");
-+		return -EINVAL;
-+	}
-+
-+	if ((attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
-+	     !attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) ||
-+	    (!attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
-+	     attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])) {
-+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
-+				       "keepalive interval and timeout are required together");
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * ovpn_nl_peer_modify - modify the peer attributes according to the incoming msg
-+ * @peer: the peer to modify
-+ * @info: generic netlink info from the user request
-+ * @attrs: the attributes from the user request
-+ *
-+ * Return: a negative error code in case of failure, 0 on success or 1 on
-+ *	   success and the VPN IPs have been modified (requires rehashing in MP
-+ *	   mode)
-+ */
-+static int ovpn_nl_peer_modify(struct ovpn_peer *peer, struct genl_info *info,
-+			       struct nlattr **attrs)
-+{
-+	struct sockaddr_storage ss = {};
-+	u32 sockfd, interv, timeout;
-+	struct socket *sock = NULL;
-+	u8 *local_ip = NULL;
-+	bool rehash = false;
-+	int ret;
-+
-+	if (attrs[OVPN_A_PEER_SOCKET]) {
-+		/* lookup the fd in the kernel table and extract the socket
-+		 * object
-+		 */
-+		sockfd = nla_get_u32(attrs[OVPN_A_PEER_SOCKET]);
-+		/* sockfd_lookup() increases sock's refcounter */
-+		sock = 
sockfd_lookup(sockfd, &ret); -+ if (!sock) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot lookup peer socket (fd=%u): %d", -+ sockfd, ret); -+ return -ENOTSOCK; -+ } -+ -+ /* Only when using UDP as transport protocol the remote endpoint -+ * can be configured so that ovpn knows where to send packets -+ * to. -+ * -+ * In case of TCP, the socket is connected to the peer and ovpn -+ * will just send bytes over it, without the need to specify a -+ * destination. -+ */ -+ if (sock->sk->sk_protocol != IPPROTO_UDP && -+ (attrs[OVPN_A_PEER_REMOTE_IPV4] || -+ attrs[OVPN_A_PEER_REMOTE_IPV6])) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "unexpected remote IP address for non UDP socket"); -+ sockfd_put(sock); -+ return -EINVAL; -+ } -+ -+ if (peer->sock) -+ ovpn_socket_put(peer->sock); -+ -+ peer->sock = ovpn_socket_new(sock, peer); -+ if (IS_ERR(peer->sock)) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot encapsulate socket: %ld", -+ PTR_ERR(peer->sock)); -+ sockfd_put(sock); -+ peer->sock = NULL; -+ return -ENOTSOCK; -+ } -+ } -+ -+ if (ovpn_nl_attr_sockaddr_remote(attrs, &ss) != AF_UNSPEC) { -+ /* we carry the local IP in a generic container. -+ * ovpn_peer_reset_sockaddr() will properly interpret it -+ * based on ss.ss_family -+ */ -+ local_ip = ovpn_nl_attr_local_ip(attrs); -+ -+ spin_lock_bh(&peer->lock); -+ /* set peer sockaddr */ -+ ret = ovpn_peer_reset_sockaddr(peer, &ss, local_ip); -+ if (ret < 0) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot set peer sockaddr: %d", -+ ret); -+ spin_unlock_bh(&peer->lock); -+ return ret; -+ } -+ spin_unlock_bh(&peer->lock); -+ } -+ -+ if (attrs[OVPN_A_PEER_VPN_IPV4]) { -+ rehash = true; -+ peer->vpn_addrs.ipv4.s_addr = -+ nla_get_in_addr(attrs[OVPN_A_PEER_VPN_IPV4]); -+ } -+ -+ if (attrs[OVPN_A_PEER_VPN_IPV6]) { -+ rehash = true; -+ peer->vpn_addrs.ipv6 = -+ nla_get_in6_addr(attrs[OVPN_A_PEER_VPN_IPV6]); -+ } -+ -+ /* when setting the keepalive, both parameters have to be configured */ -+ if (attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] && -+ attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) { -+ interv = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]); -+ timeout = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]); -+ ovpn_peer_keepalive_set(peer, interv, timeout); -+ } -+ -+ netdev_dbg(peer->ovpn->dev, -+ "%s: peer id=%u endpoint=%pIScp/%s VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n", -+ __func__, peer->id, &ss, -+ peer->sock->sock->sk->sk_prot_creator->name, -+ &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6); -+ -+ return rehash ? 
1 : 0; -+} -+ -+int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; -+ struct ovpn_struct *ovpn = info->user_ptr[0]; -+ struct ovpn_peer *peer; -+ u32 peer_id; -+ int ret; -+ -+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) -+ return -EINVAL; -+ -+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], -+ ovpn_peer_nl_policy, info->extack); -+ if (ret) -+ return ret; -+ -+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs); -+ if (ret < 0) -+ return ret; -+ -+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs, -+ OVPN_A_PEER_SOCKET)) -+ return -EINVAL; -+ -+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); -+ peer = ovpn_peer_new(ovpn, peer_id); -+ if (IS_ERR(peer)) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot create new peer object for peer %u: %ld", -+ peer_id, PTR_ERR(peer)); -+ return PTR_ERR(peer); -+ } -+ -+ ret = ovpn_nl_peer_modify(peer, info, attrs); -+ if (ret < 0) -+ goto peer_release; -+ -+ ret = ovpn_peer_add(ovpn, peer); -+ if (ret < 0) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot add new peer (id=%u) to hashtable: %d\n", -+ peer->id, ret); -+ goto peer_release; -+ } -+ -+ return 0; -+ -+peer_release: -+ /* release right away because peer is not used in any context */ -+ ovpn_peer_release(peer); -+ -+ return ret; -+} -+ -+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; -+ struct ovpn_struct *ovpn = info->user_ptr[0]; -+ struct ovpn_peer *peer; -+ u32 peer_id; -+ int ret; -+ -+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) -+ return -EINVAL; -+ -+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], -+ ovpn_peer_nl_policy, info->extack); -+ if (ret) -+ return ret; -+ -+ ret = ovpn_nl_peer_precheck(ovpn, info, attrs); -+ if (ret < 0) -+ return ret; -+ -+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); -+ peer = ovpn_peer_get_by_id(ovpn, peer_id); -+ if (!peer) -+ return -ENOENT; -+ -+ ret = ovpn_nl_peer_modify(peer, info, attrs); -+ if (ret < 0) { -+ ovpn_peer_put(peer); -+ return ret; -+ } -+ -+ /* ret == 1 means that VPN IPv4/6 has been modified and rehashing -+ * is required -+ */ -+ if (ret > 0) { -+ spin_lock_bh(&ovpn->peers->lock); -+ ovpn_peer_hash_vpn_ip(peer); -+ spin_unlock_bh(&ovpn->peers->lock); -+ } -+ -+ ovpn_peer_put(peer); -+ -+ return 0; -+} -+ -+static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info, -+ const struct ovpn_peer *peer, u32 portid, u32 seq, -+ int flags) -+{ -+ const struct ovpn_bind *bind; -+ struct nlattr *attr; -+ void *hdr; -+ -+ hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags, -+ OVPN_CMD_PEER_GET); -+ if (!hdr) -+ return -ENOBUFS; -+ -+ attr = nla_nest_start(skb, OVPN_A_PEER); -+ if (!attr) -+ goto err; -+ -+ if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id)) -+ goto err; -+ -+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) -+ if (nla_put_in_addr(skb, OVPN_A_PEER_VPN_IPV4, -+ peer->vpn_addrs.ipv4.s_addr)) -+ goto err; -+ -+ if (!ipv6_addr_equal(&peer->vpn_addrs.ipv6, &in6addr_any)) -+ if (nla_put_in6_addr(skb, OVPN_A_PEER_VPN_IPV6, -+ &peer->vpn_addrs.ipv6)) -+ goto err; -+ -+ if (nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_INTERVAL, -+ peer->keepalive_interval) || -+ nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_TIMEOUT, -+ peer->keepalive_timeout)) -+ goto err; -+ -+ rcu_read_lock(); -+ bind = rcu_dereference(peer->bind); -+ if (bind) { -+ if (bind->remote.in4.sin_family == AF_INET) { -+ if (nla_put_in_addr(skb, 
OVPN_A_PEER_REMOTE_IPV4, -+ bind->remote.in4.sin_addr.s_addr) || -+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT, -+ bind->remote.in4.sin_port) || -+ nla_put_in_addr(skb, OVPN_A_PEER_LOCAL_IPV4, -+ bind->local.ipv4.s_addr)) -+ goto err_unlock; -+ } else if (bind->remote.in4.sin_family == AF_INET6) { -+ if (nla_put_in6_addr(skb, OVPN_A_PEER_REMOTE_IPV6, -+ &bind->remote.in6.sin6_addr) || -+ nla_put_u32(skb, OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID, -+ bind->remote.in6.sin6_scope_id) || -+ nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT, -+ bind->remote.in6.sin6_port) || -+ nla_put_in6_addr(skb, OVPN_A_PEER_LOCAL_IPV6, -+ &bind->local.ipv6)) -+ goto err_unlock; -+ } -+ } -+ rcu_read_unlock(); -+ -+ if (nla_put_net16(skb, OVPN_A_PEER_LOCAL_PORT, -+ inet_sk(peer->sock->sock->sk)->inet_sport) || -+ /* VPN RX stats */ -+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_BYTES, -+ atomic64_read(&peer->vpn_stats.rx.bytes)) || -+ nla_put_uint(skb, OVPN_A_PEER_VPN_RX_PACKETS, -+ atomic64_read(&peer->vpn_stats.rx.packets)) || -+ /* VPN TX stats */ -+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_BYTES, -+ atomic64_read(&peer->vpn_stats.tx.bytes)) || -+ nla_put_uint(skb, OVPN_A_PEER_VPN_TX_PACKETS, -+ atomic64_read(&peer->vpn_stats.tx.packets)) || -+ /* link RX stats */ -+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_BYTES, -+ atomic64_read(&peer->link_stats.rx.bytes)) || -+ nla_put_uint(skb, OVPN_A_PEER_LINK_RX_PACKETS, -+ atomic64_read(&peer->link_stats.rx.packets)) || -+ /* link TX stats */ -+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_BYTES, -+ atomic64_read(&peer->link_stats.tx.bytes)) || -+ nla_put_uint(skb, OVPN_A_PEER_LINK_TX_PACKETS, -+ atomic64_read(&peer->link_stats.tx.packets))) -+ goto err; -+ -+ nla_nest_end(skb, attr); -+ genlmsg_end(skb, hdr); -+ -+ return 0; -+err_unlock: -+ rcu_read_unlock(); -+err: -+ genlmsg_cancel(skb, hdr); -+ return -EMSGSIZE; -+} -+ -+int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; -+ struct ovpn_struct *ovpn = info->user_ptr[0]; -+ struct ovpn_peer *peer; -+ struct sk_buff *msg; -+ u32 peer_id; -+ int ret; -+ -+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) -+ return -EINVAL; -+ -+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], -+ ovpn_peer_nl_policy, info->extack); -+ if (ret) -+ return ret; -+ -+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs, -+ OVPN_A_PEER_ID)) -+ return -EINVAL; -+ -+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); -+ peer = ovpn_peer_get_by_id(ovpn, peer_id); -+ if (!peer) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot find peer with id %u", peer_id); -+ return -ENOENT; -+ } -+ -+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); -+ if (!msg) { -+ ret = -ENOMEM; -+ goto err; -+ } -+ -+ ret = ovpn_nl_send_peer(msg, info, peer, info->snd_portid, -+ info->snd_seq, 0); -+ if (ret < 0) { -+ nlmsg_free(msg); -+ goto err; -+ } -+ -+ ret = genlmsg_reply(msg, info); -+err: -+ ovpn_peer_put(peer); -+ return ret; -+} -+ -+int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) -+{ -+ const struct genl_info *info = genl_info_dump(cb); -+ int bkt, last_idx = cb->args[1], dumped = 0; -+ struct ovpn_struct *ovpn; -+ struct ovpn_peer *peer; -+ -+ ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info); -+ if (IS_ERR(ovpn)) -+ return PTR_ERR(ovpn); -+ -+ if (ovpn->mode == OVPN_MODE_P2P) { -+ /* if we already dumped a peer it means we are done */ -+ if (last_idx) -+ goto out; -+ -+ rcu_read_lock(); -+ peer = rcu_dereference(ovpn->peer); -+ if (peer) { -+ if 
(ovpn_nl_send_peer(skb, info, peer, -+ NETLINK_CB(cb->skb).portid, -+ cb->nlh->nlmsg_seq, -+ NLM_F_MULTI) == 0) -+ dumped++; -+ } -+ rcu_read_unlock(); -+ } else { -+ rcu_read_lock(); -+ hash_for_each_rcu(ovpn->peers->by_id, bkt, peer, -+ hash_entry_id) { -+ /* skip already dumped peers that were dumped by -+ * previous invocations -+ */ -+ if (last_idx > 0) { -+ last_idx--; -+ continue; -+ } -+ -+ if (ovpn_nl_send_peer(skb, info, peer, -+ NETLINK_CB(cb->skb).portid, -+ cb->nlh->nlmsg_seq, -+ NLM_F_MULTI) < 0) -+ break; -+ -+ /* count peers being dumped during this invocation */ -+ dumped++; -+ } -+ rcu_read_unlock(); -+ } -+ -+out: -+ netdev_put(ovpn->dev, &ovpn->dev_tracker); -+ -+ /* sum up peers dumped in this message, so that at the next invocation -+ * we can continue from where we left -+ */ -+ cb->args[1] += dumped; -+ return skb->len; -+} -+ -+int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; -+ struct ovpn_struct *ovpn = info->user_ptr[0]; -+ struct ovpn_peer *peer; -+ u32 peer_id; -+ int ret; -+ -+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) -+ return -EINVAL; -+ -+ ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], -+ ovpn_peer_nl_policy, info->extack); -+ if (ret) -+ return ret; -+ -+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs, -+ OVPN_A_PEER_ID)) -+ return -EINVAL; -+ -+ peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); -+ -+ peer = ovpn_peer_get_by_id(ovpn, peer_id); -+ if (!peer) -+ return -ENOENT; -+ -+ netdev_dbg(ovpn->dev, "%s: peer id=%u\n", __func__, peer->id); -+ ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE); -+ ovpn_peer_put(peer); -+ -+ return ret; -+} -+ -+static int ovpn_nl_get_key_dir(struct genl_info *info, struct nlattr *key, -+ enum ovpn_cipher_alg cipher, -+ struct ovpn_key_direction *dir) -+{ -+ struct nlattr *attrs[OVPN_A_KEYDIR_MAX + 1]; -+ int ret; -+ -+ ret = nla_parse_nested(attrs, OVPN_A_KEYDIR_MAX, key, -+ ovpn_keydir_nl_policy, info->extack); -+ if (ret) -+ return ret; -+ -+ switch (cipher) { -+ case OVPN_CIPHER_ALG_AES_GCM: -+ case OVPN_CIPHER_ALG_CHACHA20_POLY1305: -+ if (NL_REQ_ATTR_CHECK(info->extack, key, attrs, -+ OVPN_A_KEYDIR_CIPHER_KEY) || -+ NL_REQ_ATTR_CHECK(info->extack, key, attrs, -+ OVPN_A_KEYDIR_NONCE_TAIL)) -+ return -EINVAL; -+ -+ dir->cipher_key = nla_data(attrs[OVPN_A_KEYDIR_CIPHER_KEY]); -+ dir->cipher_key_size = nla_len(attrs[OVPN_A_KEYDIR_CIPHER_KEY]); -+ -+ /* These algorithms require a 96bit nonce, -+ * Construct it by combining 4-bytes packet id and -+ * 8-bytes nonce-tail from userspace -+ */ -+ dir->nonce_tail = nla_data(attrs[OVPN_A_KEYDIR_NONCE_TAIL]); -+ dir->nonce_tail_size = nla_len(attrs[OVPN_A_KEYDIR_NONCE_TAIL]); -+ break; -+ default: -+ NL_SET_ERR_MSG_MOD(info->extack, "unsupported cipher"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+/** -+ * ovpn_nl_key_new_doit - configure a new key for the specified peer -+ * @skb: incoming netlink message -+ * @info: genetlink metadata -+ * -+ * This function allows the user to install a new key in the peer crypto -+ * state. -+ * Each peer has two 'slots', namely 'primary' and 'secondary', where -+ * keys can be installed. The key in the 'primary' slot is used for -+ * encryption, while both keys can be used for decryption by matching the -+ * key ID carried in the incoming packet. -+ * -+ * The user is responsible for rotating keys when necessary. 
The user -+ * may fetch peer traffic statistics via netlink in order to better -+ * identify the right time to rotate keys. -+ * The renegotiation follows these steps: -+ * 1. a new key is computed by the user and is installed in the 'secondary' -+ * slot -+ * 2. at user discretion (usually after a predetermined time) 'primary' and -+ * 'secondary' contents are swapped and the new key starts being used for -+ * encryption, while the old key is kept around for decryption of late -+ * packets. -+ * -+ * Return: 0 on success or a negative error code otherwise. -+ */ -+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1]; -+ struct ovpn_struct *ovpn = info->user_ptr[0]; -+ struct ovpn_peer_key_reset pkr; -+ struct ovpn_peer *peer; -+ u32 peer_id; -+ int ret; -+ -+ if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF)) -+ return -EINVAL; -+ -+ ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX, -+ info->attrs[OVPN_A_KEYCONF], -+ ovpn_keyconf_nl_policy, info->extack); -+ if (ret) -+ return ret; -+ -+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, -+ OVPN_A_KEYCONF_PEER_ID)) -+ return -EINVAL; -+ -+ if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, -+ OVPN_A_KEYCONF_SLOT) || -+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, -+ OVPN_A_KEYCONF_KEY_ID) || -+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, -+ OVPN_A_KEYCONF_CIPHER_ALG) || -+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, -+ OVPN_A_KEYCONF_ENCRYPT_DIR) || -+ NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, -+ OVPN_A_KEYCONF_DECRYPT_DIR)) -+ return -EINVAL; -+ -+ peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]); -+ pkr.slot = nla_get_u8(attrs[OVPN_A_KEYCONF_SLOT]); -+ pkr.key.key_id = nla_get_u16(attrs[OVPN_A_KEYCONF_KEY_ID]); -+ pkr.key.cipher_alg = nla_get_u16(attrs[OVPN_A_KEYCONF_CIPHER_ALG]); -+ -+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_ENCRYPT_DIR], -+ pkr.key.cipher_alg, &pkr.key.encrypt); -+ if (ret < 0) -+ return ret; -+ -+ ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_DECRYPT_DIR], -+ pkr.key.cipher_alg, &pkr.key.decrypt); -+ if (ret < 0) -+ return ret; -+ -+ peer = ovpn_peer_get_by_id(ovpn, peer_id); -+ if (!peer) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "no peer with id %u to set key for", -+ peer_id); -+ return -ENOENT; -+ } -+ -+ ret = ovpn_crypto_state_reset(&peer->crypto, &pkr); -+ if (ret < 0) { -+ NL_SET_ERR_MSG_FMT_MOD(info->extack, -+ "cannot install new key for peer %u", -+ peer_id); -+ goto out; -+ } -+ -+ netdev_dbg(ovpn->dev, "%s: new key installed (id=%u) for peer %u\n", -+ __func__, pkr.key.key_id, peer_id); -+out: -+ ovpn_peer_put(peer); -+ return ret; -+} -+ -+static int ovpn_nl_send_key(struct sk_buff *skb, const struct genl_info *info, -+ u32 peer_id, enum ovpn_key_slot slot, -+ const struct ovpn_key_config *keyconf, u32 portid, -+ u32 seq, int flags) -+{ -+ struct nlattr *attr; -+ void *hdr; -+ -+ hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags, -+ OVPN_CMD_KEY_GET); -+ if (!hdr) -+ return -ENOBUFS; -+ -+ attr = nla_nest_start(skb, OVPN_A_KEYCONF); -+ if (!attr) -+ goto err; -+ -+ if (nla_put_u32(skb, OVPN_A_KEYCONF_PEER_ID, peer_id)) -+ goto err; -+ -+ if (nla_put_u32(skb, OVPN_A_KEYCONF_SLOT, slot) || -+ nla_put_u32(skb, OVPN_A_KEYCONF_KEY_ID, keyconf->key_id) || -+ nla_put_u32(skb, OVPN_A_KEYCONF_CIPHER_ALG, keyconf->cipher_alg)) -+ goto err; -+ -+ nla_nest_end(skb, attr); -+ 
-+	genlmsg_end(skb, hdr);
-+
-+	return 0;
-+err:
-+	genlmsg_cancel(skb, hdr);
-+	return -EMSGSIZE;
-+}
-+
-+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
-+{
-+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
-+	struct ovpn_struct *ovpn = info->user_ptr[0];
-+	struct ovpn_key_config keyconf = { 0 };
-+	enum ovpn_key_slot slot;
-+	struct ovpn_peer *peer;
-+	struct sk_buff *msg;
-+	u32 peer_id;
-+	int ret;
-+
-+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
-+		return -EINVAL;
-+
-+	ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
-+			       info->attrs[OVPN_A_KEYCONF],
-+			       ovpn_keyconf_nl_policy, info->extack);
-+	if (ret)
-+		return ret;
-+
-+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
-+			      OVPN_A_KEYCONF_PEER_ID))
-+		return -EINVAL;
-+
-+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
-+			      OVPN_A_KEYCONF_SLOT))
-+		return -EINVAL;
-+
-+	peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
-+	slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
-+
-+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
-+	if (!peer) {
-+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
-+				       "cannot find peer with id %u", peer_id);
-+		return -ENOENT;
-+	}
-+
-+	ret = ovpn_crypto_config_get(&peer->crypto, slot, &keyconf);
-+	if (ret < 0) {
-+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
-+				       "cannot extract key from slot %u for peer %u",
-+				       slot, peer_id);
-+		goto err;
-+	}
-+
-+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-+	if (!msg) {
-+		ret = -ENOMEM;
-+		goto err;
-+	}
-+
-+	ret = ovpn_nl_send_key(msg, info, peer->id, slot, &keyconf,
-+			       info->snd_portid, info->snd_seq, 0);
-+	if (ret < 0) {
-+		nlmsg_free(msg);
-+		goto err;
-+	}
-+
-+	ret = genlmsg_reply(msg, info);
-+err:
-+	ovpn_peer_put(peer);
-+	return ret;
-+}
-+
-+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
-+{
-+	struct ovpn_struct *ovpn = info->user_ptr[0];
-+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
-+	struct ovpn_peer *peer;
-+	u32 peer_id;
-+	int ret;
-+
-+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
-+		return -EINVAL;
-+
-+	ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
-+			       info->attrs[OVPN_A_KEYCONF],
-+			       ovpn_keyconf_nl_policy, info->extack);
-+	if (ret)
-+		return ret;
-+
-+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
-+			      OVPN_A_KEYCONF_PEER_ID))
-+		return -EINVAL;
-+
-+	peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
-+
-+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
-+	if (!peer) {
-+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
-+				       "no peer with id %u to swap keys for",
-+				       peer_id);
-+		return -ENOENT;
-+	}
-+
-+	ovpn_crypto_key_slots_swap(&peer->crypto);
-+	ovpn_peer_put(peer);
-+
-+	return 0;
-+}
-+
-+int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info)
-+{
-+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
-+	struct ovpn_struct *ovpn = info->user_ptr[0];
-+	enum ovpn_key_slot slot;
-+	struct ovpn_peer *peer;
-+	u32 peer_id;
-+	int ret;
-+
-+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
-+		return -EINVAL;
-+
-+	ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
-+			       info->attrs[OVPN_A_KEYCONF],
-+			       ovpn_keyconf_nl_policy, info->extack);
-+	if (ret)
-+		return ret;
-+
-+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
-+			      OVPN_A_KEYCONF_PEER_ID))
-+		return -EINVAL;
-+
-+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
-+			      OVPN_A_KEYCONF_SLOT))
-+		return -EINVAL;
-+
-+	peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
-+	slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
-+
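All five KEY_* handlers in this file act on the two per-peer key slots described in the comment above ovpn_nl_key_new_doit(): the primary slot encrypts, both slots can decrypt (selected by the key ID carried in each packet), and rotation stages a new key through the secondary slot before swapping. A standalone toy model of that rotation, with illustrative names only:

#include <stdio.h>

enum { SLOT_PRIMARY, SLOT_SECONDARY };

struct keyslot { int key_id; int valid; };

static struct keyslot slots[2];

static void key_new(int slot, int key_id)	/* cf. OVPN_CMD_KEY_NEW */
{
	slots[slot].key_id = key_id;
	slots[slot].valid = 1;
}

static void key_swap(void)			/* cf. OVPN_CMD_KEY_SWAP */
{
	struct keyslot tmp = slots[SLOT_PRIMARY];

	slots[SLOT_PRIMARY] = slots[SLOT_SECONDARY];
	slots[SLOT_SECONDARY] = tmp;
}

int main(void)
{
	key_new(SLOT_PRIMARY, 0);	/* initial key */
	key_new(SLOT_SECONDARY, 1);	/* stage the next key */
	key_swap();			/* rotate: key id 1 now encrypts */
	/* the old key (id 0) remains in the secondary slot and can still
	 * decrypt packets that were in flight during the rotation
	 */
	printf("encrypting with key id %d\n", slots[SLOT_PRIMARY].key_id);
	return 0;
}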
-+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
-+	if (!peer) {
-+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
-+				       "no peer with id %u to delete key for",
-+				       peer_id);
-+		return -ENOENT;
-+	}
-+
-+	ovpn_crypto_key_slot_delete(&peer->crypto, slot);
-+	ovpn_peer_put(peer);
-+
-+	return 0;
-+}
-+
-+/**
-+ * ovpn_nl_peer_del_notify - notify userspace about peer being deleted
-+ * @peer: the peer being deleted
-+ *
-+ * Return: 0 on success or a negative error code otherwise
-+ */
-+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer)
-+{
-+	struct sk_buff *msg;
-+	struct nlattr *attr;
-+	int ret = -EMSGSIZE;
-+	void *hdr;
-+
-+	netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n",
-+		    peer->id, peer->delete_reason);
-+
-+	msg = nlmsg_new(100, GFP_ATOMIC);
-+	if (!msg)
-+		return -ENOMEM;
-+
-+	hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_PEER_DEL_NTF);
-+	if (!hdr) {
-+		ret = -ENOBUFS;
-+		goto err_free_msg;
-+	}
-+
-+	if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
-+		goto err_cancel_msg;
-+
-+	attr = nla_nest_start(msg, OVPN_A_PEER);
-+	if (!attr)
-+		goto err_cancel_msg;
-+
-+	if (nla_put_u8(msg, OVPN_A_PEER_DEL_REASON, peer->delete_reason))
-+		goto err_cancel_msg;
-+
-+	if (nla_put_u32(msg, OVPN_A_PEER_ID, peer->id))
-+		goto err_cancel_msg;
-+
-+	nla_nest_end(msg, attr);
-+
-+	genlmsg_end(msg, hdr);
-+
-+	genlmsg_multicast_netns(&ovpn_nl_family, dev_net(peer->ovpn->dev), msg,
-+				0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
-+
-+	return 0;
-+
-+err_cancel_msg:
-+	genlmsg_cancel(msg, hdr);
-+err_free_msg:
-+	nlmsg_free(msg);
-+	return ret;
-+}
-+
-+/**
-+ * ovpn_nl_key_swap_notify - notify userspace that a peer's key must be renewed
-+ * @peer: the peer whose key needs to be renewed
-+ * @key_id: the ID of the key that needs to be renewed
-+ *
-+ * Return: 0 on success or a negative error code otherwise
-+ */
-+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id)
-+{
-+	struct nlattr *k_attr;
-+	struct sk_buff *msg;
-+	int ret = -EMSGSIZE;
-+	void *hdr;
-+
-+	netdev_info(peer->ovpn->dev, "peer with id %u must rekey - primary key unusable.\n",
-+		    peer->id);
-+
-+	msg = nlmsg_new(100, GFP_ATOMIC);
-+	if (!msg)
-+		return -ENOMEM;
-+
-+	hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_KEY_SWAP_NTF);
-+	if (!hdr) {
-+		ret = -ENOBUFS;
-+		goto err_free_msg;
-+	}
-+
-+	if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex))
-+		goto err_cancel_msg;
-+
-+	k_attr = nla_nest_start(msg, OVPN_A_KEYCONF);
-+	if (!k_attr)
-+		goto err_cancel_msg;
-+
-+	if (nla_put_u32(msg, OVPN_A_KEYCONF_PEER_ID, peer->id))
-+		goto err_cancel_msg;
-+
-+	if (nla_put_u16(msg, OVPN_A_KEYCONF_KEY_ID, key_id))
-+		goto err_cancel_msg;
-+
-+	nla_nest_end(msg, k_attr);
-+	genlmsg_end(msg, hdr);
-+
-+	genlmsg_multicast_netns(&ovpn_nl_family, dev_net(peer->ovpn->dev), msg,
-+				0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
-+
-+	return 0;
-+
-+err_cancel_msg:
-+	genlmsg_cancel(msg, hdr);
-+err_free_msg:
-+	nlmsg_free(msg);
-+	return ret;
-+}
-+
-+/**
-+ * ovpn_nl_register - perform any needed registration in the NL subsystem
-+ *
-+ * Return: 0 on success, a negative error code otherwise
-+ */
-+int __init ovpn_nl_register(void)
-+{
-+	int ret = genl_register_family(&ovpn_nl_family);
-+
-+	if (ret) {
-+		pr_err("ovpn: genl_register_family failed: %d\n", ret);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * ovpn_nl_unregister - undo any module wide netlink registration
-+ */
-+void ovpn_nl_unregister(void)
-+{
-+	genl_unregister_family(&ovpn_nl_family);
-+}
-diff --git a/drivers/net/ovpn/netlink.h b/drivers/net/ovpn/netlink.h
-new file mode 100644
-index 000000000000..4ab3abcf23db
---- /dev/null
-+++ b/drivers/net/ovpn/netlink.h
-@@ -0,0 +1,18 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/* OpenVPN data channel offload
-+ *
-+ * Copyright (C) 2020-2024 OpenVPN, Inc.
-+ *
-+ * Author: Antonio Quartulli
-+ */
-+
-+#ifndef _NET_OVPN_NETLINK_H_
-+#define _NET_OVPN_NETLINK_H_
-+
-+int ovpn_nl_register(void);
-+void ovpn_nl_unregister(void);
-+
-+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer);
-+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id);
-+
-+#endif /* _NET_OVPN_NETLINK_H_ */
-diff --git a/drivers/net/ovpn/ovpnstruct.h b/drivers/net/ovpn/ovpnstruct.h
-new file mode 100644
-index 000000000000..4ac00d550ecb
---- /dev/null
-+++ b/drivers/net/ovpn/ovpnstruct.h
-@@ -0,0 +1,61 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/* OpenVPN data channel offload
-+ *
-+ * Copyright (C) 2019-2024 OpenVPN, Inc.
-+ *
-+ * Author: James Yonan
-+ *	   Antonio Quartulli
-+ */
-+
-+#ifndef _NET_OVPN_OVPNSTRUCT_H_
-+#define _NET_OVPN_OVPNSTRUCT_H_
-+
-+#include
-+#include
-+#include
-+#include
-+
-+/**
-+ * struct ovpn_peer_collection - container of peers for MultiPeer mode
-+ * @by_id: table of peers indexed by ID
-+ * @by_vpn_addr: table of peers indexed by VPN IP address (items can be
-+ *		 rehashed on the fly due to peer IP change)
-+ * @by_transp_addr: table of peers indexed by transport address (items can be
-+ *		    rehashed on the fly due to peer IP change)
-+ * @lock: protects writes to peer tables
-+ */
-+struct ovpn_peer_collection {
-+	DECLARE_HASHTABLE(by_id, 12);
-+	struct hlist_nulls_head by_vpn_addr[1 << 12];
-+	struct hlist_nulls_head by_transp_addr[1 << 12];
-+
-+	spinlock_t lock; /* protects writes to peer tables */
-+};
-+
-+/**
-+ * struct ovpn_struct - per ovpn interface state
-+ * @dev: the actual netdev representing the tunnel
-+ * @dev_tracker: reference tracker for associated dev
-+ * @registered: whether dev is still registered with netdev or not
-+ * @mode: device operation mode (i.e. p2p, mp, ...)
-+ * @lock: protect this object
-+ * @peers: data structures holding multi-peer references
-+ * @peer: in P2P mode, this is the only remote peer
-+ * @dev_list: entry for the module wide device list
-+ * @gro_cells: the Generic Receive Offload cells for this device
-+ * @keepalive_work: delayed work used to schedule the periodic keepalive job
-+ */
-+struct ovpn_struct {
-+	struct net_device *dev;
-+	netdevice_tracker dev_tracker;
-+	bool registered;
-+	enum ovpn_mode mode;
-+	spinlock_t lock; /* protect writing to the ovpn_struct object */
-+	struct ovpn_peer_collection *peers;
-+	struct ovpn_peer __rcu *peer;
-+	struct list_head dev_list;
-+	struct gro_cells gro_cells;
-+	struct delayed_work keepalive_work;
-+};
-+
-+#endif /* _NET_OVPN_OVPNSTRUCT_H_ */
-diff --git a/drivers/net/ovpn/packet.h b/drivers/net/ovpn/packet.h
-new file mode 100644
-index 000000000000..e14c9bf464f7
---- /dev/null
-+++ b/drivers/net/ovpn/packet.h
-@@ -0,0 +1,40 @@
-+/* SPDX-License-Identifier: GPL-2.0-only */
-+/* OpenVPN data channel offload
-+ *
-+ * Copyright (C) 2020-2024 OpenVPN, Inc.
-+ * -+ * Author: Antonio Quartulli -+ * James Yonan -+ */ -+ -+#ifndef _NET_OVPN_PACKET_H_ -+#define _NET_OVPN_PACKET_H_ -+ -+/* When the OpenVPN protocol is run in AEAD mode, use -+ * the OpenVPN packet ID as the AEAD nonce: -+ * -+ * 00000005 521c3b01 4308c041 -+ * [seq # ] [ nonce_tail ] -+ * [ 12-byte full IV ] -> NONCE_SIZE -+ * [4-bytes -> NONCE_WIRE_SIZE -+ * on wire] -+ */ -+ -+/* OpenVPN nonce size */ -+#define NONCE_SIZE 12 -+ -+/* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the -+ * size of the AEAD Associated Data (AD) sent over the wire -+ * and is normally the head of the IV -+ */ -+#define NONCE_WIRE_SIZE (NONCE_SIZE - sizeof(struct ovpn_nonce_tail)) -+ -+/* Last 8 bytes of AEAD nonce -+ * Provided by userspace and usually derived from -+ * key material generated during TLS handshake -+ */ -+struct ovpn_nonce_tail { -+ u8 u8[OVPN_NONCE_TAIL_SIZE]; -+}; -+ -+#endif /* _NET_OVPN_PACKET_H_ */ -diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c -new file mode 100644 -index 000000000000..91c608f1ffa1 ---- /dev/null -+++ b/drivers/net/ovpn/peer.c -@@ -0,0 +1,1201 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "bind.h" -+#include "pktid.h" -+#include "crypto.h" -+#include "io.h" -+#include "main.h" -+#include "netlink.h" -+#include "peer.h" -+#include "socket.h" -+ -+/** -+ * ovpn_peer_keepalive_set - configure keepalive values for peer -+ * @peer: the peer to configure -+ * @interval: outgoing keepalive interval -+ * @timeout: incoming keepalive timeout -+ */ -+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout) -+{ -+ time64_t now = ktime_get_real_seconds(); -+ -+ netdev_dbg(peer->ovpn->dev, -+ "%s: scheduling keepalive for peer %u: interval=%u timeout=%u\n", -+ __func__, peer->id, interval, timeout); -+ -+ peer->keepalive_interval = interval; -+ peer->last_sent = now; -+ peer->keepalive_xmit_exp = now + interval; -+ -+ peer->keepalive_timeout = timeout; -+ peer->last_recv = now; -+ peer->keepalive_recv_exp = now + timeout; -+ -+ /* now that interval and timeout have been changed, kick -+ * off the worker so that the next delay can be recomputed -+ */ -+ mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0); -+} -+ -+/** -+ * ovpn_peer_new - allocate and initialize a new peer object -+ * @ovpn: the openvpn instance inside which the peer should be created -+ * @id: the ID assigned to this peer -+ * -+ * Return: a pointer to the new peer on success or an error code otherwise -+ */ -+struct ovpn_peer *ovpn_peer_new(struct ovpn_struct *ovpn, u32 id) -+{ -+ struct ovpn_peer *peer; -+ int ret; -+ -+ /* alloc and init peer object */ -+ peer = kzalloc(sizeof(*peer), GFP_KERNEL); -+ if (!peer) -+ return ERR_PTR(-ENOMEM); -+ -+ peer->id = id; -+ peer->halt = false; -+ peer->ovpn = ovpn; -+ -+ peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY); -+ peer->vpn_addrs.ipv6 = in6addr_any; -+ -+ RCU_INIT_POINTER(peer->bind, NULL); -+ ovpn_crypto_state_init(&peer->crypto); -+ spin_lock_init(&peer->lock); -+ kref_init(&peer->refcount); -+ ovpn_peer_stats_init(&peer->vpn_stats); -+ ovpn_peer_stats_init(&peer->link_stats); -+ -+ ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL); -+ if (ret < 0) { -+ netdev_err(ovpn->dev, "%s: cannot initialize dst cache\n", -+ __func__); -+ kfree(peer); -+ return ERR_PTR(ret); -+ } -+ -+ 
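Each resource taken in ovpn_peer_new() is paired with a release in ovpn_peer_release() further down, and object lifetime is governed by the kref initialized above: ovpn_peer_hold() only succeeds while the count is non-zero, and the final put runs ovpn_peer_release_kref(). A minimal single-threaded sketch of that pattern, assuming plain ints in place of the kernel's atomic refcount_t and purely illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct peer {
	int refcount;	/* the kernel uses an atomic refcount_t */
	int id;
};

static struct peer *peer_new(int id)
{
	struct peer *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->id = id;
	p->refcount = 1;	/* like kref_init(): creator holds one reference */
	return p;
}

static int peer_hold(struct peer *p)
{
	if (p->refcount <= 0)
		return 0;	/* count already hit zero: refuse the hold */
	p->refcount++;
	return 1;
}

static void peer_put(struct peer *p)
{
	if (--p->refcount == 0) {	/* last reference: release the object */
		printf("releasing peer %d\n", p->id);
		free(p);
	}
}

int main(void)
{
	struct peer *p = peer_new(1);

	if (p && peer_hold(p))	/* e.g. before queueing a packet for TX */
		peer_put(p);	/* TX done */
	if (p)
		peer_put(p);	/* drop the creator's reference: frees */
	return 0;
}

The refuse-once-zero behaviour mirrors kref_get_unless_zero(), which is what makes the RCU lookups later in this file safe: a reader that loses the race against the final put simply fails to take a reference.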
netdev_hold(ovpn->dev, &ovpn->dev_tracker, GFP_KERNEL); -+ -+ return peer; -+} -+ -+/** -+ * ovpn_peer_reset_sockaddr - recreate binding for peer -+ * @peer: peer to recreate the binding for -+ * @ss: sockaddr to use as remote endpoint for the binding -+ * @local_ip: local IP for the binding -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer, -+ const struct sockaddr_storage *ss, -+ const u8 *local_ip) -+ __must_hold(&peer->lock) -+{ -+ struct ovpn_bind *bind; -+ size_t ip_len; -+ -+ /* create new ovpn_bind object */ -+ bind = ovpn_bind_from_sockaddr(ss); -+ if (IS_ERR(bind)) -+ return PTR_ERR(bind); -+ -+ if (local_ip) { -+ if (ss->ss_family == AF_INET) { -+ ip_len = sizeof(struct in_addr); -+ } else if (ss->ss_family == AF_INET6) { -+ ip_len = sizeof(struct in6_addr); -+ } else { -+ netdev_dbg(peer->ovpn->dev, "%s: invalid family for remote endpoint\n", -+ __func__); -+ kfree(bind); -+ return -EINVAL; -+ } -+ -+ memcpy(&bind->local, local_ip, ip_len); -+ } -+ -+ /* set binding */ -+ ovpn_bind_reset(peer, bind); -+ -+ return 0; -+} -+ -+#define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \ -+ typeof(_tbl) *__tbl = &(_tbl); \ -+ (&(*__tbl)[jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl)]); }) \ -+ -+/** -+ * ovpn_peer_float - update remote endpoint for peer -+ * @peer: peer to update the remote endpoint for -+ * @skb: incoming packet to retrieve the source address (remote) from -+ */ -+void ovpn_peer_float(struct ovpn_peer *peer, struct sk_buff *skb) -+{ -+ struct hlist_nulls_head *nhead; -+ struct sockaddr_storage ss; -+ const u8 *local_ip = NULL; -+ struct sockaddr_in6 *sa6; -+ struct sockaddr_in *sa; -+ struct ovpn_bind *bind; -+ sa_family_t family; -+ size_t salen; -+ -+ rcu_read_lock(); -+ bind = rcu_dereference(peer->bind); -+ if (unlikely(!bind)) { -+ rcu_read_unlock(); -+ return; -+ } -+ -+ spin_lock_bh(&peer->lock); -+ if (likely(ovpn_bind_skb_src_match(bind, skb))) -+ goto unlock; -+ -+ family = skb_protocol_to_family(skb); -+ -+ if (bind->remote.in4.sin_family == family) -+ local_ip = (u8 *)&bind->local; -+ -+ switch (family) { -+ case AF_INET: -+ sa = (struct sockaddr_in *)&ss; -+ sa->sin_family = AF_INET; -+ sa->sin_addr.s_addr = ip_hdr(skb)->saddr; -+ sa->sin_port = udp_hdr(skb)->source; -+ salen = sizeof(*sa); -+ break; -+ case AF_INET6: -+ sa6 = (struct sockaddr_in6 *)&ss; -+ sa6->sin6_family = AF_INET6; -+ sa6->sin6_addr = ipv6_hdr(skb)->saddr; -+ sa6->sin6_port = udp_hdr(skb)->source; -+ sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr, -+ skb->skb_iif); -+ salen = sizeof(*sa6); -+ break; -+ default: -+ goto unlock; -+ } -+ -+ netdev_dbg(peer->ovpn->dev, "%s: peer %d floated to %pIScp", __func__, -+ peer->id, &ss); -+ ovpn_peer_reset_sockaddr(peer, (struct sockaddr_storage *)&ss, -+ local_ip); -+ spin_unlock_bh(&peer->lock); -+ rcu_read_unlock(); -+ -+ /* rehashing is required only in MP mode as P2P has one peer -+ * only and thus there is no hashtable -+ */ -+ if (peer->ovpn->mode == OVPN_MODE_MP) { -+ spin_lock_bh(&peer->ovpn->peers->lock); -+ /* remove old hashing */ -+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr); -+ /* re-add with new transport address */ -+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr, -+ &ss, salen); -+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead); -+ spin_unlock_bh(&peer->ovpn->peers->lock); -+ } -+ return; -+unlock: -+ spin_unlock_bh(&peer->lock); -+ rcu_read_unlock(); -+} -+ -+void ovpn_peer_release(struct ovpn_peer 
*peer) -+{ -+ if (peer->sock) -+ ovpn_socket_put(peer->sock); -+ -+ ovpn_crypto_state_release(&peer->crypto); -+ spin_lock_bh(&peer->lock); -+ ovpn_bind_reset(peer, NULL); -+ spin_unlock_bh(&peer->lock); -+ -+ dst_cache_destroy(&peer->dst_cache); -+ netdev_put(peer->ovpn->dev, &peer->ovpn->dev_tracker); -+ kfree_rcu(peer, rcu); -+} -+ -+/** -+ * ovpn_peer_release_kref - callback for kref_put -+ * @kref: the kref object belonging to the peer -+ */ -+void ovpn_peer_release_kref(struct kref *kref) -+{ -+ struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount); -+ -+ ovpn_nl_peer_del_notify(peer); -+ ovpn_peer_release(peer); -+} -+ -+/** -+ * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address -+ * @skb: the packet to extract data from -+ * @ss: the sockaddr to fill -+ * -+ * Return: true on success or false otherwise -+ */ -+static bool ovpn_peer_skb_to_sockaddr(struct sk_buff *skb, -+ struct sockaddr_storage *ss) -+{ -+ struct sockaddr_in6 *sa6; -+ struct sockaddr_in *sa4; -+ -+ ss->ss_family = skb_protocol_to_family(skb); -+ switch (ss->ss_family) { -+ case AF_INET: -+ sa4 = (struct sockaddr_in *)ss; -+ sa4->sin_family = AF_INET; -+ sa4->sin_addr.s_addr = ip_hdr(skb)->saddr; -+ sa4->sin_port = udp_hdr(skb)->source; -+ break; -+ case AF_INET6: -+ sa6 = (struct sockaddr_in6 *)ss; -+ sa6->sin6_family = AF_INET6; -+ sa6->sin6_addr = ipv6_hdr(skb)->saddr; -+ sa6->sin6_port = udp_hdr(skb)->source; -+ break; -+ default: -+ return false; -+ } -+ -+ return true; -+} -+ -+/** -+ * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb -+ * @skb: the outgoing packet -+ * -+ * Return: the IPv4 of the nexthop -+ */ -+static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb) -+{ -+ const struct rtable *rt = skb_rtable(skb); -+ -+ if (rt && rt->rt_uses_gateway) -+ return rt->rt_gw4; -+ -+ return ip_hdr(skb)->daddr; -+} -+ -+/** -+ * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb -+ * @skb: the outgoing packet -+ * -+ * Return: the IPv6 of the nexthop -+ */ -+static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb) -+{ -+ const struct rt6_info *rt = skb_rt6_info(skb); -+ -+ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) -+ return ipv6_hdr(skb)->daddr; -+ -+ return rt->rt6i_gateway; -+} -+ -+/** -+ * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address -+ * @ovpn: the openvpn instance to search -+ * @addr: VPN IPv4 to use as search key -+ * -+ * Refcounter is not increased for the returned peer. -+ * -+ * Return: the peer if found or NULL otherwise -+ */ -+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_struct *ovpn, -+ __be32 addr) -+{ -+ struct hlist_nulls_head *nhead; -+ struct hlist_nulls_node *ntmp; -+ struct ovpn_peer *tmp; -+ -+ nhead = ovpn_get_hash_head(ovpn->peers->by_vpn_addr, &addr, -+ sizeof(addr)); -+ -+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4) -+ if (addr == tmp->vpn_addrs.ipv4.s_addr) -+ return tmp; -+ -+ return NULL; -+} -+ -+/** -+ * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address -+ * @ovpn: the openvpn instance to search -+ * @addr: VPN IPv6 to use as search key -+ * -+ * Refcounter is not increased for the returned peer. 
-+ * -+ * Return: the peer if found or NULL otherwise -+ */ -+static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_struct *ovpn, -+ struct in6_addr *addr) -+{ -+ struct hlist_nulls_head *nhead; -+ struct hlist_nulls_node *ntmp; -+ struct ovpn_peer *tmp; -+ -+ nhead = ovpn_get_hash_head(ovpn->peers->by_vpn_addr, addr, -+ sizeof(*addr)); -+ -+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6) -+ if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6)) -+ return tmp; -+ -+ return NULL; -+} -+ -+/** -+ * ovpn_peer_transp_match - check if sockaddr and peer binding match -+ * @peer: the peer to get the binding from -+ * @ss: the sockaddr to match -+ * -+ * Return: true if sockaddr and binding match or false otherwise -+ */ -+static bool ovpn_peer_transp_match(const struct ovpn_peer *peer, -+ const struct sockaddr_storage *ss) -+{ -+ struct ovpn_bind *bind = rcu_dereference(peer->bind); -+ struct sockaddr_in6 *sa6; -+ struct sockaddr_in *sa4; -+ -+ if (unlikely(!bind)) -+ return false; -+ -+ if (ss->ss_family != bind->remote.in4.sin_family) -+ return false; -+ -+ switch (ss->ss_family) { -+ case AF_INET: -+ sa4 = (struct sockaddr_in *)ss; -+ if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr) -+ return false; -+ if (sa4->sin_port != bind->remote.in4.sin_port) -+ return false; -+ break; -+ case AF_INET6: -+ sa6 = (struct sockaddr_in6 *)ss; -+ if (!ipv6_addr_equal(&sa6->sin6_addr, -+ &bind->remote.in6.sin6_addr)) -+ return false; -+ if (sa6->sin6_port != bind->remote.in6.sin6_port) -+ return false; -+ break; -+ default: -+ return false; -+ } -+ -+ return true; -+} -+ -+/** -+ * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P -+ * instance -+ * @ovpn: the openvpn instance to search -+ * @ss: the transport socket address -+ * -+ * Return: the peer if found or NULL otherwise -+ */ -+static struct ovpn_peer * -+ovpn_peer_get_by_transp_addr_p2p(struct ovpn_struct *ovpn, -+ struct sockaddr_storage *ss) -+{ -+ struct ovpn_peer *tmp, *peer = NULL; -+ -+ rcu_read_lock(); -+ tmp = rcu_dereference(ovpn->peer); -+ if (likely(tmp && ovpn_peer_transp_match(tmp, ss) && -+ ovpn_peer_hold(tmp))) -+ peer = tmp; -+ rcu_read_unlock(); -+ -+ return peer; -+} -+ -+/** -+ * ovpn_peer_get_by_transp_addr - retrieve peer by transport address -+ * @ovpn: the openvpn instance to search -+ * @skb: the skb to retrieve the source transport address from -+ * -+ * Return: a pointer to the peer if found or NULL otherwise -+ */ -+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_struct *ovpn, -+ struct sk_buff *skb) -+{ -+ struct ovpn_peer *tmp, *peer = NULL; -+ struct sockaddr_storage ss = { 0 }; -+ struct hlist_nulls_head *nhead; -+ struct hlist_nulls_node *ntmp; -+ size_t sa_len; -+ -+ if (unlikely(!ovpn_peer_skb_to_sockaddr(skb, &ss))) -+ return NULL; -+ -+ if (ovpn->mode == OVPN_MODE_P2P) -+ return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss); -+ -+ switch (ss.ss_family) { -+ case AF_INET: -+ sa_len = sizeof(struct sockaddr_in); -+ break; -+ case AF_INET6: -+ sa_len = sizeof(struct sockaddr_in6); -+ break; -+ default: -+ return NULL; -+ } -+ -+ nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &ss, sa_len); -+ -+ rcu_read_lock(); -+ hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, -+ hash_entry_transp_addr) { -+ if (!ovpn_peer_transp_match(tmp, &ss)) -+ continue; -+ -+ if (!ovpn_peer_hold(tmp)) -+ continue; -+ -+ peer = tmp; -+ break; -+ } -+ rcu_read_unlock(); -+ -+ return peer; -+} -+ -+/** -+ * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance -+ 
* @ovpn: the openvpn instance to search -+ * @peer_id: the ID of the peer to find -+ * -+ * Return: the peer if found or NULL otherwise -+ */ -+static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_struct *ovpn, -+ u32 peer_id) -+{ -+ struct ovpn_peer *tmp, *peer = NULL; -+ -+ rcu_read_lock(); -+ tmp = rcu_dereference(ovpn->peer); -+ if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp))) -+ peer = tmp; -+ rcu_read_unlock(); -+ -+ return peer; -+} -+ -+/** -+ * ovpn_peer_get_by_id - retrieve peer by ID -+ * @ovpn: the openvpn instance to search -+ * @peer_id: the unique peer identifier to match -+ * -+ * Return: a pointer to the peer if found or NULL otherwise -+ */ -+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_struct *ovpn, u32 peer_id) -+{ -+ struct ovpn_peer *tmp, *peer = NULL; -+ struct hlist_head *head; -+ -+ if (ovpn->mode == OVPN_MODE_P2P) -+ return ovpn_peer_get_by_id_p2p(ovpn, peer_id); -+ -+ head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id, -+ sizeof(peer_id)); -+ -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(tmp, head, hash_entry_id) { -+ if (tmp->id != peer_id) -+ continue; -+ -+ if (!ovpn_peer_hold(tmp)) -+ continue; -+ -+ peer = tmp; -+ break; -+ } -+ rcu_read_unlock(); -+ -+ return peer; -+} -+ -+/** -+ * ovpn_peer_update_local_endpoint - update local endpoint for peer -+ * @peer: peer to update the endpoint for -+ * @skb: incoming packet to retrieve the destination address (local) from -+ */ -+void ovpn_peer_update_local_endpoint(struct ovpn_peer *peer, -+ struct sk_buff *skb) -+{ -+ struct ovpn_bind *bind; -+ -+ rcu_read_lock(); -+ bind = rcu_dereference(peer->bind); -+ if (unlikely(!bind)) -+ goto unlock; -+ -+ spin_lock_bh(&peer->lock); -+ switch (skb_protocol_to_family(skb)) { -+ case AF_INET: -+ if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) { -+ netdev_dbg(peer->ovpn->dev, -+ "%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n", -+ __func__, peer->id, &bind->local.ipv4.s_addr, -+ &ip_hdr(skb)->daddr); -+ bind->local.ipv4.s_addr = ip_hdr(skb)->daddr; -+ } -+ break; -+ case AF_INET6: -+ if (unlikely(!ipv6_addr_equal(&bind->local.ipv6, -+ &ipv6_hdr(skb)->daddr))) { -+ netdev_dbg(peer->ovpn->dev, -+ "%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n", -+ __func__, peer->id, &bind->local.ipv6, -+ &ipv6_hdr(skb)->daddr); -+ bind->local.ipv6 = ipv6_hdr(skb)->daddr; -+ } -+ break; -+ default: -+ break; -+ } -+ spin_unlock_bh(&peer->lock); -+ -+unlock: -+ rcu_read_unlock(); -+} -+ -+/** -+ * ovpn_peer_get_by_dst - Lookup peer to send skb to -+ * @ovpn: the private data representing the current VPN session -+ * @skb: the skb to extract the destination address from -+ * -+ * This function takes a tunnel packet and looks up the peer to send it to -+ * after encapsulation. The skb is expected to be the in-tunnel packet, without -+ * any OpenVPN related header. -+ * -+ * Assume that the IP header is accessible in the skb data. -+ * -+ * Return: the peer if found or NULL otherwise. 
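To make the TX-side contract above concrete, here is a hedged sketch of how a transmit path could consume ovpn_peer_get_by_dst(); the function name and the encryption step are placeholders, not part of this patch:

```c
/* Illustrative TX flow: unlike the vpn_addr lookups, this helper returns
 * a peer with its refcount already raised, so the caller only has to
 * drop the reference once the skb has been handed off.
 */
static void ovpn_xmit_example(struct ovpn_struct *ovpn, struct sk_buff *skb)
{
	struct ovpn_peer *peer = ovpn_peer_get_by_dst(ovpn, skb);

	if (!peer) {
		kfree_skb(skb); /* no peer serves this destination */
		return;
	}

	/* ... encrypt skb and queue it towards peer here ... */

	ovpn_peer_put(peer);
}
```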
-+ */ -+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_struct *ovpn, -+ struct sk_buff *skb) -+{ -+ struct ovpn_peer *peer = NULL; -+ struct in6_addr addr6; -+ __be32 addr4; -+ -+ /* in P2P mode, no matter the destination, packets are always sent to -+ * the single peer listening on the other side -+ */ -+ if (ovpn->mode == OVPN_MODE_P2P) { -+ rcu_read_lock(); -+ peer = rcu_dereference(ovpn->peer); -+ if (unlikely(peer && !ovpn_peer_hold(peer))) -+ peer = NULL; -+ rcu_read_unlock(); -+ return peer; -+ } -+ -+ rcu_read_lock(); -+ switch (skb_protocol_to_family(skb)) { -+ case AF_INET: -+ addr4 = ovpn_nexthop_from_skb4(skb); -+ peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4); -+ break; -+ case AF_INET6: -+ addr6 = ovpn_nexthop_from_skb6(skb); -+ peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6); -+ break; -+ } -+ -+ if (unlikely(peer && !ovpn_peer_hold(peer))) -+ peer = NULL; -+ rcu_read_unlock(); -+ -+ return peer; -+} -+ -+/** -+ * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination -+ * @ovpn: the private data representing the current VPN session -+ * @dest: the destination to be looked up -+ * -+ * Looks up in the IPv4 system routing table the IP of the nexthop to be used -+ * to reach the destination passed as argument. If no nexthop can be found, the -+ * destination itself is returned as it probably has to be used as nexthop. -+ * -+ * Return: the IP of the next hop if found or dest itself otherwise -+ */ -+static __be32 ovpn_nexthop_from_rt4(struct ovpn_struct *ovpn, __be32 dest) -+{ -+ struct rtable *rt; -+ struct flowi4 fl = { -+ .daddr = dest -+ }; -+ -+ rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL); -+ if (IS_ERR(rt)) { -+ net_dbg_ratelimited("%s: no route to host %pI4\n", __func__, -+ &dest); -+ /* if we end up here this packet is probably going to be -+ * thrown away later -+ */ -+ return dest; -+ } -+ -+ if (!rt->rt_uses_gateway) -+ goto out; -+ -+ dest = rt->rt_gw4; -+out: -+ ip_rt_put(rt); -+ return dest; -+} -+ -+/** -+ * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination -+ * @ovpn: the private data representing the current VPN session -+ * @dest: the destination to be looked up -+ * -+ * Looks up in the IPv6 system routing table the IP of the nexthop to be used -+ * to reach the destination passed as argument. If no nexthop can be found, the -+ * destination itself is returned as it probably has to be used as nexthop. 
-+ * -+ * Return: the IP of the next hop if found or dest itself otherwise -+ */ -+static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_struct *ovpn, -+ struct in6_addr dest) -+{ -+#if IS_ENABLED(CONFIG_IPV6) -+ struct dst_entry *entry; -+ struct rt6_info *rt; -+ struct flowi6 fl = { -+ .daddr = dest, -+ }; -+ -+ entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl, -+ NULL); -+ if (IS_ERR(entry)) { -+ net_dbg_ratelimited("%s: no route to host %pI6c\n", __func__, -+ &dest); -+ /* if we end up here this packet is probably going to be -+ * thrown away later -+ */ -+ return dest; -+ } -+ -+ rt = dst_rt6_info(entry); -+ -+ if (!(rt->rt6i_flags & RTF_GATEWAY)) -+ goto out; -+ -+ dest = rt->rt6i_gateway; -+out: -+ dst_release((struct dst_entry *)rt); -+#endif -+ return dest; -+} -+ -+/** -+ * ovpn_peer_check_by_src - check that skb source is routed via peer -+ * @ovpn: the openvpn instance to search -+ * @skb: the packet to extract source address from -+ * @peer: the peer to check against the source address -+ * -+ * Return: true if the peer is matching or false otherwise -+ */ -+bool ovpn_peer_check_by_src(struct ovpn_struct *ovpn, struct sk_buff *skb, -+ struct ovpn_peer *peer) -+{ -+ bool match = false; -+ struct in6_addr addr6; -+ __be32 addr4; -+ -+ if (ovpn->mode == OVPN_MODE_P2P) { -+ /* in P2P mode there is only one peer, therefore incoming -+ * traffic is valid only if it comes from that very peer -+ */ -+ rcu_read_lock(); -+ match = (peer == rcu_dereference(ovpn->peer)); -+ rcu_read_unlock(); -+ return match; -+ } -+ -+ /* This function performs a reverse path check, therefore we now -+ * lookup the nexthop we would use if we wanted to route a packet -+ * to the source IP. If the nexthop matches the sender we know the -+ * latter is valid and we allow the packet to come in -+ */ -+ -+ switch (skb_protocol_to_family(skb)) { -+ case AF_INET: -+ addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr); -+ rcu_read_lock(); -+ match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4)); -+ rcu_read_unlock(); -+ break; -+ case AF_INET6: -+ addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr); -+ rcu_read_lock(); -+ match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6)); -+ rcu_read_unlock(); -+ break; -+ } -+ -+ return match; -+} -+ -+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer) -+ __must_hold(&peer->ovpn->peers->lock) -+{ -+ struct hlist_nulls_head *nhead; -+ -+ if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) { -+ /* remove potential old hashing */ -+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4); -+ -+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr, -+ &peer->vpn_addrs.ipv4, -+ sizeof(peer->vpn_addrs.ipv4)); -+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead); -+ } -+ -+ if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) { -+ /* remove potential old hashing */ -+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6); -+ -+ nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr, -+ &peer->vpn_addrs.ipv6, -+ sizeof(peer->vpn_addrs.ipv6)); -+ hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead); -+ } -+} -+ -+/** -+ * ovpn_peer_add_mp - add peer to related tables in a MP instance -+ * @ovpn: the instance to add the peer to -+ * @peer: the peer to add -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_peer_add_mp(struct ovpn_struct *ovpn, struct ovpn_peer *peer) -+{ -+ struct sockaddr_storage sa = { 0 }; -+ struct hlist_nulls_head *nhead; -+ struct sockaddr_in6 *sa6; -+ struct 
sockaddr_in *sa4; -+ struct ovpn_bind *bind; -+ struct ovpn_peer *tmp; -+ size_t salen; -+ int ret = 0; -+ -+ spin_lock_bh(&ovpn->peers->lock); -+ /* do not add duplicates */ -+ tmp = ovpn_peer_get_by_id(ovpn, peer->id); -+ if (tmp) { -+ ovpn_peer_put(tmp); -+ ret = -EEXIST; -+ goto out; -+ } -+ -+ bind = rcu_dereference_protected(peer->bind, true); -+ /* peers connected via TCP have bind == NULL */ -+ if (bind) { -+ switch (bind->remote.in4.sin_family) { -+ case AF_INET: -+ sa4 = (struct sockaddr_in *)&sa; -+ -+ sa4->sin_family = AF_INET; -+ sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr; -+ sa4->sin_port = bind->remote.in4.sin_port; -+ salen = sizeof(*sa4); -+ break; -+ case AF_INET6: -+ sa6 = (struct sockaddr_in6 *)&sa; -+ -+ sa6->sin6_family = AF_INET6; -+ sa6->sin6_addr = bind->remote.in6.sin6_addr; -+ sa6->sin6_port = bind->remote.in6.sin6_port; -+ salen = sizeof(*sa6); -+ break; -+ default: -+ ret = -EPROTONOSUPPORT; -+ goto out; -+ } -+ -+ nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa, -+ salen); -+ hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead); -+ } -+ -+ hlist_add_head_rcu(&peer->hash_entry_id, -+ ovpn_get_hash_head(ovpn->peers->by_id, &peer->id, -+ sizeof(peer->id))); -+ -+ ovpn_peer_hash_vpn_ip(peer); -+out: -+ spin_unlock_bh(&ovpn->peers->lock); -+ return ret; -+} -+ -+/** -+ * ovpn_peer_add_p2p - add peer to related tables in a P2P instance -+ * @ovpn: the instance to add the peer to -+ * @peer: the peer to add -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_peer_add_p2p(struct ovpn_struct *ovpn, struct ovpn_peer *peer) -+{ -+ struct ovpn_peer *tmp; -+ -+ spin_lock_bh(&ovpn->lock); -+ /* in p2p mode it is possible to have a single peer only, therefore the -+ * old one is released and substituted by the new one -+ */ -+ tmp = rcu_dereference_protected(ovpn->peer, -+ lockdep_is_held(&ovpn->lock)); -+ if (tmp) { -+ tmp->delete_reason = OVPN_DEL_PEER_REASON_TEARDOWN; -+ ovpn_peer_put(tmp); -+ } -+ -+ rcu_assign_pointer(ovpn->peer, peer); -+ spin_unlock_bh(&ovpn->lock); -+ -+ return 0; -+} -+ -+/** -+ * ovpn_peer_add - add peer to the related tables -+ * @ovpn: the openvpn instance the peer belongs to -+ * @peer: the peer object to add -+ * -+ * Assume refcounter was increased by caller -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+int ovpn_peer_add(struct ovpn_struct *ovpn, struct ovpn_peer *peer) -+{ -+ switch (ovpn->mode) { -+ case OVPN_MODE_MP: -+ return ovpn_peer_add_mp(ovpn, peer); -+ case OVPN_MODE_P2P: -+ return ovpn_peer_add_p2p(ovpn, peer); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+/** -+ * ovpn_peer_unhash - remove peer reference from all hashtables -+ * @peer: the peer to remove -+ * @reason: the delete reason to attach to the peer -+ */ -+static void ovpn_peer_unhash(struct ovpn_peer *peer, -+ enum ovpn_del_peer_reason reason) -+ __must_hold(&peer->ovpn->peers->lock) -+{ -+ hlist_del_init_rcu(&peer->hash_entry_id); -+ -+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr4); -+ hlist_nulls_del_init_rcu(&peer->hash_entry_addr6); -+ hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr); -+ -+ peer->delete_reason = reason; -+ ovpn_peer_put(peer); -+} -+ -+/** -+ * ovpn_peer_del_mp - delete peer from related tables in a MP instance -+ * @peer: the peer to delete -+ * @reason: reason why the peer was deleted (sent to userspace) -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_peer_del_mp(struct ovpn_peer *peer, -+ enum 
ovpn_del_peer_reason reason) -+ __must_hold(&peer->ovpn->peers->lock) -+{ -+ struct ovpn_peer *tmp; -+ int ret = -ENOENT; -+ -+ tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id); -+ if (tmp == peer) { -+ ovpn_peer_unhash(peer, reason); -+ ret = 0; -+ } -+ -+ if (tmp) -+ ovpn_peer_put(tmp); -+ -+ return ret; -+} -+ -+/** -+ * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance -+ * @peer: the peer to delete -+ * @reason: reason why the peer was deleted (sent to userspace) -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_peer_del_p2p(struct ovpn_peer *peer, -+ enum ovpn_del_peer_reason reason) -+ __must_hold(&peer->ovpn->lock) -+{ -+ struct ovpn_peer *tmp; -+ -+ tmp = rcu_dereference_protected(peer->ovpn->peer, -+ lockdep_is_held(&peer->ovpn->lock)); -+ if (tmp != peer) { -+ DEBUG_NET_WARN_ON_ONCE(1); -+ if (tmp) -+ ovpn_peer_put(tmp); -+ -+ return -ENOENT; -+ } -+ -+ tmp->delete_reason = reason; -+ RCU_INIT_POINTER(peer->ovpn->peer, NULL); -+ ovpn_peer_put(tmp); -+ -+ return 0; -+} -+ -+/** -+ * ovpn_peer_release_p2p - release peer upon P2P device teardown -+ * @ovpn: the instance being torn down -+ */ -+void ovpn_peer_release_p2p(struct ovpn_struct *ovpn) -+{ -+ struct ovpn_peer *tmp; -+ -+ spin_lock_bh(&ovpn->lock); -+ tmp = rcu_dereference_protected(ovpn->peer, -+ lockdep_is_held(&ovpn->lock)); -+ if (tmp) -+ ovpn_peer_del_p2p(tmp, OVPN_DEL_PEER_REASON_TEARDOWN); -+ spin_unlock_bh(&ovpn->lock); -+} -+ -+/** -+ * ovpn_peer_del - delete peer from related tables -+ * @peer: the peer object to delete -+ * @reason: reason for deleting peer (will be sent to userspace) -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) -+{ -+ int ret; -+ -+ switch (peer->ovpn->mode) { -+ case OVPN_MODE_MP: -+ spin_lock_bh(&peer->ovpn->peers->lock); -+ ret = ovpn_peer_del_mp(peer, reason); -+ spin_unlock_bh(&peer->ovpn->peers->lock); -+ return ret; -+ case OVPN_MODE_P2P: -+ spin_lock_bh(&peer->ovpn->lock); -+ ret = ovpn_peer_del_p2p(peer, reason); -+ spin_unlock_bh(&peer->ovpn->lock); -+ return ret; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static int ovpn_peer_del_nolock(struct ovpn_peer *peer, -+ enum ovpn_del_peer_reason reason) -+{ -+ switch (peer->ovpn->mode) { -+ case OVPN_MODE_MP: -+ return ovpn_peer_del_mp(peer, reason); -+ case OVPN_MODE_P2P: -+ return ovpn_peer_del_p2p(peer, reason); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+/** -+ * ovpn_peers_free - free all peers in the instance -+ * @ovpn: the instance whose peers should be released -+ */ -+void ovpn_peers_free(struct ovpn_struct *ovpn) -+{ -+ struct hlist_node *tmp; -+ struct ovpn_peer *peer; -+ int bkt; -+ -+ spin_lock_bh(&ovpn->peers->lock); -+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) -+ ovpn_peer_unhash(peer, OVPN_DEL_PEER_REASON_TEARDOWN); -+ spin_unlock_bh(&ovpn->peers->lock); -+} -+ -+static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer, -+ time64_t now) -+{ -+ time64_t next_run1, next_run2, delta; -+ unsigned long timeout, interval; -+ bool expired; -+ -+ spin_lock_bh(&peer->lock); -+ /* we expect both timers to be configured at the same time, -+ * therefore bail out if either is not set -+ */ -+ if (!peer->keepalive_timeout || !peer->keepalive_interval) { -+ spin_unlock_bh(&peer->lock); -+ return 0; -+ } -+ -+ /* check for peer timeout */ -+ expired = false; -+ timeout = peer->keepalive_timeout; -+ delta = now - 
peer->last_recv; -+ if (delta < timeout) { -+ peer->keepalive_recv_exp = now + timeout - delta; -+ next_run1 = peer->keepalive_recv_exp; -+ } else if (peer->keepalive_recv_exp > now) { -+ next_run1 = peer->keepalive_recv_exp; -+ } else { -+ expired = true; -+ } -+ -+ if (expired) { -+ /* peer is dead -> kill it and move on */ -+ spin_unlock_bh(&peer->lock); -+ netdev_dbg(peer->ovpn->dev, "peer %u expired\n", -+ peer->id); -+ ovpn_peer_del_nolock(peer, OVPN_DEL_PEER_REASON_EXPIRED); -+ return 0; -+ } -+ -+ /* check for peer keepalive */ -+ expired = false; -+ interval = peer->keepalive_interval; -+ delta = now - peer->last_sent; -+ if (delta < interval) { -+ peer->keepalive_xmit_exp = now + interval - delta; -+ next_run2 = peer->keepalive_xmit_exp; -+ } else if (peer->keepalive_xmit_exp > now) { -+ next_run2 = peer->keepalive_xmit_exp; -+ } else { -+ expired = true; -+ next_run2 = now + interval; -+ } -+ spin_unlock_bh(&peer->lock); -+ -+ if (expired) { -+ /* a keepalive packet is required */ -+ netdev_dbg(peer->ovpn->dev, -+ "sending keepalive to peer %u\n", -+ peer->id); -+ ovpn_xmit_special(peer, ovpn_keepalive_message, -+ sizeof(ovpn_keepalive_message)); -+ } -+ -+ if (next_run1 < next_run2) -+ return next_run1; -+ -+ return next_run2; -+} -+ -+static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_struct *ovpn, -+ time64_t now) -+{ -+ time64_t tmp_next_run, next_run = 0; -+ struct hlist_node *tmp; -+ struct ovpn_peer *peer; -+ int bkt; -+ -+ spin_lock_bh(&ovpn->peers->lock); -+ hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) { -+ tmp_next_run = ovpn_peer_keepalive_work_single(peer, now); -+ if (!tmp_next_run) -+ continue; -+ -+ /* the next worker run will be scheduled based on the shortest -+ * required interval across all peers -+ */ -+ if (!next_run || tmp_next_run < next_run) -+ next_run = tmp_next_run; -+ } -+ spin_unlock_bh(&ovpn->peers->lock); -+ -+ return next_run; -+} -+ -+static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_struct *ovpn, -+ time64_t now) -+{ -+ struct ovpn_peer *peer; -+ time64_t next_run = 0; -+ -+ spin_lock_bh(&ovpn->lock); -+ peer = rcu_dereference_protected(ovpn->peer, -+ lockdep_is_held(&ovpn->lock)); -+ if (peer) -+ next_run = ovpn_peer_keepalive_work_single(peer, now); -+ spin_unlock_bh(&ovpn->lock); -+ -+ return next_run; -+} -+ -+/** -+ * ovpn_peer_keepalive_work - run keepalive logic on each known peer -+ * @work: pointer to the work member of the related ovpn object -+ * -+ * Each peer has two timers (if configured): -+ * 1. peer timeout: when no data is received for a certain interval, -+ * the peer is considered dead and it gets killed. -+ * 2. peer keepalive: when no data is sent to a certain peer for a -+ * certain interval, a special 'keepalive' packet is explicitly sent. -+ * -+ * This function iterates across the whole peer collection while -+ * checking the timers described above. 
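A worked example of the re-arming arithmetic makes the two timers easier to follow. The values below are made up for illustration: keepalive_interval = 10s with last_sent 4s ago, and keepalive_timeout = 60s with last_recv 50s ago.

```c
/* Sketch of the next_run computation with the made-up values above:
 * the xmit timer fires in 10 - 4 = 6s, the expiry timer in 60 - 50 = 10s,
 * so the worker re-arms itself after the shorter of the two (6s).
 */
static time64_t keepalive_next_run_example(time64_t now)
{
	time64_t xmit_exp = now + 10 - 4;  /* interval - time since last_sent */
	time64_t recv_exp = now + 60 - 50; /* timeout - time since last_recv */

	return min(xmit_exp, recv_exp);    /* == now + 6 */
}
```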
-+ */ -+void ovpn_peer_keepalive_work(struct work_struct *work) -+{ -+ struct ovpn_struct *ovpn = container_of(work, struct ovpn_struct, -+ keepalive_work.work); -+ time64_t next_run = 0, now = ktime_get_real_seconds(); -+ -+ switch (ovpn->mode) { -+ case OVPN_MODE_MP: -+ next_run = ovpn_peer_keepalive_work_mp(ovpn, now); -+ break; -+ case OVPN_MODE_P2P: -+ next_run = ovpn_peer_keepalive_work_p2p(ovpn, now); -+ break; -+ } -+ -+ /* prevent rearming if the interface is being destroyed */ -+ if (next_run > 0 && ovpn->registered) { -+ netdev_dbg(ovpn->dev, -+ "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n", -+ now, next_run, next_run - now); -+ schedule_delayed_work(&ovpn->keepalive_work, -+ (next_run - now) * HZ); -+ } -+} -diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h -new file mode 100644 -index 000000000000..1adecd0f79f8 ---- /dev/null -+++ b/drivers/net/ovpn/peer.h -@@ -0,0 +1,165 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_OVPNPEER_H_ -+#define _NET_OVPN_OVPNPEER_H_ -+ -+#include -+#include -+ -+#include "crypto.h" -+#include "stats.h" -+ -+/** -+ * struct ovpn_peer - the main remote peer object -+ * @ovpn: main openvpn instance this peer belongs to -+ * @id: unique identifier -+ * @vpn_addrs: IP addresses assigned over the tunnel -+ * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel -+ * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel -+ * @hash_entry_id: entry in the peer ID hashtable -+ * @hash_entry_addr4: entry in the peer IPv4 hashtable -+ * @hash_entry_addr6: entry in the peer IPv6 hashtable -+ * @hash_entry_transp_addr: entry in the peer transport address hashtable -+ * @sock: the socket being used to talk to this peer -+ * @tcp: keeps track of TCP specific state -+ * @tcp.strp: stream parser context (TCP only) -+ * @tcp.tx_work: work for deferring outgoing packet processing (TCP only) -+ * @tcp.user_queue: received packets that have to go to userspace (TCP only) -+ * @tcp.tx_in_progress: true if TX is already ongoing (TCP only) -+ * @tcp.out_msg.skb: packet scheduled for sending (TCP only) -+ * @tcp.out_msg.offset: offset where next send should start (TCP only) -+ * @tcp.out_msg.len: remaining data to send within packet (TCP only) -+ * @tcp.sk_cb.sk_data_ready: pointer to original cb (TCP only) -+ * @tcp.sk_cb.sk_write_space: pointer to original cb (TCP only) -+ * @tcp.sk_cb.prot: pointer to original prot object (TCP only) -+ * @tcp.sk_cb.ops: pointer to the original prot_ops object (TCP only) -+ * @crypto: the crypto configuration (ciphers, keys, etc..) -+ * @dst_cache: cache for dst_entry used to send to peer -+ * @bind: remote peer binding -+ * @keepalive_interval: seconds after which a new keepalive should be sent -+ * @keepalive_xmit_exp: future timestamp when next keepalive should be sent -+ * @last_sent: timestamp of the last successfully sent packet -+ * @keepalive_timeout: seconds after which an inactive peer is considered dead -+ * @keepalive_recv_exp: future timestamp when the peer should expire -+ * @last_recv: timestamp of the last authenticated received packet -+ * @halt: true if ovpn_peer_mark_delete was called -+ * @vpn_stats: per-peer in-VPN TX/RX stats -+ * @link_stats: per-peer link/transport TX/RX stats -+ * @delete_reason: why peer was deleted (i.e. timeout, transport error, ..) 
-+ * @lock: protects binding to peer (bind) -+ * @refcount: reference counter -+ * @rcu: used to free peer in an RCU safe way -+ * @delete_work: deferred cleanup work, used to notify userspace -+ */ -+struct ovpn_peer { -+ struct ovpn_struct *ovpn; -+ u32 id; -+ struct { -+ struct in_addr ipv4; -+ struct in6_addr ipv6; -+ } vpn_addrs; -+ struct hlist_node hash_entry_id; -+ struct hlist_nulls_node hash_entry_addr4; -+ struct hlist_nulls_node hash_entry_addr6; -+ struct hlist_nulls_node hash_entry_transp_addr; -+ struct ovpn_socket *sock; -+ -+ /* state of the TCP reading. Needed to keep track of how much of a -+ * single packet has already been read from the stream and how much is -+ * missing -+ */ -+ struct { -+ struct strparser strp; -+ struct work_struct tx_work; -+ struct sk_buff_head user_queue; -+ bool tx_in_progress; -+ -+ struct { -+ struct sk_buff *skb; -+ int offset; -+ int len; -+ } out_msg; -+ -+ struct { -+ void (*sk_data_ready)(struct sock *sk); -+ void (*sk_write_space)(struct sock *sk); -+ struct proto *prot; -+ const struct proto_ops *ops; -+ } sk_cb; -+ } tcp; -+ struct ovpn_crypto_state crypto; -+ struct dst_cache dst_cache; -+ struct ovpn_bind __rcu *bind; -+ unsigned long keepalive_interval; -+ unsigned long keepalive_xmit_exp; -+ time64_t last_sent; -+ unsigned long keepalive_timeout; -+ unsigned long keepalive_recv_exp; -+ time64_t last_recv; -+ bool halt; -+ struct ovpn_peer_stats vpn_stats; -+ struct ovpn_peer_stats link_stats; -+ enum ovpn_del_peer_reason delete_reason; -+ spinlock_t lock; /* protects bind */ -+ struct kref refcount; -+ struct rcu_head rcu; -+ struct work_struct delete_work; -+}; -+ -+/** -+ * ovpn_peer_hold - increase reference counter -+ * @peer: the peer whose counter should be increased -+ * -+ * Return: true if the counter was increased or false if it was zero already -+ */ -+static inline bool ovpn_peer_hold(struct ovpn_peer *peer) -+{ -+ return kref_get_unless_zero(&peer->refcount); -+} -+ -+void ovpn_peer_release(struct ovpn_peer *peer); -+void ovpn_peer_release_kref(struct kref *kref); -+ -+/** -+ * ovpn_peer_put - decrease reference counter -+ * @peer: the peer whose counter should be decreased -+ */ -+static inline void ovpn_peer_put(struct ovpn_peer *peer) -+{ -+ kref_put(&peer->refcount, ovpn_peer_release_kref); -+} -+ -+struct ovpn_peer *ovpn_peer_new(struct ovpn_struct *ovpn, u32 id); -+int ovpn_peer_add(struct ovpn_struct *ovpn, struct ovpn_peer *peer); -+int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason); -+void ovpn_peer_release_p2p(struct ovpn_struct *ovpn); -+void ovpn_peers_free(struct ovpn_struct *ovpn); -+ -+struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_struct *ovpn, -+ struct sk_buff *skb); -+struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_struct *ovpn, u32 peer_id); -+struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_struct *ovpn, -+ struct sk_buff *skb); -+void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer); -+bool ovpn_peer_check_by_src(struct ovpn_struct *ovpn, struct sk_buff *skb, -+ struct ovpn_peer *peer); -+ -+void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout); -+void ovpn_peer_keepalive_work(struct work_struct *work); -+ -+void ovpn_peer_update_local_endpoint(struct ovpn_peer *peer, -+ struct sk_buff *skb); -+ -+void ovpn_peer_float(struct ovpn_peer *peer, struct sk_buff *skb); -+int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer, -+ const struct sockaddr_storage *ss, -+ const u8 *local_ip); -+ -+#endif /* _NET_OVPN_OVPNPEER_H_ */ -diff --git 
a/drivers/net/ovpn/pktid.c b/drivers/net/ovpn/pktid.c -new file mode 100644 -index 000000000000..96dc87635670 ---- /dev/null -+++ b/drivers/net/ovpn/pktid.c -@@ -0,0 +1,130 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ * James Yonan -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "packet.h" -+#include "pktid.h" -+ -+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid) -+{ -+ atomic64_set(&pid->seq_num, 1); -+} -+ -+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr) -+{ -+ memset(pr, 0, sizeof(*pr)); -+ spin_lock_init(&pr->lock); -+} -+ -+/* Packet replay detection. -+ * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1. -+ */ -+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time) -+{ -+ const unsigned long now = jiffies; -+ int ret; -+ -+ /* ID must not be zero */ -+ if (unlikely(pkt_id == 0)) -+ return -EINVAL; -+ -+ spin_lock_bh(&pr->lock); -+ -+ /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */ -+ if (unlikely(time_after_eq(now, pr->expire))) -+ pr->id_floor = pr->id; -+ -+ /* time changed? */ -+ if (unlikely(pkt_time != pr->time)) { -+ if (pkt_time > pr->time) { -+ /* time moved forward, accept */ -+ pr->base = 0; -+ pr->extent = 0; -+ pr->id = 0; -+ pr->time = pkt_time; -+ pr->id_floor = 0; -+ } else { -+ /* time moved backward, reject */ -+ ret = -ETIME; -+ goto out; -+ } -+ } -+ -+ if (likely(pkt_id == pr->id + 1)) { -+ /* well-formed ID sequence (incremented by 1) */ -+ pr->base = REPLAY_INDEX(pr->base, -1); -+ pr->history[pr->base / 8] |= (1 << (pr->base % 8)); -+ if (pr->extent < REPLAY_WINDOW_SIZE) -+ ++pr->extent; -+ pr->id = pkt_id; -+ } else if (pkt_id > pr->id) { -+ /* ID jumped forward by more than one */ -+ const unsigned int delta = pkt_id - pr->id; -+ -+ if (delta < REPLAY_WINDOW_SIZE) { -+ unsigned int i; -+ -+ pr->base = REPLAY_INDEX(pr->base, -delta); -+ pr->history[pr->base / 8] |= (1 << (pr->base % 8)); -+ pr->extent += delta; -+ if (pr->extent > REPLAY_WINDOW_SIZE) -+ pr->extent = REPLAY_WINDOW_SIZE; -+ for (i = 1; i < delta; ++i) { -+ unsigned int newb = REPLAY_INDEX(pr->base, i); -+ -+ pr->history[newb / 8] &= ~BIT(newb % 8); -+ } -+ } else { -+ pr->base = 0; -+ pr->extent = REPLAY_WINDOW_SIZE; -+ memset(pr->history, 0, sizeof(pr->history)); -+ pr->history[0] = 1; -+ } -+ pr->id = pkt_id; -+ } else { -+ /* ID backtrack */ -+ const unsigned int delta = pr->id - pkt_id; -+ -+ if (delta > pr->max_backtrack) -+ pr->max_backtrack = delta; -+ if (delta < pr->extent) { -+ if (pkt_id > pr->id_floor) { -+ const unsigned int ri = REPLAY_INDEX(pr->base, -+ delta); -+ u8 *p = &pr->history[ri / 8]; -+ const u8 mask = (1 << (ri % 8)); -+ -+ if (*p & mask) { -+ ret = -EINVAL; -+ goto out; -+ } -+ *p |= mask; -+ } else { -+ ret = -EINVAL; -+ goto out; -+ } -+ } else { -+ ret = -EINVAL; -+ goto out; -+ } -+ } -+ -+ pr->expire = now + PKTID_RECV_EXPIRE; -+ ret = 0; -+out: -+ spin_unlock_bh(&pr->lock); -+ return ret; -+} -diff --git a/drivers/net/ovpn/pktid.h b/drivers/net/ovpn/pktid.h -new file mode 100644 -index 000000000000..fe02f0667e1a ---- /dev/null -+++ b/drivers/net/ovpn/pktid.h -@@ -0,0 +1,87 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. 
-+ * -+ * Author: Antonio Quartulli -+ * James Yonan -+ */ -+ -+#ifndef _NET_OVPN_OVPNPKTID_H_ -+#define _NET_OVPN_OVPNPKTID_H_ -+ -+#include "packet.h" -+ -+/* If no packets are received for this length of time, set a backtrack floor -+ * at the highest received packet ID thus far. -+ */ -+#define PKTID_RECV_EXPIRE (30 * HZ) -+ -+/* Packet-ID state for transmitter */ -+struct ovpn_pktid_xmit { -+ atomic64_t seq_num; -+}; -+ -+/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */ -+#define REPLAY_WINDOW_ORDER 8 -+ -+#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER) -+#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8) -+#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1)) -+ -+/* Packet-ID state for receiver. -+ * Other than lock member, can be zeroed to initialize. -+ */ -+struct ovpn_pktid_recv { -+ /* "sliding window" bitmask of recent packet IDs received */ -+ u8 history[REPLAY_WINDOW_BYTES]; -+ /* bit position of deque base in history */ -+ unsigned int base; -+ /* extent (in bits) of deque in history */ -+ unsigned int extent; -+ /* expiration of history in jiffies */ -+ unsigned long expire; -+ /* highest sequence number received */ -+ u32 id; -+ /* highest time stamp received */ -+ u32 time; -+ /* we will only accept backtrack IDs > id_floor */ -+ u32 id_floor; -+ unsigned int max_backtrack; -+ /* protects the entire packet ID state */ -+ spinlock_t lock; -+}; -+ -+/* Get the next packet ID for xmit */ -+static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid) -+{ -+ const s64 seq_num = atomic64_fetch_add_unless(&pid->seq_num, 1, -+ 0x100000000LL); -+ /* when the 32bit space is over, we return an error because the packet -+ * ID is used to create the cipher IV and we do not want to reuse the -+ * same value more than once -+ */ -+ if (unlikely(seq_num == 0x100000000LL)) -+ return -ERANGE; -+ -+ *pktid = (u32)seq_num; -+ -+ return 0; -+} -+ -+/* Write 12-byte AEAD IV to dest */ -+static inline void ovpn_pktid_aead_write(const u32 pktid, -+ const struct ovpn_nonce_tail *nt, -+ unsigned char *dest) -+{ -+ *(__force __be32 *)(dest) = htonl(pktid); -+ BUILD_BUG_ON(4 + sizeof(struct ovpn_nonce_tail) != NONCE_SIZE); -+ memcpy(dest + 4, nt->u8, sizeof(struct ovpn_nonce_tail)); -+} -+ -+void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid); -+void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr); -+ -+int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time); -+ -+#endif /* _NET_OVPN_OVPNPKTID_H_ */ -diff --git a/drivers/net/ovpn/proto.h b/drivers/net/ovpn/proto.h -new file mode 100644 -index 000000000000..0de8bafadc89 ---- /dev/null -+++ b/drivers/net/ovpn/proto.h -@@ -0,0 +1,104 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ * James Yonan -+ */ -+ -+#ifndef _NET_OVPN_OVPNPROTO_H_ -+#define _NET_OVPN_OVPNPROTO_H_ -+ -+#include "main.h" -+ -+#include -+ -+/* Methods for operating on the initial command -+ * byte of the OpenVPN protocol. 
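Before the opcode macros that follow, a quick worked example of the byte layout they describe (a sketch; it only uses ovpn_opcode_compose(), which is defined further below):

```c
/* Worked example: DATA_V2 (opcode 9) with key ID 1 and peer ID 0x000102.
 * The leading byte packs (opcode << 3) | key_id = (9 << 3) | 1 = 0x49,
 * so the full 32-bit DATA_V2 header reads 0x49000102 on the wire.
 */
static void ovpn_opcode_example(void)
{
	u32 hdr = ovpn_opcode_compose(9 /* OVPN_DATA_V2 */, 1, 0x000102);

	WARN_ON(hdr != 0x49000102);
}
```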
-+ */ -+ -+/* packet opcode (high 5 bits) and key-id (low 3 bits) are combined in -+ * one byte -+ */ -+#define OVPN_KEY_ID_MASK 0x07 -+#define OVPN_OPCODE_SHIFT 3 -+#define OVPN_OPCODE_MASK 0x1F -+/* upper bounds on opcode and key ID */ -+#define OVPN_KEY_ID_MAX (OVPN_KEY_ID_MASK + 1) -+#define OVPN_OPCODE_MAX (OVPN_OPCODE_MASK + 1) -+/* packet opcodes of interest to us */ -+#define OVPN_DATA_V1 6 /* data channel V1 packet */ -+#define OVPN_DATA_V2 9 /* data channel V2 packet */ -+/* size of initial packet opcode */ -+#define OVPN_OP_SIZE_V1 1 -+#define OVPN_OP_SIZE_V2 4 -+#define OVPN_PEER_ID_MASK 0x00FFFFFF -+#define OVPN_PEER_ID_UNDEF 0x00FFFFFF -+/* first byte of exit message */ -+#define OVPN_EXPLICIT_EXIT_NOTIFY_FIRST_BYTE 0x28 -+ -+/** -+ * ovpn_opcode_from_skb - extract OP code from skb at specified offset -+ * @skb: the packet to extract the OP code from -+ * @offset: the offset in the data buffer where the OP code is located -+ * -+ * Note: this function assumes that the skb head was pulled enough -+ * to access the first byte. -+ * -+ * Return: the OP code -+ */ -+static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset) -+{ -+ u8 byte = *(skb->data + offset); -+ -+ return byte >> OVPN_OPCODE_SHIFT; -+} -+ -+/** -+ * ovpn_peer_id_from_skb - extract peer ID from skb at specified offset -+ * @skb: the packet to extract the OP code from -+ * @offset: the offset in the data buffer where the OP code is located -+ * -+ * Note: this function assumes that the skb head was pulled enough -+ * to access the first 4 bytes. -+ * -+ * Return: the peer ID. -+ */ -+static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset) -+{ -+ return ntohl(*(__be32 *)(skb->data + offset)) & OVPN_PEER_ID_MASK; -+} -+ -+/** -+ * ovpn_key_id_from_skb - extract key ID from the skb head -+ * @skb: the packet to extract the key ID code from -+ * -+ * Note: this function assumes that the skb head was pulled enough -+ * to access the first byte. -+ * -+ * Return: the key ID -+ */ -+static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb) -+{ -+ return *skb->data & OVPN_KEY_ID_MASK; -+} -+ -+/** -+ * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format -+ * @opcode: the OP code -+ * @key_id: the key ID -+ * @peer_id: the peer ID -+ * -+ * Return: a 4 bytes integer obtained combining all input values following the -+ * OpenVPN wire format. This integer can then be written to the packet header. -+ */ -+static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id) -+{ -+ const u8 op = (opcode << OVPN_OPCODE_SHIFT) | -+ (key_id & OVPN_KEY_ID_MASK); -+ -+ return (op << 24) | (peer_id & OVPN_PEER_ID_MASK); -+} -+ -+#endif /* _NET_OVPN_OVPNPROTO_H_ */ -diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h -new file mode 100644 -index 000000000000..96afa01466ab ---- /dev/null -+++ b/drivers/net/ovpn/skb.h -@@ -0,0 +1,56 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. 
-+ * -+ * Author: Antonio Quartulli -+ * James Yonan -+ */ -+ -+#ifndef _NET_OVPN_SKB_H_ -+#define _NET_OVPN_SKB_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct ovpn_cb { -+ struct ovpn_peer *peer; -+ struct ovpn_crypto_key_slot *ks; -+ struct aead_request *req; -+ struct scatterlist *sg; -+ unsigned int orig_len; -+ unsigned int payload_offset; -+}; -+ -+static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb) -+{ -+ BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb)); -+ return (struct ovpn_cb *)skb->cb; -+} -+ -+/* Return IP protocol version from skb header. -+ * Return 0 if protocol is not IPv4/IPv6 or cannot be read. -+ */ -+static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb) -+{ -+ __be16 proto = 0; -+ -+ /* skb could be non-linear, -+ * make sure IP header is in non-fragmented part -+ */ -+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) -+ return 0; -+ -+ if (ip_hdr(skb)->version == 4) -+ proto = htons(ETH_P_IP); -+ else if (ip_hdr(skb)->version == 6) -+ proto = htons(ETH_P_IPV6); -+ -+ return proto; -+} -+ -+#endif /* _NET_OVPN_SKB_H_ */ -diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c -new file mode 100644 -index 000000000000..a0c2a02ff205 ---- /dev/null -+++ b/drivers/net/ovpn/socket.c -@@ -0,0 +1,178 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "io.h" -+#include "peer.h" -+#include "socket.h" -+#include "tcp.h" -+#include "udp.h" -+ -+static void ovpn_socket_detach(struct socket *sock) -+{ -+ if (!sock) -+ return; -+ -+ if (sock->sk->sk_protocol == IPPROTO_UDP) -+ ovpn_udp_socket_detach(sock); -+ else if (sock->sk->sk_protocol == IPPROTO_TCP) -+ ovpn_tcp_socket_detach(sock); -+ -+ sockfd_put(sock); -+} -+ -+static void ovpn_socket_release_work(struct work_struct *work) -+{ -+ struct ovpn_socket *sock = container_of(work, struct ovpn_socket, work); -+ -+ ovpn_socket_detach(sock->sock); -+ kfree_rcu(sock, rcu); -+} -+ -+static void ovpn_socket_schedule_release(struct ovpn_socket *sock) -+{ -+ INIT_WORK(&sock->work, ovpn_socket_release_work); -+ schedule_work(&sock->work); -+} -+ -+/** -+ * ovpn_socket_release_kref - kref_put callback -+ * @kref: the kref object -+ */ -+void ovpn_socket_release_kref(struct kref *kref) -+{ -+ struct ovpn_socket *sock = container_of(kref, struct ovpn_socket, -+ refcount); -+ -+ ovpn_socket_schedule_release(sock); -+} -+ -+static bool ovpn_socket_hold(struct ovpn_socket *sock) -+{ -+ return kref_get_unless_zero(&sock->refcount); -+} -+ -+static struct ovpn_socket *ovpn_socket_get(struct socket *sock) -+{ -+ struct ovpn_socket *ovpn_sock; -+ -+ rcu_read_lock(); -+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk); -+ if (!ovpn_socket_hold(ovpn_sock)) { -+ pr_warn("%s: found ovpn_socket with ref = 0\n", __func__); -+ ovpn_sock = NULL; -+ } -+ rcu_read_unlock(); -+ -+ return ovpn_sock; -+} -+ -+static int ovpn_socket_attach(struct socket *sock, struct ovpn_peer *peer) -+{ -+ int ret = -EOPNOTSUPP; -+ -+ if (!sock || !peer) -+ return -EINVAL; -+ -+ if (sock->sk->sk_protocol == IPPROTO_UDP) -+ ret = ovpn_udp_socket_attach(sock, peer->ovpn); -+ else if (sock->sk->sk_protocol == IPPROTO_TCP) -+ ret = ovpn_tcp_socket_attach(sock, peer); -+ -+ return ret; -+} -+ -+/* Retrieve the corresponding ovpn object from a UDP socket -+ * rcu_read_lock must be held on 
entry -+ */ -+struct ovpn_struct *ovpn_from_udp_sock(struct sock *sk) -+{ -+ struct ovpn_socket *ovpn_sock; -+ -+ if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP)) -+ return NULL; -+ -+ ovpn_sock = rcu_dereference_sk_user_data(sk); -+ if (unlikely(!ovpn_sock)) -+ return NULL; -+ -+ /* make sure that sk matches our stored transport socket */ -+ if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk)) -+ return NULL; -+ -+ return ovpn_sock->ovpn; -+} -+ -+/** -+ * ovpn_socket_new - create a new socket and initialize it -+ * @sock: the kernel socket to embed -+ * @peer: the peer reachable via this socket -+ * -+ * Return: an openvpn socket on success or a negative error code otherwise -+ */ -+struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer) -+{ -+ struct ovpn_socket *ovpn_sock; -+ int ret; -+ -+ ret = ovpn_socket_attach(sock, peer); -+ if (ret < 0 && ret != -EALREADY) -+ return ERR_PTR(ret); -+ -+ /* if this socket is already owned by this interface, just increase the -+ * refcounter and use it as expected. -+ * -+ * Since UDP sockets can be used to talk to multiple remote endpoints, -+ * openvpn normally instantiates only one socket and shares it among all -+ * its peers. For this reason, when we find out that a socket is already -+ * used for some other peer in *this* instance, we can happily increase -+ * its refcounter and use it normally. -+ */ -+ if (ret == -EALREADY) { -+ /* caller is expected to increase the sock refcounter before -+ * passing it to this function. For this reason we drop it if -+ * not needed, like when this socket is already owned. -+ */ -+ ovpn_sock = ovpn_socket_get(sock); -+ sockfd_put(sock); -+ return ovpn_sock; -+ } -+ -+ ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL); -+ if (!ovpn_sock) { -+ ret = -ENOMEM; -+ goto err; -+ } -+ -+ ovpn_sock->sock = sock; -+ kref_init(&ovpn_sock->refcount); -+ -+ /* TCP sockets are per-peer, therefore they are linked to their unique -+ * peer -+ */ -+ if (sock->sk->sk_protocol == IPPROTO_TCP) { -+ ovpn_sock->peer = peer; -+ } else { -+ /* in UDP we only link the ovpn instance since the socket is -+ * shared among multiple peers -+ */ -+ ovpn_sock->ovpn = peer->ovpn; -+ } -+ -+ rcu_assign_sk_user_data(sock->sk, ovpn_sock); -+ -+ return ovpn_sock; -+err: -+ ovpn_socket_detach(sock); -+ return ERR_PTR(ret); -+} -diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h -new file mode 100644 -index 000000000000..bc22fff453ad ---- /dev/null -+++ b/drivers/net/ovpn/socket.h -@@ -0,0 +1,55 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. 
-+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_SOCK_H_ -+#define _NET_OVPN_SOCK_H_ -+ -+#include -+#include -+#include -+ -+struct ovpn_struct; -+struct ovpn_peer; -+ -+/** -+ * struct ovpn_socket - a kernel socket referenced in the ovpn code -+ * @ovpn: ovpn instance owning this socket (UDP only) -+ * @peer: unique peer transmitting over this socket (TCP only) -+ * @sock: the low level sock object -+ * @refcount: amount of contexts currently referencing this object -+ * @work: member used to schedule release routine (it may block) -+ * @rcu: member used to schedule RCU destructor callback -+ */ -+struct ovpn_socket { -+ union { -+ struct ovpn_struct *ovpn; -+ struct ovpn_peer *peer; -+ }; -+ -+ struct socket *sock; -+ struct kref refcount; -+ struct work_struct work; -+ struct rcu_head rcu; -+}; -+ -+void ovpn_socket_release_kref(struct kref *kref); -+ -+/** -+ * ovpn_socket_put - decrease reference counter -+ * @sock: the socket whose reference counter should be decreased -+ */ -+static inline void ovpn_socket_put(struct ovpn_socket *sock) -+{ -+ kref_put(&sock->refcount, ovpn_socket_release_kref); -+} -+ -+struct ovpn_socket *ovpn_socket_new(struct socket *sock, -+ struct ovpn_peer *peer); -+ -+#endif /* _NET_OVPN_SOCK_H_ */ -diff --git a/drivers/net/ovpn/stats.c b/drivers/net/ovpn/stats.c -new file mode 100644 -index 000000000000..a383842c3449 ---- /dev/null -+++ b/drivers/net/ovpn/stats.c -@@ -0,0 +1,21 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ */ -+ -+#include -+ -+#include "stats.h" -+ -+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps) -+{ -+ atomic64_set(&ps->rx.bytes, 0); -+ atomic64_set(&ps->rx.packets, 0); -+ -+ atomic64_set(&ps->tx.bytes, 0); -+ atomic64_set(&ps->tx.packets, 0); -+} -diff --git a/drivers/net/ovpn/stats.h b/drivers/net/ovpn/stats.h -new file mode 100644 -index 000000000000..868f49d25eaa ---- /dev/null -+++ b/drivers/net/ovpn/stats.h -@@ -0,0 +1,47 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. -+ * -+ * Author: James Yonan -+ * Antonio Quartulli -+ * Lev Stipakov -+ */ -+ -+#ifndef _NET_OVPN_OVPNSTATS_H_ -+#define _NET_OVPN_OVPNSTATS_H_ -+ -+/* one stat */ -+struct ovpn_peer_stat { -+ atomic64_t bytes; -+ atomic64_t packets; -+}; -+ -+/* rx and tx stats combined */ -+struct ovpn_peer_stats { -+ struct ovpn_peer_stat rx; -+ struct ovpn_peer_stat tx; -+}; -+ -+void ovpn_peer_stats_init(struct ovpn_peer_stats *ps); -+ -+static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat, -+ const unsigned int n) -+{ -+ atomic64_add(n, &stat->bytes); -+ atomic64_inc(&stat->packets); -+} -+ -+static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats, -+ const unsigned int n) -+{ -+ ovpn_peer_stats_increment(&stats->rx, n); -+} -+ -+static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats, -+ const unsigned int n) -+{ -+ ovpn_peer_stats_increment(&stats->tx, n); -+} -+ -+#endif /* _NET_OVPN_OVPNSTATS_H_ */ -diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c -new file mode 100644 -index 000000000000..d6f377a116ef ---- /dev/null -+++ b/drivers/net/ovpn/tcp.c -@@ -0,0 +1,506 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. 
-+ * -+ * Author: Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "io.h" -+#include "packet.h" -+#include "peer.h" -+#include "proto.h" -+#include "skb.h" -+#include "tcp.h" -+ -+static struct proto ovpn_tcp_prot __ro_after_init; -+static struct proto_ops ovpn_tcp_ops __ro_after_init; -+static struct proto ovpn_tcp6_prot; -+static struct proto_ops ovpn_tcp6_ops; -+static DEFINE_MUTEX(tcp6_prot_mutex); -+ -+static int ovpn_tcp_parse(struct strparser *strp, struct sk_buff *skb) -+{ -+ struct strp_msg *rxm = strp_msg(skb); -+ __be16 blen; -+ u16 len; -+ int err; -+ -+ /* when packets are written to the TCP stream, they are prepended with -+ * two bytes indicating the actual packet size. -+ * Here we read those two bytes and move the skb data pointer to the -+ * beginning of the packet -+ */ -+ -+ if (skb->len < rxm->offset + 2) -+ return 0; -+ -+ err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen)); -+ if (err < 0) -+ return err; -+ -+ len = be16_to_cpu(blen); -+ if (len < 2) -+ return -EINVAL; -+ -+ return len + 2; -+} -+ -+/* queue skb for sending to userspace via recvmsg on the socket */ -+static void ovpn_tcp_to_userspace(struct ovpn_peer *peer, struct sock *sk, -+ struct sk_buff *skb) -+{ -+ skb_set_owner_r(skb, sk); -+ memset(skb->cb, 0, sizeof(skb->cb)); -+ skb_queue_tail(&peer->tcp.user_queue, skb); -+ peer->tcp.sk_cb.sk_data_ready(sk); -+} -+ -+static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb) -+{ -+ struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp); -+ struct strp_msg *msg = strp_msg(skb); -+ size_t pkt_len = msg->full_len - 2; -+ size_t off = msg->offset + 2; -+ -+ /* ensure skb->data points to the beginning of the openvpn packet */ -+ if (!pskb_pull(skb, off)) { -+ net_warn_ratelimited("%s: packet too small\n", -+ peer->ovpn->dev->name); -+ goto err; -+ } -+ -+ /* strparser does not trim the skb for us, therefore we do it now */ -+ if (pskb_trim(skb, pkt_len) != 0) { -+ net_warn_ratelimited("%s: trimming skb failed\n", -+ peer->ovpn->dev->name); -+ goto err; -+ } -+ -+ /* we need the first byte of data to be accessible -+ * to extract the opcode and the key ID later on -+ */ -+ if (!pskb_may_pull(skb, 1)) { -+ net_warn_ratelimited("%s: packet too small to fetch opcode\n", -+ peer->ovpn->dev->name); -+ goto err; -+ } -+ -+ /* DATA_V2 packets are handled in kernel, the rest goes to user space */ -+ if (likely(ovpn_opcode_from_skb(skb, 0) == OVPN_DATA_V2)) { -+ /* hold reference to peer as required by ovpn_recv(). 
-+ * -+ * NOTE: in this context we should already be holding a -+ * reference to this peer, therefore ovpn_peer_hold() is -+ * not expected to fail -+ */ -+ if (WARN_ON(!ovpn_peer_hold(peer))) -+ goto err; -+ -+ ovpn_recv(peer, skb); -+ } else { -+ /* The packet size header must be there when sending the packet -+ * to userspace, therefore we put it back -+ */ -+ skb_push(skb, 2); -+ ovpn_tcp_to_userspace(peer, strp->sk, skb); -+ } -+ -+ return; -+err: -+ netdev_err(peer->ovpn->dev, -+ "cannot process incoming TCP data for peer %u\n", peer->id); -+ dev_core_stats_rx_dropped_inc(peer->ovpn->dev); -+ kfree_skb(skb); -+ ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); -+} -+ -+static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, -+ int flags, int *addr_len) -+{ -+ int err = 0, off, copied = 0, ret; -+ struct ovpn_socket *sock; -+ struct ovpn_peer *peer; -+ struct sk_buff *skb; -+ -+ rcu_read_lock(); -+ sock = rcu_dereference_sk_user_data(sk); -+ if (!sock || !sock->peer) { -+ rcu_read_unlock(); -+ return -EBADF; -+ } -+ /* we take a reference to the peer linked to this TCP socket, because -+ * in turn the peer holds a reference to the socket itself. -+ * By doing so we also ensure that the peer stays alive along with -+ * the socket while executing this function -+ */ -+ ovpn_peer_hold(sock->peer); -+ peer = sock->peer; -+ rcu_read_unlock(); -+ -+ skb = __skb_recv_datagram(sk, &peer->tcp.user_queue, flags, &off, &err); -+ if (!skb) { -+ if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) { -+ ret = 0; -+ goto out; -+ } -+ ret = err; -+ goto out; -+ } -+ -+ copied = len; -+ if (copied > skb->len) -+ copied = skb->len; -+ else if (copied < skb->len) -+ msg->msg_flags |= MSG_TRUNC; -+ -+ err = skb_copy_datagram_msg(skb, 0, msg, copied); -+ if (unlikely(err)) { -+ kfree_skb(skb); -+ ret = err; -+ goto out; -+ } -+ -+ if (flags & MSG_TRUNC) -+ copied = skb->len; -+ kfree_skb(skb); -+ ret = copied; -+out: -+ ovpn_peer_put(peer); -+ return ret; -+} -+ -+void ovpn_tcp_socket_detach(struct socket *sock) -+{ -+ struct ovpn_socket *ovpn_sock; -+ struct ovpn_peer *peer; -+ -+ if (!sock) -+ return; -+ -+ rcu_read_lock(); -+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk); -+ -+ if (!ovpn_sock->peer) { -+ rcu_read_unlock(); -+ return; -+ } -+ -+ peer = ovpn_sock->peer; -+ strp_stop(&peer->tcp.strp); -+ -+ skb_queue_purge(&peer->tcp.user_queue); -+ -+ /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */ -+ sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready; -+ sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space; -+ sock->sk->sk_prot = peer->tcp.sk_cb.prot; -+ sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops; -+ rcu_assign_sk_user_data(sock->sk, NULL); -+ -+ rcu_read_unlock(); -+ -+ /* cancel any ongoing work. 
Done after removing the CBs so that these -+ * workers cannot be re-armed -+ */ -+ cancel_work_sync(&peer->tcp.tx_work); -+ strp_done(&peer->tcp.strp); -+} -+ -+static void ovpn_tcp_send_sock(struct ovpn_peer *peer) -+{ -+ struct sk_buff *skb = peer->tcp.out_msg.skb; -+ -+ if (!skb) -+ return; -+ -+ if (peer->tcp.tx_in_progress) -+ return; -+ -+ peer->tcp.tx_in_progress = true; -+ -+ do { -+ int ret = skb_send_sock_locked(peer->sock->sock->sk, skb, -+ peer->tcp.out_msg.offset, -+ peer->tcp.out_msg.len); -+ if (unlikely(ret < 0)) { -+ if (ret == -EAGAIN) -+ goto out; -+ -+ net_warn_ratelimited("%s: TCP error to peer %u: %d\n", -+ peer->ovpn->dev->name, peer->id, -+ ret); -+ -+ /* in case of TCP error we can't recover the VPN -+ * stream therefore we abort the connection -+ */ -+ ovpn_peer_del(peer, -+ OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); -+ break; -+ } -+ -+ peer->tcp.out_msg.len -= ret; -+ peer->tcp.out_msg.offset += ret; -+ } while (peer->tcp.out_msg.len > 0); -+ -+ if (!peer->tcp.out_msg.len) -+ dev_sw_netstats_tx_add(peer->ovpn->dev, 1, skb->len); -+ -+ kfree_skb(peer->tcp.out_msg.skb); -+ peer->tcp.out_msg.skb = NULL; -+ peer->tcp.out_msg.len = 0; -+ peer->tcp.out_msg.offset = 0; -+ -+out: -+ peer->tcp.tx_in_progress = false; -+} -+ -+static void ovpn_tcp_tx_work(struct work_struct *work) -+{ -+ struct ovpn_peer *peer; -+ -+ peer = container_of(work, struct ovpn_peer, tcp.tx_work); -+ -+ lock_sock(peer->sock->sock->sk); -+ ovpn_tcp_send_sock(peer); -+ release_sock(peer->sock->sock->sk); -+} -+ -+void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sk_buff *skb) -+{ -+ if (peer->tcp.out_msg.skb) -+ return; -+ -+ peer->tcp.out_msg.skb = skb; -+ peer->tcp.out_msg.len = skb->len; -+ peer->tcp.out_msg.offset = 0; -+ -+ ovpn_tcp_send_sock(peer); -+} -+ -+static int ovpn_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) -+{ -+ struct ovpn_socket *sock; -+ int ret, linear = PAGE_SIZE; -+ struct ovpn_peer *peer; -+ struct sk_buff *skb; -+ -+ rcu_read_lock(); -+ sock = rcu_dereference_sk_user_data(sk); -+ peer = sock->peer; -+ if (unlikely(!ovpn_peer_hold(peer))) { -+ rcu_read_unlock(); -+ return -EIO; -+ } -+ rcu_read_unlock(); -+ -+ if (msg->msg_flags & ~MSG_DONTWAIT) { -+ ret = -EOPNOTSUPP; -+ goto peer_free; -+ } -+ -+ lock_sock(sk); -+ -+ if (peer->tcp.out_msg.skb) { -+ ret = -EAGAIN; -+ goto unlock; -+ } -+ -+ if (size < linear) -+ linear = size; -+ -+ skb = sock_alloc_send_pskb(sk, linear, size - linear, -+ msg->msg_flags & MSG_DONTWAIT, &ret, 0); -+ if (!skb) { -+ net_err_ratelimited("%s: skb alloc failed: %d\n", -+ sock->peer->ovpn->dev->name, ret); -+ goto unlock; -+ } -+ -+ skb_put(skb, linear); -+ skb->len = size; -+ skb->data_len = size - linear; -+ -+ ret = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); -+ if (ret) { -+ kfree_skb(skb); -+ net_err_ratelimited("%s: skb copy from iter failed: %d\n", -+ sock->peer->ovpn->dev->name, ret); -+ goto unlock; -+ } -+ -+ ovpn_tcp_send_sock_skb(sock->peer, skb); -+ ret = size; -+unlock: -+ release_sock(sk); -+peer_free: -+ ovpn_peer_put(peer); -+ return ret; -+} -+ -+static void ovpn_tcp_data_ready(struct sock *sk) -+{ -+ struct ovpn_socket *sock; -+ -+ trace_sk_data_ready(sk); -+ -+ rcu_read_lock(); -+ sock = rcu_dereference_sk_user_data(sk); -+ strp_data_ready(&sock->peer->tcp.strp); -+ rcu_read_unlock(); -+} -+ -+static void ovpn_tcp_write_space(struct sock *sk) -+{ -+ struct ovpn_socket *sock; -+ -+ rcu_read_lock(); -+ sock = rcu_dereference_sk_user_data(sk); -+ schedule_work(&sock->peer->tcp.tx_work); -+ 
sock->peer->tcp.sk_cb.sk_write_space(sk); -+ rcu_read_unlock(); -+} -+ -+static void ovpn_tcp_build_protos(struct proto *new_prot, -+ struct proto_ops *new_ops, -+ const struct proto *orig_prot, -+ const struct proto_ops *orig_ops); -+ -+/* Set TCP encapsulation callbacks */ -+int ovpn_tcp_socket_attach(struct socket *sock, struct ovpn_peer *peer) -+{ -+ struct strp_callbacks cb = { -+ .rcv_msg = ovpn_tcp_rcv, -+ .parse_msg = ovpn_tcp_parse, -+ }; -+ int ret; -+ -+ /* make sure no pre-existing encapsulation handler exists */ -+ if (sock->sk->sk_user_data) -+ return -EBUSY; -+ -+ /* sanity check */ -+ if (sock->sk->sk_protocol != IPPROTO_TCP) { -+ netdev_err(peer->ovpn->dev, -+ "provided socket is not TCP as expected\n"); -+ return -EINVAL; -+ } -+ -+ /* only a fully connected socket is expected. Connection should be -+ * handled in userspace -+ */ -+ if (sock->sk->sk_state != TCP_ESTABLISHED) { -+ netdev_err(peer->ovpn->dev, -+ "provided TCP socket is not in ESTABLISHED state: %d\n", -+ sock->sk->sk_state); -+ return -EINVAL; -+ } -+ -+ lock_sock(sock->sk); -+ -+ ret = strp_init(&peer->tcp.strp, sock->sk, &cb); -+ if (ret < 0) { -+ DEBUG_NET_WARN_ON_ONCE(1); -+ release_sock(sock->sk); -+ return ret; -+ } -+ -+ INIT_WORK(&peer->tcp.tx_work, ovpn_tcp_tx_work); -+ __sk_dst_reset(sock->sk); -+ skb_queue_head_init(&peer->tcp.user_queue); -+ -+ /* save current CBs so that they can be restored upon socket release */ -+ peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready; -+ peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space; -+ peer->tcp.sk_cb.prot = sock->sk->sk_prot; -+ peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops; -+ -+ /* assign our static CBs and prot/ops */ -+ sock->sk->sk_data_ready = ovpn_tcp_data_ready; -+ sock->sk->sk_write_space = ovpn_tcp_write_space; -+ -+ if (sock->sk->sk_family == AF_INET) { -+ sock->sk->sk_prot = &ovpn_tcp_prot; -+ sock->sk->sk_socket->ops = &ovpn_tcp_ops; -+ } else { -+ mutex_lock(&tcp6_prot_mutex); -+ if (!ovpn_tcp6_prot.recvmsg) -+ ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, -+ sock->sk->sk_prot, -+ sock->sk->sk_socket->ops); -+ mutex_unlock(&tcp6_prot_mutex); -+ -+ sock->sk->sk_prot = &ovpn_tcp6_prot; -+ sock->sk->sk_socket->ops = &ovpn_tcp6_ops; -+ } -+ -+ /* avoid using task_frag */ -+ sock->sk->sk_allocation = GFP_ATOMIC; -+ sock->sk->sk_use_task_frag = false; -+ -+ /* enqueue the RX worker */ -+ strp_check_rcv(&peer->tcp.strp); -+ -+ release_sock(sock->sk); -+ return 0; -+} -+ -+static void ovpn_tcp_close(struct sock *sk, long timeout) -+{ -+ struct ovpn_socket *sock; -+ -+ rcu_read_lock(); -+ sock = rcu_dereference_sk_user_data(sk); -+ -+ strp_stop(&sock->peer->tcp.strp); -+ barrier(); -+ -+ tcp_close(sk, timeout); -+ -+ ovpn_peer_del(sock->peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); -+ rcu_read_unlock(); -+} -+ -+static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock, -+ poll_table *wait) -+{ -+ __poll_t mask = datagram_poll(file, sock, wait); -+ struct ovpn_socket *ovpn_sock; -+ -+ rcu_read_lock(); -+ ovpn_sock = rcu_dereference_sk_user_data(sock->sk); -+ if (!skb_queue_empty(&ovpn_sock->peer->tcp.user_queue)) -+ mask |= EPOLLIN | EPOLLRDNORM; -+ rcu_read_unlock(); -+ -+ return mask; -+} -+ -+static void ovpn_tcp_build_protos(struct proto *new_prot, -+ struct proto_ops *new_ops, -+ const struct proto *orig_prot, -+ const struct proto_ops *orig_ops) -+{ -+ memcpy(new_prot, orig_prot, sizeof(*new_prot)); -+ memcpy(new_ops, orig_ops, sizeof(*new_ops)); -+ new_prot->recvmsg = ovpn_tcp_recvmsg; -+ new_prot->sendmsg = 
ovpn_tcp_sendmsg; -+ new_prot->close = ovpn_tcp_close; -+ new_ops->poll = ovpn_tcp_poll; -+} -+ -+/* Initialize TCP static objects */ -+void __init ovpn_tcp_init(void) -+{ -+ ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot, -+ &inet_stream_ops); -+} -diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h -new file mode 100644 -index 000000000000..fb2cd0b606b4 ---- /dev/null -+++ b/drivers/net/ovpn/tcp.h -@@ -0,0 +1,44 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_TCP_H_ -+#define _NET_OVPN_TCP_H_ -+ -+#include -+#include -+#include -+ -+#include "peer.h" -+#include "skb.h" -+#include "socket.h" -+ -+void __init ovpn_tcp_init(void); -+ -+int ovpn_tcp_socket_attach(struct socket *sock, struct ovpn_peer *peer); -+void ovpn_tcp_socket_detach(struct socket *sock); -+void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sk_buff *skb); -+ -+/* Prepare skb and enqueue it for sending to peer. -+ * -+ * Preparation consists of prepending the skb payload with its size. -+ * Required by the OpenVPN protocol in order to extract packets from -+ * the TCP stream on the receiver side. -+ */ -+static inline void ovpn_tcp_send_skb(struct ovpn_peer *peer, -+ struct sk_buff *skb) -+{ -+ u16 len = skb->len; -+ -+ *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len); -+ -+ bh_lock_sock(peer->sock->sock->sk); -+ ovpn_tcp_send_sock_skb(peer, skb); -+ bh_unlock_sock(peer->sock->sock->sk); -+} -+ -+#endif /* _NET_OVPN_TCP_H_ */ -diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c -new file mode 100644 -index 000000000000..d1e88ae83843 ---- /dev/null -+++ b/drivers/net/ovpn/udp.c -@@ -0,0 +1,406 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ovpnstruct.h" -+#include "main.h" -+#include "bind.h" -+#include "io.h" -+#include "peer.h" -+#include "proto.h" -+#include "socket.h" -+#include "udp.h" -+ -+/** -+ * ovpn_udp_encap_recv - Start processing a received UDP packet. -+ * @sk: socket over which the packet was received -+ * @skb: the received packet -+ * -+ * If the first byte of the payload is DATA_V2, the packet is further processed, -+ * otherwise it is forwarded to the UDP stack for delivery to user space. -+ * -+ * Return: -+ * 0 if skb was consumed or dropped -+ * >0 if skb should be passed up to userspace as UDP (packet not consumed) -+ * <0 if skb should be resubmitted as proto -N (packet not consumed) -+ */ -+static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb) -+{ -+ struct ovpn_peer *peer = NULL; -+ struct ovpn_struct *ovpn; -+ u32 peer_id; -+ u8 opcode; -+ -+ ovpn = ovpn_from_udp_sock(sk); -+ if (unlikely(!ovpn)) { -+ net_err_ratelimited("%s: cannot obtain ovpn object from UDP socket\n", -+ __func__); -+ goto drop_noovpn; -+ } -+ -+ /* Make sure the first 4 bytes of the skb data buffer after the UDP -+ * header are accessible. -+ * They are required to fetch the OP code, the key ID and the peer ID. 
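-+ * In a DATA_V2 packet the opcode and key ID share the first byte, -+ * followed by a 24-bit peer ID (OVPN_OP_SIZE_V2 bytes in total).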
-+ */ -+ if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) + -+ OVPN_OP_SIZE_V2))) { -+ net_dbg_ratelimited("%s: packet too small\n", __func__); -+ goto drop; -+ } -+ -+ opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr)); -+ if (unlikely(opcode != OVPN_DATA_V2)) { -+ /* DATA_V1 is not supported */ -+ if (opcode == OVPN_DATA_V1) -+ goto drop; -+ -+ /* unknown or control packet: let it bubble up to userspace */ -+ return 1; -+ } -+ -+ peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr)); -+ /* some OpenVPN server implementations send data packets with the -+ * peer-id set to undef. In this case we skip the peer lookup by peer-id -+ * and we try with the transport address -+ */ -+ if (peer_id != OVPN_PEER_ID_UNDEF) { -+ peer = ovpn_peer_get_by_id(ovpn, peer_id); -+ if (!peer) { -+ net_err_ratelimited("%s: received data from unknown peer (id: %d)\n", -+ __func__, peer_id); -+ goto drop; -+ } -+ } -+ -+ if (!peer) { -+ /* data packet with undef peer-id */ -+ peer = ovpn_peer_get_by_transp_addr(ovpn, skb); -+ if (unlikely(!peer)) { -+ net_dbg_ratelimited("%s: received data with undef peer-id from unknown source\n", -+ __func__); -+ goto drop; -+ } -+ } -+ -+ /* pop off outer UDP header */ -+ __skb_pull(skb, sizeof(struct udphdr)); -+ ovpn_recv(peer, skb); -+ return 0; -+ -+drop: -+ if (peer) -+ ovpn_peer_put(peer); -+ dev_core_stats_rx_dropped_inc(ovpn->dev); -+drop_noovpn: -+ kfree_skb(skb); -+ return 0; -+} -+ -+/** -+ * ovpn_udp4_output - send IPv4 packet over udp socket -+ * @ovpn: the openvpn instance -+ * @bind: the binding related to the destination peer -+ * @cache: dst cache -+ * @sk: the socket to send the packet over -+ * @skb: the packet to send -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_udp4_output(struct ovpn_struct *ovpn, struct ovpn_bind *bind, -+ struct dst_cache *cache, struct sock *sk, -+ struct sk_buff *skb) -+{ -+ struct rtable *rt; -+ struct flowi4 fl = { -+ .saddr = bind->local.ipv4.s_addr, -+ .daddr = bind->remote.in4.sin_addr.s_addr, -+ .fl4_sport = inet_sk(sk)->inet_sport, -+ .fl4_dport = bind->remote.in4.sin_port, -+ .flowi4_proto = sk->sk_protocol, -+ .flowi4_mark = sk->sk_mark, -+ }; -+ int ret; -+ -+ local_bh_disable(); -+ rt = dst_cache_get_ip4(cache, &fl.saddr); -+ if (rt) -+ goto transmit; -+ -+ if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr, -+ RT_SCOPE_HOST))) { -+ /* we may end up here when the cached address is not usable -+ * anymore. 
In this case we reset address/cache and perform a -+ * new lookup -+ */ -+ fl.saddr = 0; -+ bind->local.ipv4.s_addr = 0; -+ dst_cache_reset(cache); -+ } -+ -+ rt = ip_route_output_flow(sock_net(sk), &fl, sk); -+ if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) { -+ fl.saddr = 0; -+ bind->local.ipv4.s_addr = 0; -+ dst_cache_reset(cache); -+ -+ rt = ip_route_output_flow(sock_net(sk), &fl, sk); -+ } -+ -+ if (IS_ERR(rt)) { -+ ret = PTR_ERR(rt); -+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n", -+ ovpn->dev->name, &bind->remote.in4, ret); -+ goto err; -+ } -+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr); -+ -+transmit: -+ udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0, -+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, -+ fl.fl4_dport, false, sk->sk_no_check_tx); -+ ret = 0; -+err: -+ local_bh_enable(); -+ return ret; -+} -+ -+#if IS_ENABLED(CONFIG_IPV6) -+/** -+ * ovpn_udp6_output - send IPv6 packet over udp socket -+ * @ovpn: the openvpn instance -+ * @bind: the binding related to the destination peer -+ * @cache: dst cache -+ * @sk: the socket to send the packet over -+ * @skb: the packet to send -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_udp6_output(struct ovpn_struct *ovpn, struct ovpn_bind *bind, -+ struct dst_cache *cache, struct sock *sk, -+ struct sk_buff *skb) -+{ -+ struct dst_entry *dst; -+ int ret; -+ -+ struct flowi6 fl = { -+ .saddr = bind->local.ipv6, -+ .daddr = bind->remote.in6.sin6_addr, -+ .fl6_sport = inet_sk(sk)->inet_sport, -+ .fl6_dport = bind->remote.in6.sin6_port, -+ .flowi6_proto = sk->sk_protocol, -+ .flowi6_mark = sk->sk_mark, -+ .flowi6_oif = bind->remote.in6.sin6_scope_id, -+ }; -+ -+ local_bh_disable(); -+ dst = dst_cache_get_ip6(cache, &fl.saddr); -+ if (dst) -+ goto transmit; -+ -+ if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) { -+ /* we may end up here when the cached address is not usable -+ * anymore. In this case we reset address/cache and perform a -+ * new lookup -+ */ -+ fl.saddr = in6addr_any; -+ bind->local.ipv6 = in6addr_any; -+ dst_cache_reset(cache); -+ } -+ -+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL); -+ if (IS_ERR(dst)) { -+ ret = PTR_ERR(dst); -+ net_dbg_ratelimited("%s: no route to host %pISpc: %d\n", -+ ovpn->dev->name, &bind->remote.in6, ret); -+ goto err; -+ } -+ dst_cache_set_ip6(cache, dst, &fl.saddr); -+ -+transmit: -+ udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0, -+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport, -+ fl.fl6_dport, udp_get_no_check6_tx(sk)); -+ ret = 0; -+err: -+ local_bh_enable(); -+ return ret; -+} -+#endif -+ -+/** -+ * ovpn_udp_output - transmit skb using udp-tunnel -+ * @ovpn: the openvpn instance -+ * @bind: the binding related to the destination peer -+ * @cache: dst cache -+ * @sk: the socket to send the packet over -+ * @skb: the packet to send -+ * -+ * rcu_read_lock should be held on entry. -+ * On return, the skb is consumed. 
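-+ * (on transmit errors the skb is not freed here; the caller, -+ * ovpn_udp_send_skb(), drops it and accounts the drop)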
-+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+static int ovpn_udp_output(struct ovpn_struct *ovpn, struct ovpn_bind *bind, -+ struct dst_cache *cache, struct sock *sk, -+ struct sk_buff *skb) -+{ -+ int ret; -+ -+ /* set sk to null if skb is already orphaned */ -+ if (!skb->destructor) -+ skb->sk = NULL; -+ -+ /* always permit openvpn-created packets to be (outside) fragmented */ -+ skb->ignore_df = 1; -+ -+ switch (bind->remote.in4.sin_family) { -+ case AF_INET: -+ ret = ovpn_udp4_output(ovpn, bind, cache, sk, skb); -+ break; -+#if IS_ENABLED(CONFIG_IPV6) -+ case AF_INET6: -+ ret = ovpn_udp6_output(ovpn, bind, cache, sk, skb); -+ break; -+#endif -+ default: -+ ret = -EAFNOSUPPORT; -+ break; -+ } -+ -+ return ret; -+} -+ -+/** -+ * ovpn_udp_send_skb - prepare skb and send it via UDP -+ * @ovpn: the openvpn instance -+ * @peer: the destination peer -+ * @skb: the packet to send -+ */ -+void ovpn_udp_send_skb(struct ovpn_struct *ovpn, struct ovpn_peer *peer, -+ struct sk_buff *skb) -+{ -+ struct ovpn_bind *bind; -+ unsigned int pkt_len; -+ struct socket *sock; -+ int ret = -1; -+ -+ skb->dev = ovpn->dev; -+ /* no checksum performed at this layer */ -+ skb->ip_summed = CHECKSUM_NONE; -+ -+ /* get socket info */ -+ sock = peer->sock->sock; -+ if (unlikely(!sock)) { -+ net_warn_ratelimited("%s: no sock for remote peer\n", __func__); -+ goto out; -+ } -+ -+ rcu_read_lock(); -+ /* get binding */ -+ bind = rcu_dereference(peer->bind); -+ if (unlikely(!bind)) { -+ net_warn_ratelimited("%s: no bind for remote peer\n", __func__); -+ goto out_unlock; -+ } -+ -+ /* crypto layer -> transport (UDP) */ -+ pkt_len = skb->len; -+ ret = ovpn_udp_output(ovpn, bind, &peer->dst_cache, sock->sk, skb); -+ -+out_unlock: -+ rcu_read_unlock(); -+out: -+ if (unlikely(ret < 0)) { -+ dev_core_stats_tx_dropped_inc(ovpn->dev); -+ kfree_skb(skb); -+ return; -+ } -+ -+ dev_sw_netstats_tx_add(ovpn->dev, 1, pkt_len); -+} -+ -+/** -+ * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn -+ * @sock: socket to configure -+ * @ovpn: the openvpn instance to link -+ * -+ * After invoking this function, the sock will be controlled by ovpn so that -+ * any incoming packet may be processed by ovpn first. -+ * -+ * Return: 0 on success or a negative error code otherwise -+ */ -+int ovpn_udp_socket_attach(struct socket *sock, struct ovpn_struct *ovpn) -+{ -+ struct udp_tunnel_sock_cfg cfg = { -+ .encap_type = UDP_ENCAP_OVPNINUDP, -+ .encap_rcv = ovpn_udp_encap_recv, -+ }; -+ struct ovpn_socket *old_data; -+ int ret; -+ -+ /* sanity check */ -+ if (sock->sk->sk_protocol != IPPROTO_UDP) { -+ DEBUG_NET_WARN_ON_ONCE(1); -+ return -EINVAL; -+ } -+ -+ /* make sure no pre-existing encapsulation handler exists */ -+ rcu_read_lock(); -+ old_data = rcu_dereference_sk_user_data(sock->sk); -+ if (!old_data) { -+ /* socket is currently unused - we can take it */ -+ rcu_read_unlock(); -+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg); -+ return 0; -+ } -+ -+ /* socket is in use. We need to understand if it's owned by this ovpn -+ * instance or by something else. -+ * In the former case, we can increase the refcounter and happily -+ * use it, because the same UDP socket is expected to be shared among -+ * different peers. 
-+ * -+ * Unlike TCP, a single UDP socket can be used to talk to many remote -+ * hosts and therefore openvpn instantiates only one for all its peers -+ */ -+ if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) && -+ old_data->ovpn == ovpn) { -+ netdev_dbg(ovpn->dev, -+ "%s: provided socket already owned by this interface\n", -+ __func__); -+ ret = -EALREADY; -+ } else { -+ netdev_err(ovpn->dev, -+ "%s: provided socket already taken by other user\n", -+ __func__); -+ ret = -EBUSY; -+ } -+ rcu_read_unlock(); -+ -+ return ret; -+} -+ -+/** -+ * ovpn_udp_socket_detach - clean udp-tunnel status for this socket -+ * @sock: the socket to clean -+ */ -+void ovpn_udp_socket_detach(struct socket *sock) -+{ -+ struct udp_tunnel_sock_cfg cfg = { }; -+ -+ setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg); -+} -diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h -new file mode 100644 -index 000000000000..fecb68464896 ---- /dev/null -+++ b/drivers/net/ovpn/udp.h -@@ -0,0 +1,26 @@ -+/* SPDX-License-Identifier: GPL-2.0-only */ -+/* OpenVPN data channel offload -+ * -+ * Copyright (C) 2019-2024 OpenVPN, Inc. -+ * -+ * Author: Antonio Quartulli -+ */ -+ -+#ifndef _NET_OVPN_UDP_H_ -+#define _NET_OVPN_UDP_H_ -+ -+#include -+#include -+ -+struct ovpn_peer; -+struct ovpn_struct; -+struct sk_buff; -+struct socket; -+ -+int ovpn_udp_socket_attach(struct socket *sock, struct ovpn_struct *ovpn); -+void ovpn_udp_socket_detach(struct socket *sock); -+void ovpn_udp_send_skb(struct ovpn_struct *ovpn, struct ovpn_peer *peer, -+ struct sk_buff *skb); -+struct ovpn_struct *ovpn_from_udp_sock(struct sock *sk); -+ -+#endif /* _NET_OVPN_UDP_H_ */ -diff --git a/include/net/netlink.h b/include/net/netlink.h -index db6af207287c..2dc671c977ff 100644 ---- a/include/net/netlink.h -+++ b/include/net/netlink.h -@@ -469,6 +469,7 @@ struct nla_policy { - .max = _len \ - } - #define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len) -+#define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len) - - /** - * struct nl_info - netlink source information -diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h -index 6dc258993b17..9a5419d60100 100644 ---- a/include/uapi/linux/if_link.h -+++ b/include/uapi/linux/if_link.h -@@ -1959,4 +1959,19 @@ enum { - - #define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) - -+/* OVPN section */ -+ -+enum ovpn_mode { -+ OVPN_MODE_P2P, -+ OVPN_MODE_MP, -+}; -+ -+enum { -+ IFLA_OVPN_UNSPEC, -+ IFLA_OVPN_MODE, -+ __IFLA_OVPN_MAX, -+}; -+ -+#define IFLA_OVPN_MAX (__IFLA_OVPN_MAX - 1) -+ - #endif /* _UAPI_LINUX_IF_LINK_H */ -diff --git a/include/uapi/linux/ovpn.h b/include/uapi/linux/ovpn.h -new file mode 100644 -index 000000000000..7bac0803cd9f ---- /dev/null -+++ b/include/uapi/linux/ovpn.h -@@ -0,0 +1,109 @@ -+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ -+/* Do not edit directly, auto-generated from: */ -+/* Documentation/netlink/specs/ovpn.yaml */ -+/* YNL-GEN uapi header */ -+ -+#ifndef _UAPI_LINUX_OVPN_H -+#define _UAPI_LINUX_OVPN_H -+ -+#define OVPN_FAMILY_NAME "ovpn" -+#define OVPN_FAMILY_VERSION 1 -+ -+#define OVPN_NONCE_TAIL_SIZE 8 -+ -+enum ovpn_cipher_alg { -+ OVPN_CIPHER_ALG_NONE, -+ OVPN_CIPHER_ALG_AES_GCM, -+ OVPN_CIPHER_ALG_CHACHA20_POLY1305, -+}; -+ -+enum ovpn_del_peer_reason { -+ OVPN_DEL_PEER_REASON_TEARDOWN, -+ OVPN_DEL_PEER_REASON_USERSPACE, -+ OVPN_DEL_PEER_REASON_EXPIRED, -+ OVPN_DEL_PEER_REASON_TRANSPORT_ERROR, -+ OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT, -+}; -+ -+enum ovpn_key_slot { -+ 
OVPN_KEY_SLOT_PRIMARY, -+ OVPN_KEY_SLOT_SECONDARY, -+}; -+ -+enum { -+ OVPN_A_PEER_ID = 1, -+ OVPN_A_PEER_REMOTE_IPV4, -+ OVPN_A_PEER_REMOTE_IPV6, -+ OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID, -+ OVPN_A_PEER_REMOTE_PORT, -+ OVPN_A_PEER_SOCKET, -+ OVPN_A_PEER_VPN_IPV4, -+ OVPN_A_PEER_VPN_IPV6, -+ OVPN_A_PEER_LOCAL_IPV4, -+ OVPN_A_PEER_LOCAL_IPV6, -+ OVPN_A_PEER_LOCAL_PORT, -+ OVPN_A_PEER_KEEPALIVE_INTERVAL, -+ OVPN_A_PEER_KEEPALIVE_TIMEOUT, -+ OVPN_A_PEER_DEL_REASON, -+ OVPN_A_PEER_VPN_RX_BYTES, -+ OVPN_A_PEER_VPN_TX_BYTES, -+ OVPN_A_PEER_VPN_RX_PACKETS, -+ OVPN_A_PEER_VPN_TX_PACKETS, -+ OVPN_A_PEER_LINK_RX_BYTES, -+ OVPN_A_PEER_LINK_TX_BYTES, -+ OVPN_A_PEER_LINK_RX_PACKETS, -+ OVPN_A_PEER_LINK_TX_PACKETS, -+ -+ __OVPN_A_PEER_MAX, -+ OVPN_A_PEER_MAX = (__OVPN_A_PEER_MAX - 1) -+}; -+ -+enum { -+ OVPN_A_KEYCONF_PEER_ID = 1, -+ OVPN_A_KEYCONF_SLOT, -+ OVPN_A_KEYCONF_KEY_ID, -+ OVPN_A_KEYCONF_CIPHER_ALG, -+ OVPN_A_KEYCONF_ENCRYPT_DIR, -+ OVPN_A_KEYCONF_DECRYPT_DIR, -+ -+ __OVPN_A_KEYCONF_MAX, -+ OVPN_A_KEYCONF_MAX = (__OVPN_A_KEYCONF_MAX - 1) -+}; -+ -+enum { -+ OVPN_A_KEYDIR_CIPHER_KEY = 1, -+ OVPN_A_KEYDIR_NONCE_TAIL, -+ -+ __OVPN_A_KEYDIR_MAX, -+ OVPN_A_KEYDIR_MAX = (__OVPN_A_KEYDIR_MAX - 1) -+}; -+ -+enum { -+ OVPN_A_IFINDEX = 1, -+ OVPN_A_IFNAME, -+ OVPN_A_PEER, -+ OVPN_A_KEYCONF, -+ -+ __OVPN_A_MAX, -+ OVPN_A_MAX = (__OVPN_A_MAX - 1) -+}; -+ -+enum { -+ OVPN_CMD_PEER_NEW = 1, -+ OVPN_CMD_PEER_SET, -+ OVPN_CMD_PEER_GET, -+ OVPN_CMD_PEER_DEL, -+ OVPN_CMD_PEER_DEL_NTF, -+ OVPN_CMD_KEY_NEW, -+ OVPN_CMD_KEY_GET, -+ OVPN_CMD_KEY_SWAP, -+ OVPN_CMD_KEY_SWAP_NTF, -+ OVPN_CMD_KEY_DEL, -+ -+ __OVPN_CMD_MAX, -+ OVPN_CMD_MAX = (__OVPN_CMD_MAX - 1) -+}; -+ -+#define OVPN_MCGRP_PEERS "peers" -+ -+#endif /* _UAPI_LINUX_OVPN_H */ -diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h -index 1a0fe8b151fb..f9f8ffddfd0c 100644 ---- a/include/uapi/linux/udp.h -+++ b/include/uapi/linux/udp.h -@@ -43,5 +43,6 @@ struct udphdr { - #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ - #define UDP_ENCAP_RXRPC 6 - #define TCP_ENCAP_ESPINTCP 7 /* Yikes, this is really xfrm encap types. 
*/ -+#define UDP_ENCAP_OVPNINUDP 8 /* OpenVPN traffic */ - - #endif /* _UAPI_LINUX_UDP_H */ -diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py -index 717530bc9c52..3ccbb301be87 100755 ---- a/tools/net/ynl/ynl-gen-c.py -+++ b/tools/net/ynl/ynl-gen-c.py -@@ -466,6 +466,8 @@ class TypeBinary(Type): - def _attr_policy(self, policy): - if 'exact-len' in self.checks: - mem = 'NLA_POLICY_EXACT_LEN(' + str(self.get_limit('exact-len')) + ')' -+ elif 'max-len' in self.checks: -+ mem = 'NLA_POLICY_MAX_LEN(' + str(self.get_limit('max-len')) + ')' - else: - mem = '{ ' - if len(self.checks) == 1 and 'min-len' in self.checks: -diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile -index ff18c0361e38..e4b4494b0765 100644 ---- a/tools/testing/selftests/Makefile -+++ b/tools/testing/selftests/Makefile -@@ -69,6 +69,7 @@ TARGETS += net/hsr - TARGETS += net/mptcp - TARGETS += net/netfilter - TARGETS += net/openvswitch -+TARGETS += net/ovpn - TARGETS += net/packetdrill - TARGETS += net/rds - TARGETS += net/tcp_ao -diff --git a/tools/testing/selftests/net/ovpn/.gitignore b/tools/testing/selftests/net/ovpn/.gitignore -new file mode 100644 -index 000000000000..ee44c081ca7c ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/.gitignore -@@ -0,0 +1,2 @@ -+# SPDX-License-Identifier: GPL-2.0+ -+ovpn-cli -diff --git a/tools/testing/selftests/net/ovpn/Makefile b/tools/testing/selftests/net/ovpn/Makefile -new file mode 100644 -index 000000000000..c76d8fd953c5 ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/Makefile -@@ -0,0 +1,17 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# Copyright (C) 2020-2024 OpenVPN, Inc. -+# -+CFLAGS = -pedantic -Wextra -Wall -Wl,--no-as-needed -g -O0 -ggdb $(KHDR_INCLUDES) -+CFLAGS += $(shell pkg-config --cflags libnl-3.0 libnl-genl-3.0) -+ -+LDFLAGS = -lmbedtls -lmbedcrypto -+LDFLAGS += $(shell pkg-config --libs libnl-3.0 libnl-genl-3.0) -+ -+TEST_PROGS = test.sh \ -+ test-chachapoly.sh \ -+ test-tcp.sh \ -+ test-float.sh -+ -+TEST_GEN_FILES = ovpn-cli -+ -+include ../../lib.mk -diff --git a/tools/testing/selftests/net/ovpn/config b/tools/testing/selftests/net/ovpn/config -new file mode 100644 -index 000000000000..71946ba9fa17 ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/config -@@ -0,0 +1,10 @@ -+CONFIG_NET=y -+CONFIG_INET=y -+CONFIG_STREAM_PARSER=y -+CONFIG_NET_UDP_TUNNEL=y -+CONFIG_DST_CACHE=y -+CONFIG_CRYPTO=y -+CONFIG_CRYPTO_AES=y -+CONFIG_CRYPTO_GCM=y -+CONFIG_CRYPTO_CHACHA20POLY1305=y -+CONFIG_OVPN=m -diff --git a/tools/testing/selftests/net/ovpn/data64.key b/tools/testing/selftests/net/ovpn/data64.key -new file mode 100644 -index 000000000000..a99e88c4e290 ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/data64.key -@@ -0,0 +1,5 @@ -+jRqMACN7d7/aFQNT8S7jkrBD8uwrgHbG5OQZP2eu4R1Y7tfpS2bf5RHv06Vi163CGoaIiTX99R3B -+ia9ycAH8Wz1+9PWv51dnBLur9jbShlgZ2QHLtUc4a/gfT7zZwULXuuxdLnvR21DDeMBaTbkgbai9 -+uvAa7ne1liIgGFzbv+Bas4HDVrygxIxuAnP5Qgc3648IJkZ0QEXPF+O9f0n5+QIvGCxkAUVx+5K6 -+KIs+SoeWXnAopELmoGSjUpFtJbagXK82HfdqpuUxT2Tnuef0/14SzVE/vNleBNu2ZbyrSAaah8tE -+BofkPJUBFY+YQcfZNM5Dgrw3i+Bpmpq/gpdg5w== -diff --git a/tools/testing/selftests/net/ovpn/ovpn-cli.c b/tools/testing/selftests/net/ovpn/ovpn-cli.c -new file mode 100644 -index 000000000000..046dd069aaaf ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/ovpn-cli.c -@@ -0,0 +1,2370 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* OpenVPN data channel accelerator -+ * -+ * Copyright (C) 2020-2024 OpenVPN, Inc. 
-+ * -+ * Author: Antonio Quartulli -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+/* defines to make checkpatch happy */ -+#define strscpy strncpy -+#define __always_unused __attribute__((__unused__)) -+ -+/* libnl < 3.5.0 does not set the NLA_F_NESTED on its own, therefore we -+ * have to explicitly do it to prevent the kernel from failing upon -+ * parsing of the message -+ */ -+#define nla_nest_start(_msg, _type) \ -+ nla_nest_start(_msg, (_type) | NLA_F_NESTED) -+ -+uint64_t nla_get_uint(struct nlattr *attr) -+{ -+ if (nla_len(attr) == sizeof(uint32_t)) -+ return nla_get_u32(attr); -+ else -+ return nla_get_u64(attr); -+} -+ -+typedef int (*ovpn_nl_cb)(struct nl_msg *msg, void *arg); -+ -+enum ovpn_key_direction { -+ KEY_DIR_IN = 0, -+ KEY_DIR_OUT, -+}; -+ -+#define KEY_LEN (256 / 8) -+#define NONCE_LEN 8 -+ -+#define PEER_ID_UNDEF 0x00FFFFFF -+ -+struct nl_ctx { -+ struct nl_sock *nl_sock; -+ struct nl_msg *nl_msg; -+ struct nl_cb *nl_cb; -+ -+ int ovpn_dco_id; -+}; -+ -+enum ovpn_cmd { -+ CMD_INVALID, -+ CMD_NEW_IFACE, -+ CMD_DEL_IFACE, -+ CMD_LISTEN, -+ CMD_CONNECT, -+ CMD_NEW_PEER, -+ CMD_NEW_MULTI_PEER, -+ CMD_SET_PEER, -+ CMD_DEL_PEER, -+ CMD_GET_PEER, -+ CMD_NEW_KEY, -+ CMD_DEL_KEY, -+ CMD_GET_KEY, -+ CMD_SWAP_KEYS, -+ CMD_LISTEN_MCAST, -+}; -+ -+struct ovpn_ctx { -+ enum ovpn_cmd cmd; -+ -+ __u8 key_enc[KEY_LEN]; -+ __u8 key_dec[KEY_LEN]; -+ __u8 nonce[NONCE_LEN]; -+ -+ enum ovpn_cipher_alg cipher; -+ -+ sa_family_t sa_family; -+ -+ unsigned long peer_id; -+ unsigned long lport; -+ -+ union { -+ struct sockaddr_in in4; -+ struct sockaddr_in6 in6; -+ } remote; -+ -+ union { -+ struct sockaddr_in in4; -+ struct sockaddr_in6 in6; -+ } peer_ip; -+ -+ bool peer_ip_set; -+ -+ unsigned int ifindex; -+ char ifname[IFNAMSIZ]; -+ enum ovpn_mode mode; -+ bool mode_set; -+ -+ int socket; -+ int cli_socket; -+ -+ __u32 keepalive_interval; -+ __u32 keepalive_timeout; -+ -+ enum ovpn_key_direction key_dir; -+ enum ovpn_key_slot key_slot; -+ int key_id; -+ -+ const char *peers_file; -+}; -+ -+static int ovpn_nl_recvmsgs(struct nl_ctx *ctx) -+{ -+ int ret; -+ -+ ret = nl_recvmsgs(ctx->nl_sock, ctx->nl_cb); -+ -+ switch (ret) { -+ case -NLE_INTR: -+ fprintf(stderr, -+ "netlink received interrupt due to signal - ignoring\n"); -+ break; -+ case -NLE_NOMEM: -+ fprintf(stderr, "netlink out of memory error\n"); -+ break; -+ case -NLE_AGAIN: -+ fprintf(stderr, -+ "netlink reports blocking read - aborting wait\n"); -+ break; -+ default: -+ if (ret) -+ fprintf(stderr, "netlink reports error (%d): %s\n", -+ ret, nl_geterror(-ret)); -+ break; -+ } -+ -+ return ret; -+} -+ -+static struct nl_ctx *nl_ctx_alloc_flags(struct ovpn_ctx *ovpn, int cmd, -+ int flags) -+{ -+ struct nl_ctx *ctx; -+ int err, ret; -+ -+ ctx = calloc(1, sizeof(*ctx)); -+ if (!ctx) -+ return NULL; -+ -+ ctx->nl_sock = nl_socket_alloc(); -+ if (!ctx->nl_sock) { -+ fprintf(stderr, "cannot allocate netlink socket\n"); -+ goto err_free; -+ } -+ -+ nl_socket_set_buffer_size(ctx->nl_sock, 8192, 8192); -+ -+ ret = genl_connect(ctx->nl_sock); -+ if (ret) { -+ fprintf(stderr, "cannot connect to generic netlink: %s\n", -+ nl_geterror(ret)); -+ goto err_sock; -+ } -+ -+ /* enable Extended ACK for detailed error reporting */ -+ err = 1; -+ setsockopt(nl_socket_get_fd(ctx->nl_sock), SOL_NETLINK, NETLINK_EXT_ACK, -+ &err, sizeof(err)); -+ -+ 
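/* resolve the numeric ID assigned to the ovpn generic netlink -+ * family; this fails if the ovpn module is not loaded -+ */ -+ 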
ctx->ovpn_dco_id = genl_ctrl_resolve(ctx->nl_sock, OVPN_FAMILY_NAME); -+ if (ctx->ovpn_dco_id < 0) { -+ fprintf(stderr, "cannot find ovpn_dco netlink component: %d\n", -+ ctx->ovpn_dco_id); -+ goto err_free; -+ } -+ -+ ctx->nl_msg = nlmsg_alloc(); -+ if (!ctx->nl_msg) { -+ fprintf(stderr, "cannot allocate netlink message\n"); -+ goto err_sock; -+ } -+ -+ ctx->nl_cb = nl_cb_alloc(NL_CB_DEFAULT); -+ if (!ctx->nl_cb) { -+ fprintf(stderr, "failed to allocate netlink callback\n"); -+ goto err_msg; -+ } -+ -+ nl_socket_set_cb(ctx->nl_sock, ctx->nl_cb); -+ -+ genlmsg_put(ctx->nl_msg, 0, 0, ctx->ovpn_dco_id, 0, flags, cmd, 0); -+ -+ if (ovpn->ifindex > 0) -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_IFINDEX, ovpn->ifindex); -+ -+ return ctx; -+nla_put_failure: -+err_msg: -+ nlmsg_free(ctx->nl_msg); -+err_sock: -+ nl_socket_free(ctx->nl_sock); -+err_free: -+ free(ctx); -+ return NULL; -+} -+ -+static struct nl_ctx *nl_ctx_alloc(struct ovpn_ctx *ovpn, int cmd) -+{ -+ return nl_ctx_alloc_flags(ovpn, cmd, 0); -+} -+ -+static void nl_ctx_free(struct nl_ctx *ctx) -+{ -+ if (!ctx) -+ return; -+ -+ nl_socket_free(ctx->nl_sock); -+ nlmsg_free(ctx->nl_msg); -+ nl_cb_put(ctx->nl_cb); -+ free(ctx); -+} -+ -+static int ovpn_nl_cb_error(struct sockaddr_nl (*nla)__always_unused, -+ struct nlmsgerr *err, void *arg) -+{ -+ struct nlmsghdr *nlh = (struct nlmsghdr *)err - 1; -+ struct nlattr *tb_msg[NLMSGERR_ATTR_MAX + 1]; -+ int len = nlh->nlmsg_len; -+ struct nlattr *attrs; -+ int *ret = arg; -+ int ack_len = sizeof(*nlh) + sizeof(int) + sizeof(*nlh); -+ -+ *ret = err->error; -+ -+ if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) -+ return NL_STOP; -+ -+ if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) -+ ack_len += err->msg.nlmsg_len - sizeof(*nlh); -+ -+ if (len <= ack_len) -+ return NL_STOP; -+ -+ attrs = (void *)((uint8_t *)nlh + ack_len); -+ len -= ack_len; -+ -+ nla_parse(tb_msg, NLMSGERR_ATTR_MAX, attrs, len, NULL); -+ if (tb_msg[NLMSGERR_ATTR_MSG]) { -+ len = strnlen((char *)nla_data(tb_msg[NLMSGERR_ATTR_MSG]), -+ nla_len(tb_msg[NLMSGERR_ATTR_MSG])); -+ fprintf(stderr, "kernel error: %*s\n", len, -+ (char *)nla_data(tb_msg[NLMSGERR_ATTR_MSG])); -+ } -+ -+ if (tb_msg[NLMSGERR_ATTR_MISS_NEST]) { -+ fprintf(stderr, "missing required nesting type %u\n", -+ nla_get_u32(tb_msg[NLMSGERR_ATTR_MISS_NEST])); -+ } -+ -+ if (tb_msg[NLMSGERR_ATTR_MISS_TYPE]) { -+ fprintf(stderr, "missing required attribute type %u\n", -+ nla_get_u32(tb_msg[NLMSGERR_ATTR_MISS_TYPE])); -+ } -+ -+ return NL_STOP; -+} -+ -+static int ovpn_nl_cb_finish(struct nl_msg (*msg)__always_unused, -+ void *arg) -+{ -+ int *status = arg; -+ -+ *status = 0; -+ return NL_SKIP; -+} -+ -+static int ovpn_nl_cb_ack(struct nl_msg (*msg)__always_unused, -+ void *arg) -+{ -+ int *status = arg; -+ -+ *status = 0; -+ return NL_STOP; -+} -+ -+static int ovpn_nl_msg_send(struct nl_ctx *ctx, ovpn_nl_cb cb) -+{ -+ int status = 1; -+ -+ nl_cb_err(ctx->nl_cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &status); -+ nl_cb_set(ctx->nl_cb, NL_CB_FINISH, NL_CB_CUSTOM, ovpn_nl_cb_finish, -+ &status); -+ nl_cb_set(ctx->nl_cb, NL_CB_ACK, NL_CB_CUSTOM, ovpn_nl_cb_ack, &status); -+ -+ if (cb) -+ nl_cb_set(ctx->nl_cb, NL_CB_VALID, NL_CB_CUSTOM, cb, ctx); -+ -+ nl_send_auto_complete(ctx->nl_sock, ctx->nl_msg); -+ -+ while (status == 1) -+ ovpn_nl_recvmsgs(ctx); -+ -+ if (status < 0) -+ fprintf(stderr, "failed to send netlink message: %s (%d)\n", -+ strerror(-status), status); -+ -+ return status; -+} -+ -+static int ovpn_parse_key(const char *file, struct ovpn_ctx *ctx) -+{ -+ int idx_enc, idx_dec, ret = -1; -+ 
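/* ckey holds the base64 text read from the key file, bkey the -+ * decoded binary blob extracted from it -+ */ -+ 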
unsigned char *ckey = NULL; -+ __u8 *bkey = NULL; -+ size_t olen = 0; -+ long ckey_len; -+ FILE *fp; -+ -+ fp = fopen(file, "r"); -+ if (!fp) { -+ fprintf(stderr, "cannot open: %s\n", file); -+ return -1; -+ } -+ -+ /* get file size */ -+ fseek(fp, 0L, SEEK_END); -+ ckey_len = ftell(fp); -+ rewind(fp); -+ -+ /* if the file is longer, let's just read a portion */ -+ if (ckey_len > 256) -+ ckey_len = 256; -+ -+ ckey = malloc(ckey_len); -+ if (!ckey) -+ goto err; -+ -+ ret = fread(ckey, 1, ckey_len, fp); -+ if (ret != ckey_len) { -+ fprintf(stderr, -+ "couldn't read enough data from key file: %d bytes read\n", -+ ret); -+ goto err; -+ } -+ -+ olen = 0; -+ ret = mbedtls_base64_decode(NULL, 0, &olen, ckey, ckey_len); -+ if (ret != MBEDTLS_ERR_BASE64_BUFFER_TOO_SMALL) { -+ char buf[256]; -+ -+ mbedtls_strerror(ret, buf, sizeof(buf)); -+ fprintf(stderr, "unexpected base64 error1: %s (%d)\n", buf, -+ ret); -+ -+ goto err; -+ } -+ -+ bkey = malloc(olen); -+ if (!bkey) { -+ fprintf(stderr, "cannot allocate binary key buffer\n"); -+ goto err; -+ } -+ -+ ret = mbedtls_base64_decode(bkey, olen, &olen, ckey, ckey_len); -+ if (ret) { -+ char buf[256]; -+ -+ mbedtls_strerror(ret, buf, sizeof(buf)); -+ fprintf(stderr, "unexpected base64 error2: %s (%d)\n", buf, -+ ret); -+ -+ goto err; -+ } -+ -+ if (olen < 2 * KEY_LEN + NONCE_LEN) { -+ fprintf(stderr, -+ "not enough data in key file, found %zdB but needs %dB\n", -+ olen, 2 * KEY_LEN + NONCE_LEN); -+ goto err; -+ } -+ -+ switch (ctx->key_dir) { -+ case KEY_DIR_IN: -+ idx_enc = 0; -+ idx_dec = 1; -+ break; -+ case KEY_DIR_OUT: -+ idx_enc = 1; -+ idx_dec = 0; -+ break; -+ default: -+ goto err; -+ } -+ -+ memcpy(ctx->key_enc, bkey + KEY_LEN * idx_enc, KEY_LEN); -+ memcpy(ctx->key_dec, bkey + KEY_LEN * idx_dec, KEY_LEN); -+ memcpy(ctx->nonce, bkey + 2 * KEY_LEN, NONCE_LEN); -+ -+ ret = 0; -+ -+err: -+ fclose(fp); -+ free(bkey); -+ free(ckey); -+ -+ return ret; -+} -+ -+static int ovpn_parse_cipher(const char *cipher, struct ovpn_ctx *ctx) -+{ -+ if (strcmp(cipher, "aes") == 0) -+ ctx->cipher = OVPN_CIPHER_ALG_AES_GCM; -+ else if (strcmp(cipher, "chachapoly") == 0) -+ ctx->cipher = OVPN_CIPHER_ALG_CHACHA20_POLY1305; -+ else if (strcmp(cipher, "none") == 0) -+ ctx->cipher = OVPN_CIPHER_ALG_NONE; -+ else -+ return -ENOTSUP; -+ -+ return 0; -+} -+ -+static int ovpn_parse_key_direction(const char *dir, struct ovpn_ctx *ctx) -+{ -+ int in_dir; -+ -+ in_dir = strtoll(dir, NULL, 10); -+ switch (in_dir) { -+ case KEY_DIR_IN: -+ case KEY_DIR_OUT: -+ ctx->key_dir = in_dir; -+ break; -+ default: -+ fprintf(stderr, -+ "invalid key direction provided. 
Can be 0 or 1 only\n"); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int ovpn_socket(struct ovpn_ctx *ctx, sa_family_t family, int proto) -+{ -+ struct sockaddr_storage local_sock = { 0 }; -+ struct sockaddr_in6 *in6; -+ struct sockaddr_in *in; -+ int ret, s, sock_type; -+ size_t sock_len; -+ -+ if (proto == IPPROTO_UDP) -+ sock_type = SOCK_DGRAM; -+ else if (proto == IPPROTO_TCP) -+ sock_type = SOCK_STREAM; -+ else -+ return -EINVAL; -+ -+ s = socket(family, sock_type, 0); -+ if (s < 0) { -+ perror("cannot create socket"); -+ return -1; -+ } -+ -+ switch (family) { -+ case AF_INET: -+ in = (struct sockaddr_in *)&local_sock; -+ in->sin_family = family; -+ in->sin_port = htons(ctx->lport); -+ in->sin_addr.s_addr = htonl(INADDR_ANY); -+ sock_len = sizeof(*in); -+ break; -+ case AF_INET6: -+ in6 = (struct sockaddr_in6 *)&local_sock; -+ in6->sin6_family = family; -+ in6->sin6_port = htons(ctx->lport); -+ in6->sin6_addr = in6addr_any; -+ sock_len = sizeof(*in6); -+ break; -+ default: -+ return -1; -+ } -+ -+ int opt = 1; -+ -+ ret = setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); -+ -+ if (ret < 0) { -+ perror("setsockopt for SO_REUSEADDR"); -+ return ret; -+ } -+ -+ ret = setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)); -+ if (ret < 0) { -+ perror("setsockopt for SO_REUSEPORT"); -+ return ret; -+ } -+ -+ if (family == AF_INET6) { -+ opt = 0; -+ if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &opt, -+ sizeof(opt))) { -+ perror("failed to set IPV6_V6ONLY"); -+ return -1; -+ } -+ } -+ -+ ret = bind(s, (struct sockaddr *)&local_sock, sock_len); -+ if (ret < 0) { -+ perror("cannot bind socket"); -+ goto err_socket; -+ } -+ -+ ctx->socket = s; -+ ctx->sa_family = family; -+ return 0; -+ -+err_socket: -+ close(s); -+ return -1; -+} -+ -+static int ovpn_udp_socket(struct ovpn_ctx *ctx, sa_family_t family) -+{ -+ return ovpn_socket(ctx, family, IPPROTO_UDP); -+} -+ -+static int ovpn_listen(struct ovpn_ctx *ctx, sa_family_t family) -+{ -+ int ret; -+ -+ ret = ovpn_socket(ctx, family, IPPROTO_TCP); -+ if (ret < 0) -+ return ret; -+ -+ ret = listen(ctx->socket, 10); -+ if (ret < 0) { -+ perror("listen"); -+ close(ctx->socket); -+ return -1; -+ } -+ -+ return 0; -+} -+ -+static int ovpn_accept(struct ovpn_ctx *ctx) -+{ -+ socklen_t socklen; -+ int ret; -+ -+ socklen = sizeof(ctx->remote); -+ ret = accept(ctx->socket, (struct sockaddr *)&ctx->remote, &socklen); -+ if (ret < 0) { -+ perror("accept"); -+ goto err; -+ } -+ -+ fprintf(stderr, "Connection received!\n"); -+ -+ switch (socklen) { -+ case sizeof(struct sockaddr_in): -+ case sizeof(struct sockaddr_in6): -+ break; -+ default: -+ fprintf(stderr, "error: expecting IPv4 or IPv6 connection\n"); -+ close(ret); -+ ret = -EINVAL; -+ goto err; -+ } -+ -+ return ret; -+err: -+ close(ctx->socket); -+ return ret; -+} -+ -+static int ovpn_connect(struct ovpn_ctx *ovpn) -+{ -+ socklen_t socklen; -+ int s, ret; -+ -+ s = socket(ovpn->remote.in4.sin_family, SOCK_STREAM, 0); -+ if (s < 0) { -+ perror("cannot create socket"); -+ return -1; -+ } -+ -+ switch (ovpn->remote.in4.sin_family) { -+ case AF_INET: -+ socklen = sizeof(struct sockaddr_in); -+ break; -+ case AF_INET6: -+ socklen = sizeof(struct sockaddr_in6); -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ ret = connect(s, (struct sockaddr *)&ovpn->remote, socklen); -+ if (ret < 0) { -+ perror("connect"); -+ goto err; -+ } -+ -+ fprintf(stderr, "connected\n"); -+ -+ ovpn->socket = s; -+ -+ return 0; -+err: -+ close(s); -+ return ret; -+} -+ -+static int ovpn_new_peer(struct 
ovpn_ctx *ovpn, bool is_tcp) -+{ -+ struct nlattr *attr; -+ struct nl_ctx *ctx; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_NEW); -+ if (!ctx) -+ return -ENOMEM; -+ -+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_SOCKET, ovpn->socket); -+ -+ if (!is_tcp) { -+ switch (ovpn->remote.in4.sin_family) { -+ case AF_INET: -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_REMOTE_IPV4, -+ ovpn->remote.in4.sin_addr.s_addr); -+ NLA_PUT_U16(ctx->nl_msg, OVPN_A_PEER_REMOTE_PORT, -+ ovpn->remote.in4.sin_port); -+ break; -+ case AF_INET6: -+ NLA_PUT(ctx->nl_msg, OVPN_A_PEER_REMOTE_IPV6, -+ sizeof(ovpn->remote.in6.sin6_addr), -+ &ovpn->remote.in6.sin6_addr); -+ NLA_PUT_U32(ctx->nl_msg, -+ OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID, -+ ovpn->remote.in6.sin6_scope_id); -+ NLA_PUT_U16(ctx->nl_msg, OVPN_A_PEER_REMOTE_PORT, -+ ovpn->remote.in6.sin6_port); -+ break; -+ default: -+ fprintf(stderr, -+ "Invalid family for remote socket address\n"); -+ goto nla_put_failure; -+ } -+ } -+ -+ if (ovpn->peer_ip_set) { -+ switch (ovpn->peer_ip.in4.sin_family) { -+ case AF_INET: -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_VPN_IPV4, -+ ovpn->peer_ip.in4.sin_addr.s_addr); -+ break; -+ case AF_INET6: -+ NLA_PUT(ctx->nl_msg, OVPN_A_PEER_VPN_IPV6, -+ sizeof(struct in6_addr), -+ &ovpn->peer_ip.in6.sin6_addr); -+ break; -+ default: -+ fprintf(stderr, "Invalid family for peer address\n"); -+ goto nla_put_failure; -+ } -+ } -+ -+ nla_nest_end(ctx->nl_msg, attr); -+ -+ ret = ovpn_nl_msg_send(ctx, NULL); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_set_peer(struct ovpn_ctx *ovpn) -+{ -+ struct nlattr *attr; -+ struct nl_ctx *ctx; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_SET); -+ if (!ctx) -+ return -ENOMEM; -+ -+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_KEEPALIVE_INTERVAL, -+ ovpn->keepalive_interval); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_KEEPALIVE_TIMEOUT, -+ ovpn->keepalive_timeout); -+ nla_nest_end(ctx->nl_msg, attr); -+ -+ ret = ovpn_nl_msg_send(ctx, NULL); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_del_peer(struct ovpn_ctx *ovpn) -+{ -+ struct nlattr *attr; -+ struct nl_ctx *ctx; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_DEL); -+ if (!ctx) -+ return -ENOMEM; -+ -+ attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); -+ nla_nest_end(ctx->nl_msg, attr); -+ -+ ret = ovpn_nl_msg_send(ctx, NULL); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_handle_peer(struct nl_msg *msg, void (*arg)__always_unused) -+{ -+ struct nlattr *pattrs[OVPN_A_PEER_MAX + 1]; -+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); -+ struct nlattr *attrs[OVPN_A_MAX + 1]; -+ __u16 rport = 0, lport = 0; -+ -+ nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0), -+ genlmsg_attrlen(gnlh, 0), NULL); -+ -+ if (!attrs[OVPN_A_PEER]) { -+ fprintf(stderr, "no packet content in netlink message\n"); -+ return NL_SKIP; -+ } -+ -+ nla_parse(pattrs, OVPN_A_PEER_MAX, nla_data(attrs[OVPN_A_PEER]), -+ nla_len(attrs[OVPN_A_PEER]), NULL); -+ -+ if (pattrs[OVPN_A_PEER_ID]) -+ fprintf(stderr, "* Peer %u\n", -+ nla_get_u32(pattrs[OVPN_A_PEER_ID])); -+ -+ if (pattrs[OVPN_A_PEER_VPN_IPV4]) { -+ char buf[INET_ADDRSTRLEN]; -+ -+ inet_ntop(AF_INET, 
nla_data(pattrs[OVPN_A_PEER_VPN_IPV4]), -+ buf, sizeof(buf)); -+ fprintf(stderr, "\tVPN IPv4: %s\n", buf); -+ } -+ -+ if (pattrs[OVPN_A_PEER_VPN_IPV6]) { -+ char buf[INET6_ADDRSTRLEN]; -+ -+ inet_ntop(AF_INET6, nla_data(pattrs[OVPN_A_PEER_VPN_IPV6]), -+ buf, sizeof(buf)); -+ fprintf(stderr, "\tVPN IPv6: %s\n", buf); -+ } -+ -+ if (pattrs[OVPN_A_PEER_LOCAL_PORT]) -+ lport = ntohs(nla_get_u16(pattrs[OVPN_A_PEER_LOCAL_PORT])); -+ -+ if (pattrs[OVPN_A_PEER_REMOTE_PORT]) -+ rport = ntohs(nla_get_u16(pattrs[OVPN_A_PEER_REMOTE_PORT])); -+ -+ if (pattrs[OVPN_A_PEER_REMOTE_IPV6]) { -+ void *ip = pattrs[OVPN_A_PEER_REMOTE_IPV6]; -+ char buf[INET6_ADDRSTRLEN]; -+ int scope_id = -1; -+ -+ if (pattrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) { -+ void *p = pattrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]; -+ -+ scope_id = nla_get_u32(p); -+ } -+ -+ inet_ntop(AF_INET6, nla_data(ip), buf, sizeof(buf)); -+ fprintf(stderr, "\tRemote: %s:%hu (scope-id: %u)\n", buf, rport, -+ scope_id); -+ -+ if (pattrs[OVPN_A_PEER_LOCAL_IPV6]) { -+ void *ip = pattrs[OVPN_A_PEER_LOCAL_IPV6]; -+ -+ inet_ntop(AF_INET6, nla_data(ip), buf, sizeof(buf)); -+ fprintf(stderr, "\tLocal: %s:%hu\n", buf, lport); -+ } -+ } -+ -+ if (pattrs[OVPN_A_PEER_REMOTE_IPV4]) { -+ void *ip = pattrs[OVPN_A_PEER_REMOTE_IPV4]; -+ char buf[INET_ADDRSTRLEN]; -+ -+ inet_ntop(AF_INET, nla_data(ip), buf, sizeof(buf)); -+ fprintf(stderr, "\tRemote: %s:%hu\n", buf, rport); -+ -+ if (pattrs[OVPN_A_PEER_LOCAL_IPV4]) { -+ void *p = pattrs[OVPN_A_PEER_LOCAL_IPV4]; -+ -+ inet_ntop(AF_INET, nla_data(p), buf, sizeof(buf)); -+ fprintf(stderr, "\tLocal: %s:%hu\n", buf, lport); -+ } -+ } -+ -+ if (pattrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]) { -+ void *p = pattrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]; -+ -+ fprintf(stderr, "\tKeepalive interval: %u sec\n", -+ nla_get_u32(p)); -+ } -+ -+ if (pattrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) -+ fprintf(stderr, "\tKeepalive timeout: %u sec\n", -+ nla_get_u32(pattrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])); -+ -+ if (pattrs[OVPN_A_PEER_VPN_RX_BYTES]) -+ fprintf(stderr, "\tVPN RX bytes: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_VPN_RX_BYTES])); -+ -+ if (pattrs[OVPN_A_PEER_VPN_TX_BYTES]) -+ fprintf(stderr, "\tVPN TX bytes: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_VPN_TX_BYTES])); -+ -+ if (pattrs[OVPN_A_PEER_VPN_RX_PACKETS]) -+ fprintf(stderr, "\tVPN RX packets: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_VPN_RX_PACKETS])); -+ -+ if (pattrs[OVPN_A_PEER_VPN_TX_PACKETS]) -+ fprintf(stderr, "\tVPN TX packets: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_VPN_TX_PACKETS])); -+ -+ if (pattrs[OVPN_A_PEER_LINK_RX_BYTES]) -+ fprintf(stderr, "\tLINK RX bytes: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_LINK_RX_BYTES])); -+ -+ if (pattrs[OVPN_A_PEER_LINK_TX_BYTES]) -+ fprintf(stderr, "\tLINK TX bytes: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_LINK_TX_BYTES])); -+ -+ if (pattrs[OVPN_A_PEER_LINK_RX_PACKETS]) -+ fprintf(stderr, "\tLINK RX packets: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_LINK_RX_PACKETS])); -+ -+ if (pattrs[OVPN_A_PEER_LINK_TX_PACKETS]) -+ fprintf(stderr, "\tLINK TX packets: %" PRIu64 "\n", -+ nla_get_uint(pattrs[OVPN_A_PEER_LINK_TX_PACKETS])); -+ -+ return NL_SKIP; -+} -+ -+static int ovpn_get_peer(struct ovpn_ctx *ovpn) -+{ -+ int flags = 0, ret = -1; -+ struct nlattr *attr; -+ struct nl_ctx *ctx; -+ -+ if (ovpn->peer_id == PEER_ID_UNDEF) -+ flags = NLM_F_DUMP; -+ -+ ctx = nl_ctx_alloc_flags(ovpn, OVPN_CMD_PEER_GET, flags); -+ if (!ctx) -+ return -ENOMEM; -+ -+ if (ovpn->peer_id != PEER_ID_UNDEF) { -+ 
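/* a specific peer was requested: include its ID in the GET -+ * request; without it the kernel dumps all peers -+ */ -+ 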
attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); -+ nla_nest_end(ctx->nl_msg, attr); -+ } -+ -+ ret = ovpn_nl_msg_send(ctx, ovpn_handle_peer); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_new_key(struct ovpn_ctx *ovpn) -+{ -+ struct nlattr *keyconf, *key_dir; -+ struct nl_ctx *ctx; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_NEW); -+ if (!ctx) -+ return -ENOMEM; -+ -+ keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_KEY_ID, ovpn->key_id); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_CIPHER_ALG, ovpn->cipher); -+ -+ key_dir = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF_ENCRYPT_DIR); -+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_CIPHER_KEY, KEY_LEN, ovpn->key_enc); -+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_NONCE_TAIL, NONCE_LEN, ovpn->nonce); -+ nla_nest_end(ctx->nl_msg, key_dir); -+ -+ key_dir = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF_DECRYPT_DIR); -+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_CIPHER_KEY, KEY_LEN, ovpn->key_dec); -+ NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_NONCE_TAIL, NONCE_LEN, ovpn->nonce); -+ nla_nest_end(ctx->nl_msg, key_dir); -+ -+ nla_nest_end(ctx->nl_msg, keyconf); -+ -+ ret = ovpn_nl_msg_send(ctx, NULL); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_del_key(struct ovpn_ctx *ovpn) -+{ -+ struct nlattr *keyconf; -+ struct nl_ctx *ctx; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_DEL); -+ if (!ctx) -+ return -ENOMEM; -+ -+ keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot); -+ nla_nest_end(ctx->nl_msg, keyconf); -+ -+ ret = ovpn_nl_msg_send(ctx, NULL); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_handle_key(struct nl_msg *msg, void (*arg)__always_unused) -+{ -+ struct nlattr *kattrs[OVPN_A_KEYCONF_MAX + 1]; -+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); -+ struct nlattr *attrs[OVPN_A_MAX + 1]; -+ -+ nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0), -+ genlmsg_attrlen(gnlh, 0), NULL); -+ -+ if (!attrs[OVPN_A_KEYCONF]) { -+ fprintf(stderr, "no packet content in netlink message\n"); -+ return NL_SKIP; -+ } -+ -+ nla_parse(kattrs, OVPN_A_KEYCONF_MAX, nla_data(attrs[OVPN_A_KEYCONF]), -+ nla_len(attrs[OVPN_A_KEYCONF]), NULL); -+ -+ if (kattrs[OVPN_A_KEYCONF_PEER_ID]) -+ fprintf(stderr, "* Peer %u\n", -+ nla_get_u32(kattrs[OVPN_A_KEYCONF_PEER_ID])); -+ if (kattrs[OVPN_A_KEYCONF_SLOT]) { -+ fprintf(stderr, "\t- Slot: "); -+ switch (nla_get_u32(kattrs[OVPN_A_KEYCONF_SLOT])) { -+ case OVPN_KEY_SLOT_PRIMARY: -+ fprintf(stderr, "primary\n"); -+ break; -+ case OVPN_KEY_SLOT_SECONDARY: -+ fprintf(stderr, "secondary\n"); -+ break; -+ default: -+ fprintf(stderr, "invalid (%u)\n", -+ nla_get_u32(kattrs[OVPN_A_KEYCONF_SLOT])); -+ break; -+ } -+ } -+ if (kattrs[OVPN_A_KEYCONF_KEY_ID]) -+ fprintf(stderr, "\t- Key ID: %u\n", -+ nla_get_u32(kattrs[OVPN_A_KEYCONF_KEY_ID])); -+ if (kattrs[OVPN_A_KEYCONF_CIPHER_ALG]) { -+ fprintf(stderr, "\t- Cipher: "); -+ switch (nla_get_u32(kattrs[OVPN_A_KEYCONF_CIPHER_ALG])) { -+ case OVPN_CIPHER_ALG_NONE: -+ fprintf(stderr, "none\n"); -+ break; -+ case OVPN_CIPHER_ALG_AES_GCM: -+ fprintf(stderr, "aes-gcm\n"); -+ break; -+ case 
OVPN_CIPHER_ALG_CHACHA20_POLY1305: -+ fprintf(stderr, "chacha20poly1305\n"); -+ break; -+ default: -+ fprintf(stderr, "invalid (%u)\n", -+ nla_get_u32(kattrs[OVPN_A_KEYCONF_CIPHER_ALG])); -+ break; -+ } -+ } -+ -+ return NL_SKIP; -+} -+ -+static int ovpn_get_key(struct ovpn_ctx *ovpn) -+{ -+ struct nlattr *keyconf; -+ struct nl_ctx *ctx; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_GET); -+ if (!ctx) -+ return -ENOMEM; -+ -+ keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot); -+ nla_nest_end(ctx->nl_msg, keyconf); -+ -+ ret = ovpn_nl_msg_send(ctx, ovpn_handle_key); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+static int ovpn_swap_keys(struct ovpn_ctx *ovpn) -+{ -+ struct nl_ctx *ctx; -+ struct nlattr *kc; -+ int ret = -1; -+ -+ ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_SWAP); -+ if (!ctx) -+ return -ENOMEM; -+ -+ kc = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); -+ NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); -+ nla_nest_end(ctx->nl_msg, kc); -+ -+ ret = ovpn_nl_msg_send(ctx, NULL); -+nla_put_failure: -+ nl_ctx_free(ctx); -+ return ret; -+} -+ -+/** -+ * Helper function used to easily add attributes to a rtnl message -+ */ -+static int ovpn_addattr(struct nlmsghdr *n, int maxlen, int type, -+ const void *data, int alen) -+{ -+ int len = RTA_LENGTH(alen); -+ struct rtattr *rta; -+ -+ if ((int)(NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len)) > maxlen) { -+ fprintf(stderr, "%s: rtnl: message exceeded bound of %d\n", -+ __func__, maxlen); -+ return -EMSGSIZE; -+ } -+ -+ rta = nlmsg_tail(n); -+ rta->rta_type = type; -+ rta->rta_len = len; -+ -+ if (!data) -+ memset(RTA_DATA(rta), 0, alen); -+ else -+ memcpy(RTA_DATA(rta), data, alen); -+ -+ n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len); -+ -+ return 0; -+} -+ -+static struct rtattr *ovpn_nest_start(struct nlmsghdr *msg, size_t max_size, -+ int attr) -+{ -+ struct rtattr *nest = nlmsg_tail(msg); -+ -+ if (ovpn_addattr(msg, max_size, attr, NULL, 0) < 0) -+ return NULL; -+ -+ return nest; -+} -+ -+static void ovpn_nest_end(struct nlmsghdr *msg, struct rtattr *nest) -+{ -+ nest->rta_len = (uint8_t *)nlmsg_tail(msg) - (uint8_t *)nest; -+} -+ -+#define RT_SNDBUF_SIZE (1024 * 2) -+#define RT_RCVBUF_SIZE (1024 * 4) -+ -+/** -+ * Open RTNL socket -+ */ -+static int ovpn_rt_socket(void) -+{ -+ int sndbuf = RT_SNDBUF_SIZE, rcvbuf = RT_RCVBUF_SIZE, fd; -+ -+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); -+ if (fd < 0) { -+ fprintf(stderr, "%s: cannot open netlink socket\n", __func__); -+ return fd; -+ } -+ -+ if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, -+ sizeof(sndbuf)) < 0) { -+ fprintf(stderr, "%s: SO_SNDBUF\n", __func__); -+ close(fd); -+ return -1; -+ } -+ -+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, -+ sizeof(rcvbuf)) < 0) { -+ fprintf(stderr, "%s: SO_RCVBUF\n", __func__); -+ close(fd); -+ return -1; -+ } -+ -+ return fd; -+} -+ -+/** -+ * Bind socket to Netlink subsystem -+ */ -+static int ovpn_rt_bind(int fd, uint32_t groups) -+{ -+ struct sockaddr_nl local = { 0 }; -+ socklen_t addr_len; -+ -+ local.nl_family = AF_NETLINK; -+ local.nl_groups = groups; -+ -+ if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) { -+ fprintf(stderr, "%s: cannot bind netlink socket: %d\n", -+ __func__, errno); -+ return -errno; -+ } -+ -+ addr_len = sizeof(local); -+ if (getsockname(fd, (struct sockaddr *)&local, &addr_len) < 0) { -+ fprintf(stderr, 
"%s: cannot getsockname: %d\n", __func__, -+ errno); -+ return -errno; -+ } -+ -+ if (addr_len != sizeof(local)) { -+ fprintf(stderr, "%s: wrong address length %d\n", __func__, -+ addr_len); -+ return -EINVAL; -+ } -+ -+ if (local.nl_family != AF_NETLINK) { -+ fprintf(stderr, "%s: wrong address family %d\n", __func__, -+ local.nl_family); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+typedef int (*ovpn_parse_reply_cb)(struct nlmsghdr *msg, void *arg); -+ -+/** -+ * Send Netlink message and run callback on reply (if specified) -+ */ -+static int ovpn_rt_send(struct nlmsghdr *payload, pid_t peer, -+ unsigned int groups, ovpn_parse_reply_cb cb, -+ void *arg_cb) -+{ -+ int len, rem_len, fd, ret, rcv_len; -+ struct sockaddr_nl nladdr = { 0 }; -+ struct nlmsgerr *err; -+ struct nlmsghdr *h; -+ char buf[1024 * 16]; -+ struct iovec iov = { -+ .iov_base = payload, -+ .iov_len = payload->nlmsg_len, -+ }; -+ struct msghdr nlmsg = { -+ .msg_name = &nladdr, -+ .msg_namelen = sizeof(nladdr), -+ .msg_iov = &iov, -+ .msg_iovlen = 1, -+ }; -+ -+ nladdr.nl_family = AF_NETLINK; -+ nladdr.nl_pid = peer; -+ nladdr.nl_groups = groups; -+ -+ payload->nlmsg_seq = time(NULL); -+ -+ /* no need to send reply */ -+ if (!cb) -+ payload->nlmsg_flags |= NLM_F_ACK; -+ -+ fd = ovpn_rt_socket(); -+ if (fd < 0) { -+ fprintf(stderr, "%s: can't open rtnl socket\n", __func__); -+ return -errno; -+ } -+ -+ ret = ovpn_rt_bind(fd, 0); -+ if (ret < 0) { -+ fprintf(stderr, "%s: can't bind rtnl socket\n", __func__); -+ ret = -errno; -+ goto out; -+ } -+ -+ ret = sendmsg(fd, &nlmsg, 0); -+ if (ret < 0) { -+ fprintf(stderr, "%s: rtnl: error on sendmsg()\n", __func__); -+ ret = -errno; -+ goto out; -+ } -+ -+ /* prepare buffer to store RTNL replies */ -+ memset(buf, 0, sizeof(buf)); -+ iov.iov_base = buf; -+ -+ while (1) { -+ /* -+ * iov_len is modified by recvmsg(), therefore has to be initialized before -+ * using it again -+ */ -+ iov.iov_len = sizeof(buf); -+ rcv_len = recvmsg(fd, &nlmsg, 0); -+ if (rcv_len < 0) { -+ if (errno == EINTR || errno == EAGAIN) { -+ fprintf(stderr, "%s: interrupted call\n", -+ __func__); -+ continue; -+ } -+ fprintf(stderr, "%s: rtnl: error on recvmsg()\n", -+ __func__); -+ ret = -errno; -+ goto out; -+ } -+ -+ if (rcv_len == 0) { -+ fprintf(stderr, -+ "%s: rtnl: socket reached unexpected EOF\n", -+ __func__); -+ ret = -EIO; -+ goto out; -+ } -+ -+ if (nlmsg.msg_namelen != sizeof(nladdr)) { -+ fprintf(stderr, -+ "%s: sender address length: %u (expected %zu)\n", -+ __func__, nlmsg.msg_namelen, sizeof(nladdr)); -+ ret = -EIO; -+ goto out; -+ } -+ -+ h = (struct nlmsghdr *)buf; -+ while (rcv_len >= (int)sizeof(*h)) { -+ len = h->nlmsg_len; -+ rem_len = len - sizeof(*h); -+ -+ if (rem_len < 0 || len > rcv_len) { -+ if (nlmsg.msg_flags & MSG_TRUNC) { -+ fprintf(stderr, "%s: truncated message\n", -+ __func__); -+ ret = -EIO; -+ goto out; -+ } -+ fprintf(stderr, "%s: malformed message: len=%d\n", -+ __func__, len); -+ ret = -EIO; -+ goto out; -+ } -+ -+ if (h->nlmsg_type == NLMSG_DONE) { -+ ret = 0; -+ goto out; -+ } -+ -+ if (h->nlmsg_type == NLMSG_ERROR) { -+ err = (struct nlmsgerr *)NLMSG_DATA(h); -+ if (rem_len < (int)sizeof(struct nlmsgerr)) { -+ fprintf(stderr, "%s: ERROR truncated\n", -+ __func__); -+ ret = -EIO; -+ goto out; -+ } -+ -+ if (err->error) { -+ fprintf(stderr, "%s: (%d) %s\n", -+ __func__, err->error, -+ strerror(-err->error)); -+ ret = err->error; -+ goto out; -+ } -+ -+ ret = 0; -+ if (cb) { -+ int r = cb(h, arg_cb); -+ -+ if (r <= 0) -+ ret = r; -+ } -+ goto out; -+ } -+ -+ if (cb) { 
-+ int r = cb(h, arg_cb); -+ -+ if (r <= 0) { -+ ret = r; -+ goto out; -+ } -+ } else { -+ fprintf(stderr, "%s: RTNL: unexpected reply\n", -+ __func__); -+ } -+ -+ rcv_len -= NLMSG_ALIGN(len); -+ h = (struct nlmsghdr *)((uint8_t *)h + -+ NLMSG_ALIGN(len)); -+ } -+ -+ if (nlmsg.msg_flags & MSG_TRUNC) { -+ fprintf(stderr, "%s: message truncated\n", __func__); -+ continue; -+ } -+ -+ if (rcv_len) { -+ fprintf(stderr, "%s: rtnl: %d not parsed bytes\n", -+ __func__, rcv_len); -+ ret = -1; -+ goto out; -+ } -+ } -+out: -+ close(fd); -+ -+ return ret; -+} -+ -+struct ovpn_link_req { -+ struct nlmsghdr n; -+ struct ifinfomsg i; -+ char buf[256]; -+}; -+ -+static int ovpn_new_iface(struct ovpn_ctx *ovpn) -+{ -+ struct rtattr *linkinfo, *data; -+ struct ovpn_link_req req = { 0 }; -+ int ret = -1; -+ -+ fprintf(stdout, "Creating interface %s with mode %u\n", ovpn->ifname, -+ ovpn->mode); -+ -+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.i)); -+ req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; -+ req.n.nlmsg_type = RTM_NEWLINK; -+ -+ if (ovpn_addattr(&req.n, sizeof(req), IFLA_IFNAME, ovpn->ifname, -+ strlen(ovpn->ifname) + 1) < 0) -+ goto err; -+ -+ linkinfo = ovpn_nest_start(&req.n, sizeof(req), IFLA_LINKINFO); -+ if (!linkinfo) -+ goto err; -+ -+ if (ovpn_addattr(&req.n, sizeof(req), IFLA_INFO_KIND, OVPN_FAMILY_NAME, -+ strlen(OVPN_FAMILY_NAME) + 1) < 0) -+ goto err; -+ -+ if (ovpn->mode_set) { -+ data = ovpn_nest_start(&req.n, sizeof(req), IFLA_INFO_DATA); -+ if (!data) -+ goto err; -+ -+ if (ovpn_addattr(&req.n, sizeof(req), IFLA_OVPN_MODE, -+ &ovpn->mode, sizeof(uint8_t)) < 0) -+ goto err; -+ -+ ovpn_nest_end(&req.n, data); -+ } -+ -+ ovpn_nest_end(&req.n, linkinfo); -+ -+ req.i.ifi_family = AF_PACKET; -+ -+ ret = ovpn_rt_send(&req.n, 0, 0, NULL, NULL); -+err: -+ return ret; -+} -+ -+static int ovpn_del_iface(struct ovpn_ctx *ovpn) -+{ -+ struct ovpn_link_req req = { 0 }; -+ -+ fprintf(stdout, "Deleting interface %s ifindex %u\n", ovpn->ifname, -+ ovpn->ifindex); -+ -+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.i)); -+ req.n.nlmsg_flags = NLM_F_REQUEST; -+ req.n.nlmsg_type = RTM_DELLINK; -+ -+ req.i.ifi_family = AF_PACKET; -+ req.i.ifi_index = ovpn->ifindex; -+ -+ return ovpn_rt_send(&req.n, 0, 0, NULL, NULL); -+} -+ -+static int nl_seq_check(struct nl_msg (*msg)__always_unused, -+ void (*arg)__always_unused) -+{ -+ return NL_OK; -+} -+ -+struct mcast_handler_args { -+ const char *group; -+ int id; -+}; -+ -+static int mcast_family_handler(struct nl_msg *msg, void *arg) -+{ -+ struct mcast_handler_args *grp = arg; -+ struct nlattr *tb[CTRL_ATTR_MAX + 1]; -+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); -+ struct nlattr *mcgrp; -+ int rem_mcgrp; -+ -+ nla_parse(tb, CTRL_ATTR_MAX, genlmsg_attrdata(gnlh, 0), -+ genlmsg_attrlen(gnlh, 0), NULL); -+ -+ if (!tb[CTRL_ATTR_MCAST_GROUPS]) -+ return NL_SKIP; -+ -+ nla_for_each_nested(mcgrp, tb[CTRL_ATTR_MCAST_GROUPS], rem_mcgrp) { -+ struct nlattr *tb_mcgrp[CTRL_ATTR_MCAST_GRP_MAX + 1]; -+ -+ nla_parse(tb_mcgrp, CTRL_ATTR_MCAST_GRP_MAX, -+ nla_data(mcgrp), nla_len(mcgrp), NULL); -+ -+ if (!tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME] || -+ !tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]) -+ continue; -+ if (strncmp(nla_data(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]), -+ grp->group, nla_len(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]))) -+ continue; -+ grp->id = nla_get_u32(tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]); -+ break; -+ } -+ -+ return NL_SKIP; -+} -+ -+static int mcast_error_handler(struct sockaddr_nl (*nla)__always_unused, -+ struct nlmsgerr *err, void *arg) -+{ -+ int *ret = 
arg; -+ -+ *ret = err->error; -+ return NL_STOP; -+} -+ -+static int mcast_ack_handler(struct nl_msg (*msg)__always_unused, void *arg) -+{ -+ int *ret = arg; -+ -+ *ret = 0; -+ return NL_STOP; -+} -+ -+static int ovpn_handle_msg(struct nl_msg *msg, void *arg) -+{ -+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); -+ struct nlattr *attrs[OVPN_A_MAX + 1]; -+ struct nlmsghdr *nlh = nlmsg_hdr(msg); -+ //enum ovpn_del_peer_reason reason; -+ char ifname[IF_NAMESIZE]; -+ int *ret = arg; -+ __u32 ifindex; -+ -+ fprintf(stderr, "received message from ovpn-dco\n"); -+ -+ *ret = -1; -+ -+ if (!genlmsg_valid_hdr(nlh, 0)) { -+ fprintf(stderr, "invalid header\n"); -+ return NL_STOP; -+ } -+ -+ if (nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0), -+ genlmsg_attrlen(gnlh, 0), NULL)) { -+ fprintf(stderr, "received bogus data from ovpn-dco\n"); -+ return NL_STOP; -+ } -+ -+ if (!attrs[OVPN_A_IFINDEX]) { -+ fprintf(stderr, "no ifindex in this message\n"); -+ return NL_STOP; -+ } -+ -+ ifindex = nla_get_u32(attrs[OVPN_A_IFINDEX]); -+ if (!if_indextoname(ifindex, ifname)) { -+ fprintf(stderr, "cannot resolve ifname for ifindex: %u\n", -+ ifindex); -+ return NL_STOP; -+ } -+ -+ switch (gnlh->cmd) { -+ case OVPN_CMD_PEER_DEL_NTF: -+ /*if (!attrs[OVPN_A_DEL_PEER_REASON]) { -+ * fprintf(stderr, "no reason in DEL_PEER message\n"); -+ * return NL_STOP; -+ *} -+ * -+ *reason = nla_get_u8(attrs[OVPN_A_DEL_PEER_REASON]); -+ *fprintf(stderr, -+ * "received CMD_DEL_PEER, ifname: %s reason: %d\n", -+ * ifname, reason); -+ */ -+ fprintf(stdout, "received CMD_PEER_DEL_NTF\n"); -+ break; -+ case OVPN_CMD_KEY_SWAP_NTF: -+ fprintf(stdout, "received CMD_KEY_SWAP_NTF\n"); -+ break; -+ default: -+ fprintf(stderr, "received unknown command: %d\n", gnlh->cmd); -+ return NL_STOP; -+ } -+ -+ *ret = 0; -+ return NL_OK; -+} -+ -+static int ovpn_get_mcast_id(struct nl_sock *sock, const char *family, -+ const char *group) -+{ -+ struct nl_msg *msg; -+ struct nl_cb *cb; -+ int ret, ctrlid; -+ struct mcast_handler_args grp = { -+ .group = group, -+ .id = -ENOENT, -+ }; -+ -+ msg = nlmsg_alloc(); -+ if (!msg) -+ return -ENOMEM; -+ -+ cb = nl_cb_alloc(NL_CB_DEFAULT); -+ if (!cb) { -+ ret = -ENOMEM; -+ goto out_fail_cb; -+ } -+ -+ ctrlid = genl_ctrl_resolve(sock, "nlctrl"); -+ -+ genlmsg_put(msg, 0, 0, ctrlid, 0, 0, CTRL_CMD_GETFAMILY, 0); -+ -+ ret = -ENOBUFS; -+ NLA_PUT_STRING(msg, CTRL_ATTR_FAMILY_NAME, family); -+ -+ ret = nl_send_auto_complete(sock, msg); -+ if (ret < 0) -+ goto nla_put_failure; -+ -+ ret = 1; -+ -+ nl_cb_err(cb, NL_CB_CUSTOM, mcast_error_handler, &ret); -+ nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, mcast_ack_handler, &ret); -+ nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, mcast_family_handler, &grp); -+ -+ while (ret > 0) -+ nl_recvmsgs(sock, cb); -+ -+ if (ret == 0) -+ ret = grp.id; -+ nla_put_failure: -+ nl_cb_put(cb); -+ out_fail_cb: -+ nlmsg_free(msg); -+ return ret; -+} -+ -+static int ovpn_listen_mcast(void) -+{ -+ struct nl_sock *sock; -+ struct nl_cb *cb; -+ int mcid, ret; -+ -+ sock = nl_socket_alloc(); -+ if (!sock) { -+ fprintf(stderr, "cannot allocate netlink socket\n"); -+ goto err_free; -+ } -+ -+ nl_socket_set_buffer_size(sock, 8192, 8192); -+ -+ ret = genl_connect(sock); -+ if (ret < 0) { -+ fprintf(stderr, "cannot connect to generic netlink: %s\n", -+ nl_geterror(ret)); -+ goto err_free; -+ } -+ -+ mcid = ovpn_get_mcast_id(sock, OVPN_FAMILY_NAME, OVPN_MCGRP_PEERS); -+ if (mcid < 0) { -+ fprintf(stderr, "cannot get mcast group: %s\n", -+ nl_geterror(mcid)); -+ goto err_free; -+ } -+ -+ ret = 
nl_socket_add_membership(sock, mcid);
-+	if (ret) {
-+		fprintf(stderr, "failed to join mcast group: %d\n", ret);
-+		goto err_free;
-+	}
-+
-+	ret = 1;
-+	cb = nl_cb_alloc(NL_CB_DEFAULT);
-+	nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check, NULL);
-+	nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, ovpn_handle_msg, &ret);
-+	nl_cb_err(cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &ret);
-+
-+	while (ret == 1) {
-+		int err = nl_recvmsgs(sock, cb);
-+
-+		if (err < 0) {
-+			fprintf(stderr,
-+				"cannot receive netlink message: (%d) %s\n",
-+				err, nl_geterror(-err));
-+			ret = -1;
-+			break;
-+		}
-+	}
-+
-+	nl_cb_put(cb);
-+err_free:
-+	nl_socket_free(sock);
-+	return ret;
-+}
-+
-+static void usage(const char *cmd)
-+{
-+	fprintf(stderr,
-+		"Usage %s <command> <iface> [arguments..]\n",
-+		cmd);
-+	fprintf(stderr, "where <command> can be one of the following\n\n");
-+
-+	fprintf(stderr, "* new_iface <iface> [mode]: create new ovpn interface\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tmode:\n");
-+	fprintf(stderr, "\t\t- P2P for peer-to-peer mode (i.e. client)\n");
-+	fprintf(stderr, "\t\t- MP for multi-peer mode (i.e. server)\n");
-+
-+	fprintf(stderr, "* del_iface <iface>: delete ovpn interface\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+
-+	fprintf(stderr,
-+		"* listen <iface> <lport> <peers_file> [ipv6]: listen for incoming peer TCP connections\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tlport: TCP port to listen to\n");
-+	fprintf(stderr,
-+		"\tpeers_file: file containing one peer per line: Line format:\n");
-+	fprintf(stderr, "\t\t<peer_id> <vpnaddr>\n");
-+	fprintf(stderr,
-+		"\tipv6: whether the socket should listen to the IPv6 wildcard address\n");
-+
-+	fprintf(stderr,
-+		"* connect <iface> <peer_id> <raddr> <rport> [key_file]: start connecting peer of TCP-based VPN session\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tpeer_id: peer ID of the connecting peer\n");
-+	fprintf(stderr, "\traddr: peer IP address to connect to\n");
-+	fprintf(stderr, "\trport: peer TCP port to connect to\n");
-+	fprintf(stderr,
-+		"\tkey_file: file containing the symmetric key for encryption\n");
-+
-+	fprintf(stderr,
-+		"* new_peer <iface> <peer_id> <lport> <raddr> <rport> [vpnaddr]: add new peer\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tlport: local UDP port to bind to\n");
-+	fprintf(stderr,
-+		"\tpeer_id: peer ID to be used in data packets to/from this peer\n");
-+	fprintf(stderr, "\traddr: peer IP address\n");
-+	fprintf(stderr, "\trport: peer UDP port\n");
-+	fprintf(stderr, "\tvpnaddr: peer VPN IP\n");
-+
-+	fprintf(stderr,
-+		"* new_multi_peer <iface> <lport> <peers_file>: add multiple peers as listed in the file\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tlport: local UDP port to bind to\n");
-+	fprintf(stderr,
-+		"\tpeers_file: text file containing one peer per line. Line format:\n");
-+	fprintf(stderr, "\t\t<peer_id> <raddr> <rport> <vpnaddr>\n");
-+
-+	fprintf(stderr,
-+		"* set_peer <iface> <peer_id> <keepalive_interval> <keepalive_timeout>: set peer attributes\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n");
-+	fprintf(stderr,
-+		"\tkeepalive_interval: interval for sending ping messages\n");
-+	fprintf(stderr,
-+		"\tkeepalive_timeout: time after which a peer is timed out\n");
-+
-+	fprintf(stderr, "* del_peer <iface> <peer_id>: delete peer\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tpeer_id: peer ID of the peer to delete\n");
-+
-+	fprintf(stderr, "* get_peer <iface> [peer_id]: retrieve peer(s) status\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr,
-+		"\tpeer_id: peer ID of the peer to query. All peers are returned if omitted\n");
-+
-+	fprintf(stderr,
-+		"* new_key <iface> <peer_id> <slot> <key_id> <cipher> <key_dir> <key_file>: set data channel key\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr,
-+		"\tpeer_id: peer ID of the peer to configure the key for\n");
-+	fprintf(stderr, "\tslot: either 1 (primary) or 2 (secondary)\n");
-+	fprintf(stderr, "\tkey_id: an ID from 0 to 7\n");
-+	fprintf(stderr,
-+		"\tcipher: cipher to use, supported: aes (AES-GCM), chachapoly (CHACHA20POLY1305)\n");
-+	fprintf(stderr,
-+		"\tkey_dir: key direction, must 0 on one host and 1 on the other\n");
-+	fprintf(stderr, "\tkey_file: file containing the pre-shared key\n");
-+
-+	fprintf(stderr,
-+		"* del_key <iface> <peer_id> [slot]: erase existing data channel key\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n");
-+	fprintf(stderr, "\tslot: slot to erase. PRIMARY if omitted\n");
-+
-+	fprintf(stderr,
-+		"* get_key <iface> <peer_id> <slot>: retrieve non sensible key data\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tpeer_id: peer ID of the peer to query\n");
-+	fprintf(stderr, "\tslot: either 1 (primary) or 2 (secondary)\n");
-+
-+	fprintf(stderr,
-+		"* swap_keys <iface> <peer_id>: swap content of primary and secondary key slots\n");
-+	fprintf(stderr, "\tiface: ovpn interface name\n");
-+	fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n");
-+
-+	fprintf(stderr,
-+		"* listen_mcast: listen to ovpn netlink multicast messages\n");
-+}
-+
-+static int ovpn_parse_remote(struct ovpn_ctx *ovpn, const char *host,
-+			     const char *service, const char *vpnip)
-+{
-+	int ret;
-+	struct addrinfo *result;
-+	struct addrinfo hints = {
-+		.ai_family = ovpn->sa_family,
-+		.ai_socktype = SOCK_DGRAM,
-+		.ai_protocol = IPPROTO_UDP
-+	};
-+
-+	if (host) {
-+		ret = getaddrinfo(host, service, &hints, &result);
-+		if (ret == EAI_NONAME || ret == EAI_FAIL)
-+			return -1;
-+
-+		if (!(result->ai_family == AF_INET &&
-+		      result->ai_addrlen == sizeof(struct sockaddr_in)) &&
-+		    !(result->ai_family == AF_INET6 &&
-+		      result->ai_addrlen == sizeof(struct sockaddr_in6))) {
-+			ret = -EINVAL;
-+			goto out;
-+		}
-+
-+		memcpy(&ovpn->remote, result->ai_addr, result->ai_addrlen);
-+	}
-+
-+	if (vpnip) {
-+		ret = getaddrinfo(vpnip, NULL, &hints, &result);
-+		if (ret == EAI_NONAME || ret == EAI_FAIL)
-+			return -1;
-+
-+		if (!(result->ai_family == AF_INET &&
-+		      result->ai_addrlen == sizeof(struct sockaddr_in)) &&
-+		    !(result->ai_family == AF_INET6 &&
-+		      result->ai_addrlen == sizeof(struct sockaddr_in6))) {
-+			ret = -EINVAL;
-+			goto out;
-+		}
-+
-+		memcpy(&ovpn->peer_ip, result->ai_addr, result->ai_addrlen);
-+		ovpn->sa_family = result->ai_family;
-+
-+		ovpn->peer_ip_set = true;
-+	}
-+
-+	ret = 0;
-+out:
-+	freeaddrinfo(result);
-+	return ret;
-+}
-+
-+static int
ovpn_parse_new_peer(struct ovpn_ctx *ovpn, const char *peer_id, -+ const char *raddr, const char *rport, -+ const char *vpnip) -+{ -+ ovpn->peer_id = strtoul(peer_id, NULL, 10); -+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ -+ return ovpn_parse_remote(ovpn, raddr, rport, vpnip); -+} -+ -+static int ovpn_parse_key_slot(const char *arg, struct ovpn_ctx *ovpn) -+{ -+ int slot = strtoul(arg, NULL, 10); -+ -+ if (errno == ERANGE || slot < 1 || slot > 2) { -+ fprintf(stderr, "key slot out of range\n"); -+ return -1; -+ } -+ -+ switch (slot) { -+ case 1: -+ ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY; -+ break; -+ case 2: -+ ovpn->key_slot = OVPN_KEY_SLOT_SECONDARY; -+ break; -+ } -+ -+ return 0; -+} -+ -+static int ovpn_send_tcp_data(int socket) -+{ -+ uint16_t len = htons(1000); -+ uint8_t buf[1002]; -+ int ret; -+ -+ memcpy(buf, &len, sizeof(len)); -+ memset(buf + sizeof(len), 0x86, sizeof(buf) - sizeof(len)); -+ -+ ret = send(socket, buf, sizeof(buf), 0); -+ -+ fprintf(stdout, "Sent %u bytes over TCP socket\n", ret); -+ -+ return ret > 0 ? 0 : ret; -+} -+ -+static int ovpn_recv_tcp_data(int socket) -+{ -+ uint8_t buf[1002]; -+ uint16_t len; -+ int ret; -+ -+ ret = recv(socket, buf, sizeof(buf), 0); -+ -+ if (ret < 2) { -+ fprintf(stderr, ">>>> Error while reading TCP data: %d\n", ret); -+ return ret; -+ } -+ -+ memcpy(&len, buf, sizeof(len)); -+ len = ntohs(len); -+ -+ fprintf(stdout, ">>>> Received %u bytes over TCP socket, header: %u\n", -+ ret, len); -+ -+/* int i; -+ * for (i = 2; i < ret; i++) { -+ * fprintf(stdout, "0x%.2x ", buf[i]); -+ * if (i && !((i - 2) % 16)) -+ * fprintf(stdout, "\n"); -+ * } -+ * fprintf(stdout, "\n"); -+ */ -+ return 0; -+} -+ -+static enum ovpn_cmd ovpn_parse_cmd(const char *cmd) -+{ -+ if (!strcmp(cmd, "new_iface")) -+ return CMD_NEW_IFACE; -+ -+ if (!strcmp(cmd, "del_iface")) -+ return CMD_DEL_IFACE; -+ -+ if (!strcmp(cmd, "listen")) -+ return CMD_LISTEN; -+ -+ if (!strcmp(cmd, "connect")) -+ return CMD_CONNECT; -+ -+ if (!strcmp(cmd, "new_peer")) -+ return CMD_NEW_PEER; -+ -+ if (!strcmp(cmd, "new_multi_peer")) -+ return CMD_NEW_MULTI_PEER; -+ -+ if (!strcmp(cmd, "set_peer")) -+ return CMD_SET_PEER; -+ -+ if (!strcmp(cmd, "del_peer")) -+ return CMD_DEL_PEER; -+ -+ if (!strcmp(cmd, "get_peer")) -+ return CMD_GET_PEER; -+ -+ if (!strcmp(cmd, "new_key")) -+ return CMD_NEW_KEY; -+ -+ if (!strcmp(cmd, "del_key")) -+ return CMD_DEL_KEY; -+ -+ if (!strcmp(cmd, "get_key")) -+ return CMD_GET_KEY; -+ -+ if (!strcmp(cmd, "swap_keys")) -+ return CMD_SWAP_KEYS; -+ -+ if (!strcmp(cmd, "listen_mcast")) -+ return CMD_LISTEN_MCAST; -+ -+ return CMD_INVALID; -+} -+ -+static int ovpn_run_cmd(struct ovpn_ctx *ovpn) -+{ -+ char peer_id[10], vpnip[INET6_ADDRSTRLEN], raddr[128], rport[10]; -+ int n, ret; -+ FILE *fp; -+ -+ switch (ovpn->cmd) { -+ case CMD_NEW_IFACE: -+ ret = ovpn_new_iface(ovpn); -+ break; -+ case CMD_DEL_IFACE: -+ ret = ovpn_del_iface(ovpn); -+ break; -+ case CMD_LISTEN: -+ ret = ovpn_listen(ovpn, ovpn->sa_family); -+ if (ret < 0) { -+ fprintf(stderr, "cannot listen on TCP socket\n"); -+ return ret; -+ } -+ -+ fp = fopen(ovpn->peers_file, "r"); -+ if (!fp) { -+ fprintf(stderr, "cannot open file: %s\n", -+ ovpn->peers_file); -+ return -1; -+ } -+ -+ while ((n = fscanf(fp, "%s %s\n", peer_id, vpnip)) == 2) { -+ struct ovpn_ctx peer_ctx = { 0 }; -+ -+ peer_ctx.ifindex = ovpn->ifindex; -+ peer_ctx.sa_family = ovpn->sa_family; -+ -+ peer_ctx.socket = ovpn_accept(ovpn); -+ if 
(peer_ctx.socket < 0) { -+ fprintf(stderr, "cannot accept connection!\n"); -+ return -1; -+ } -+ -+ /* store the socket of the first peer to test TCP I/O */ -+ if (ovpn->cli_socket < 0) -+ ovpn->cli_socket = peer_ctx.socket; -+ -+ ret = ovpn_parse_new_peer(&peer_ctx, peer_id, NULL, -+ NULL, vpnip); -+ if (ret < 0) { -+ fprintf(stderr, "error while parsing line\n"); -+ return -1; -+ } -+ -+ ret = ovpn_new_peer(&peer_ctx, true); -+ if (ret < 0) { -+ fprintf(stderr, -+ "cannot add peer to VPN: %s %s\n", -+ peer_id, vpnip); -+ return ret; -+ } -+ } -+ -+ if (ovpn->cli_socket >= 0) -+ ret = ovpn_recv_tcp_data(ovpn->cli_socket); -+ -+ break; -+ case CMD_CONNECT: -+ ret = ovpn_connect(ovpn); -+ if (ret < 0) { -+ fprintf(stderr, "cannot connect TCP socket\n"); -+ return ret; -+ } -+ -+ ret = ovpn_new_peer(ovpn, true); -+ if (ret < 0) { -+ fprintf(stderr, "cannot add peer to VPN\n"); -+ close(ovpn->socket); -+ return ret; -+ } -+ -+ if (ovpn->cipher != OVPN_CIPHER_ALG_NONE) { -+ ret = ovpn_new_key(ovpn); -+ if (ret < 0) { -+ fprintf(stderr, "cannot set key\n"); -+ return ret; -+ } -+ } -+ -+ ret = ovpn_send_tcp_data(ovpn->socket); -+ break; -+ case CMD_NEW_PEER: -+ ret = ovpn_udp_socket(ovpn, AF_INET6); //ovpn->sa_family ? -+ if (ret < 0) -+ return ret; -+ -+ ret = ovpn_new_peer(ovpn, false); -+ break; -+ case CMD_NEW_MULTI_PEER: -+ ret = ovpn_udp_socket(ovpn, AF_INET6); -+ if (ret < 0) -+ return ret; -+ -+ fp = fopen(ovpn->peers_file, "r"); -+ if (!fp) { -+ fprintf(stderr, "cannot open file: %s\n", -+ ovpn->peers_file); -+ return -1; -+ } -+ -+ while ((n = fscanf(fp, "%s %s %s %s\n", peer_id, raddr, rport, -+ vpnip)) == 4) { -+ struct ovpn_ctx peer_ctx = { 0 }; -+ -+ peer_ctx.ifindex = ovpn->ifindex; -+ peer_ctx.socket = ovpn->socket; -+ peer_ctx.sa_family = AF_UNSPEC; -+ -+ ret = ovpn_parse_new_peer(&peer_ctx, peer_id, raddr, -+ rport, vpnip); -+ if (ret < 0) { -+ fprintf(stderr, "error while parsing line\n"); -+ return -1; -+ } -+ -+ ret = ovpn_new_peer(&peer_ctx, false); -+ if (ret < 0) { -+ fprintf(stderr, -+ "cannot add peer to VPN: %s %s %s %s\n", -+ peer_id, raddr, rport, vpnip); -+ return ret; -+ } -+ } -+ break; -+ case CMD_SET_PEER: -+ ret = ovpn_set_peer(ovpn); -+ break; -+ case CMD_DEL_PEER: -+ ret = ovpn_del_peer(ovpn); -+ break; -+ case CMD_GET_PEER: -+ if (ovpn->peer_id == PEER_ID_UNDEF) -+ fprintf(stderr, "List of peers connected to: %s\n", -+ ovpn->ifname); -+ -+ ret = ovpn_get_peer(ovpn); -+ break; -+ case CMD_NEW_KEY: -+ ret = ovpn_new_key(ovpn); -+ break; -+ case CMD_DEL_KEY: -+ ret = ovpn_del_key(ovpn); -+ break; -+ case CMD_GET_KEY: -+ ret = ovpn_get_key(ovpn); -+ break; -+ case CMD_SWAP_KEYS: -+ ret = ovpn_swap_keys(ovpn); -+ break; -+ case CMD_LISTEN_MCAST: -+ ret = ovpn_listen_mcast(); -+ break; -+ case CMD_INVALID: -+ break; -+ } -+ -+ return ret; -+} -+ -+static int ovpn_parse_cmd_args(struct ovpn_ctx *ovpn, int argc, char *argv[]) -+{ -+ int ret; -+ -+ /* no args required for LISTEN_MCAST */ -+ if (ovpn->cmd == CMD_LISTEN_MCAST) -+ return 0; -+ -+ /* all commands need an ifname */ -+ if (argc < 3) -+ return -EINVAL; -+ -+ strscpy(ovpn->ifname, argv[2], IFNAMSIZ - 1); -+ ovpn->ifname[IFNAMSIZ - 1] = '\0'; -+ -+ /* all commands, except NEW_IFNAME, needs an ifindex */ -+ if (ovpn->cmd != CMD_NEW_IFACE) { -+ ovpn->ifindex = if_nametoindex(ovpn->ifname); -+ if (!ovpn->ifindex) { -+ fprintf(stderr, "cannot find interface: %s\n", -+ strerror(errno)); -+ return -1; -+ } -+ } -+ -+ switch (ovpn->cmd) { -+ case CMD_NEW_IFACE: -+ if (argc < 4) -+ break; -+ -+ if (!strcmp(argv[3], 
"P2P")) { -+ ovpn->mode = OVPN_MODE_P2P; -+ } else if (!strcmp(argv[3], "MP")) { -+ ovpn->mode = OVPN_MODE_MP; -+ } else { -+ fprintf(stderr, "Cannot parse iface mode: %s\n", -+ argv[3]); -+ return -1; -+ } -+ ovpn->mode_set = true; -+ break; -+ case CMD_DEL_IFACE: -+ break; -+ case CMD_LISTEN: -+ if (argc < 5) -+ return -EINVAL; -+ -+ ovpn->lport = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE || ovpn->lport > 65535) { -+ fprintf(stderr, "lport value out of range\n"); -+ return -1; -+ } -+ -+ ovpn->peers_file = argv[4]; -+ -+ if (argc > 5 && !strcmp(argv[5], "ipv6")) -+ ovpn->sa_family = AF_INET6; -+ break; -+ case CMD_CONNECT: -+ if (argc < 6) -+ return -EINVAL; -+ -+ ovpn->sa_family = AF_INET; -+ -+ ret = ovpn_parse_new_peer(ovpn, argv[3], argv[4], argv[5], -+ NULL); -+ if (ret < 0) { -+ fprintf(stderr, "Cannot parse remote peer data\n"); -+ return -1; -+ } -+ -+ if (argc > 6) { -+ ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY; -+ ovpn->key_id = 0; -+ ovpn->cipher = OVPN_CIPHER_ALG_AES_GCM; -+ ovpn->key_dir = KEY_DIR_OUT; -+ -+ ret = ovpn_parse_key(argv[6], ovpn); -+ if (ret) -+ return -1; -+ } -+ break; -+ case CMD_NEW_PEER: -+ if (argc < 7) -+ return -EINVAL; -+ -+ ovpn->lport = strtoul(argv[4], NULL, 10); -+ if (errno == ERANGE || ovpn->lport > 65535) { -+ fprintf(stderr, "lport value out of range\n"); -+ return -1; -+ } -+ -+ const char *vpnip = (argc > 7) ? argv[7] : NULL; -+ -+ ret = ovpn_parse_new_peer(ovpn, argv[3], argv[5], argv[6], -+ vpnip); -+ if (ret < 0) -+ return -1; -+ break; -+ case CMD_NEW_MULTI_PEER: -+ if (argc < 5) -+ return -EINVAL; -+ -+ ovpn->lport = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE || ovpn->lport > 65535) { -+ fprintf(stderr, "lport value out of range\n"); -+ return -1; -+ } -+ -+ ovpn->peers_file = argv[4]; -+ break; -+ case CMD_SET_PEER: -+ if (argc < 6) -+ return -EINVAL; -+ -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ -+ ovpn->keepalive_interval = strtoul(argv[4], NULL, 10); -+ if (errno == ERANGE) { -+ fprintf(stderr, -+ "keepalive interval value out of range\n"); -+ return -1; -+ } -+ -+ ovpn->keepalive_timeout = strtoul(argv[5], NULL, 10); -+ if (errno == ERANGE) { -+ fprintf(stderr, -+ "keepalive interval value out of range\n"); -+ return -1; -+ } -+ break; -+ case CMD_DEL_PEER: -+ if (argc < 4) -+ return -EINVAL; -+ -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ break; -+ case CMD_GET_PEER: -+ ovpn->peer_id = PEER_ID_UNDEF; -+ if (argc > 3) { -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ } -+ break; -+ case CMD_NEW_KEY: -+ if (argc < 9) -+ return -EINVAL; -+ -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ -+ ret = ovpn_parse_key_slot(argv[4], ovpn); -+ if (ret) -+ return -1; -+ -+ ovpn->key_id = strtoul(argv[5], NULL, 10); -+ if (errno == ERANGE || ovpn->key_id > 2) { -+ fprintf(stderr, "key ID out of range\n"); -+ return -1; -+ } -+ -+ ret = ovpn_parse_cipher(argv[6], ovpn); -+ if (ret < 0) -+ return -1; -+ -+ ret = ovpn_parse_key_direction(argv[7], ovpn); -+ if (ret < 0) -+ return -1; -+ -+ ret = ovpn_parse_key(argv[8], ovpn); -+ if (ret) -+ 
return -1; -+ break; -+ case CMD_DEL_KEY: -+ if (argc < 4) -+ return -EINVAL; -+ -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ -+ ret = ovpn_parse_key_slot(argv[4], ovpn); -+ if (ret) -+ return ret; -+ break; -+ case CMD_GET_KEY: -+ if (argc < 5) -+ return -EINVAL; -+ -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ -+ ret = ovpn_parse_key_slot(argv[4], ovpn); -+ if (ret) -+ return ret; -+ break; -+ case CMD_SWAP_KEYS: -+ if (argc < 4) -+ return -EINVAL; -+ -+ ovpn->peer_id = strtoul(argv[3], NULL, 10); -+ if (errno == ERANGE) { -+ fprintf(stderr, "peer ID value out of range\n"); -+ return -1; -+ } -+ break; -+ case CMD_LISTEN_MCAST: -+ break; -+ case CMD_INVALID: -+ break; -+ } -+ -+ return 0; -+} -+ -+int main(int argc, char *argv[]) -+{ -+ struct ovpn_ctx ovpn; -+ int ret; -+ -+ if (argc < 2) { -+ usage(argv[0]); -+ return -1; -+ } -+ -+ memset(&ovpn, 0, sizeof(ovpn)); -+ ovpn.sa_family = AF_INET; -+ ovpn.cipher = OVPN_CIPHER_ALG_NONE; -+ ovpn.cli_socket = -1; -+ -+ ovpn.cmd = ovpn_parse_cmd(argv[1]); -+ if (ovpn.cmd == CMD_INVALID) { -+ fprintf(stderr, "Error: unknown command.\n\n"); -+ usage(argv[0]); -+ return -1; -+ } -+ -+ ret = ovpn_parse_cmd_args(&ovpn, argc, argv); -+ if (ret < 0) { -+ fprintf(stderr, "Error: invalid arguments.\n\n"); -+ if (ret == -EINVAL) -+ usage(argv[0]); -+ return ret; -+ } -+ -+ ret = ovpn_run_cmd(&ovpn); -+ if (ret) -+ fprintf(stderr, "Cannot execute command: %s (%d)\n", -+ strerror(-ret), ret); -+ -+ return ret; -+} -diff --git a/tools/testing/selftests/net/ovpn/tcp_peers.txt b/tools/testing/selftests/net/ovpn/tcp_peers.txt -new file mode 100644 -index 000000000000..d753eebe8716 ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/tcp_peers.txt -@@ -0,0 +1,5 @@ -+1 5.5.5.2 -+2 5.5.5.3 -+3 5.5.5.4 -+4 5.5.5.5 -+5 5.5.5.6 -diff --git a/tools/testing/selftests/net/ovpn/test-chachapoly.sh b/tools/testing/selftests/net/ovpn/test-chachapoly.sh -new file mode 100755 -index 000000000000..79788f10d33b ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/test-chachapoly.sh -@@ -0,0 +1,9 @@ -+#!/bin/bash -+# SPDX-License-Identifier: GPL-2.0 -+# Copyright (C) 2024 OpenVPN, Inc. -+# -+# Author: Antonio Quartulli -+ -+ALG="chachapoly" -+ -+source test.sh -diff --git a/tools/testing/selftests/net/ovpn/test-float.sh b/tools/testing/selftests/net/ovpn/test-float.sh -new file mode 100755 -index 000000000000..93e1b729861d ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/test-float.sh -@@ -0,0 +1,9 @@ -+#!/bin/bash -+# SPDX-License-Identifier: GPL-2.0 -+# Copyright (C) 2024 OpenVPN, Inc. -+# -+# Author: Antonio Quartulli -+ -+FLOAT="1" -+ -+source test.sh -diff --git a/tools/testing/selftests/net/ovpn/test-tcp.sh b/tools/testing/selftests/net/ovpn/test-tcp.sh -new file mode 100755 -index 000000000000..7542f595cc56 ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/test-tcp.sh -@@ -0,0 +1,9 @@ -+#!/bin/bash -+# SPDX-License-Identifier: GPL-2.0 -+# Copyright (C) 2024 OpenVPN, Inc. 
-+# -+# Author: Antonio Quartulli -+ -+PROTO="TCP" -+ -+source test.sh -diff --git a/tools/testing/selftests/net/ovpn/test.sh b/tools/testing/selftests/net/ovpn/test.sh -new file mode 100755 -index 000000000000..07f3a82df8f3 ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/test.sh -@@ -0,0 +1,183 @@ -+#!/bin/bash -+# SPDX-License-Identifier: GPL-2.0 -+# Copyright (C) 2020-2024 OpenVPN, Inc. -+# -+# Author: Antonio Quartulli -+ -+#set -x -+set -e -+ -+UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt} -+TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt} -+OVPN_CLI=${OVPN_CLI:-./ovpn-cli} -+ALG=${ALG:-aes} -+PROTO=${PROTO:-UDP} -+FLOAT=${FLOAT:-0} -+ -+create_ns() { -+ ip netns add peer${1} -+} -+ -+setup_ns() { -+ MODE="P2P" -+ -+ if [ ${1} -eq 0 ]; then -+ MODE="MP" -+ for p in $(seq 1 ${NUM_PEERS}); do -+ ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p} -+ -+ ip -n peer0 addr add 10.10.${p}.1/24 dev veth${p} -+ ip -n peer0 link set veth${p} up -+ -+ ip -n peer${p} addr add 10.10.${p}.2/24 dev veth${p} -+ ip -n peer${p} link set veth${p} up -+ done -+ fi -+ -+ ip netns exec peer${1} ${OVPN_CLI} new_iface tun${1} $MODE -+ ip -n peer${1} addr add ${2} dev tun${1} -+ ip -n peer${1} link set tun${1} up -+} -+ -+add_peer() { -+ if [ "${PROTO}" == "UDP" ]; then -+ if [ ${1} -eq 0 ]; then -+ ip netns exec peer0 ${OVPN_CLI} new_multi_peer tun0 1 ${UDP_PEERS_FILE} -+ -+ for p in $(seq 1 ${NUM_PEERS}); do -+ ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 ${ALG} 0 \ -+ data64.key -+ done -+ else -+ ip netns exec peer${1} ${OVPN_CLI} new_peer tun${1} ${1} 1 10.10.${1}.1 1 -+ ip netns exec peer${1} ${OVPN_CLI} new_key tun${1} ${1} 1 0 ${ALG} 1 \ -+ data64.key -+ fi -+ else -+ if [ ${1} -eq 0 ]; then -+ (ip netns exec peer0 ${OVPN_CLI} listen tun0 1 ${TCP_PEERS_FILE} && { -+ for p in $(seq 1 ${NUM_PEERS}); do -+ ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 \ -+ ${ALG} 0 data64.key -+ done -+ }) & -+ sleep 5 -+ else -+ ip netns exec peer${1} ${OVPN_CLI} connect tun${1} ${1} 10.10.${1}.1 1 \ -+ data64.key -+ fi -+ fi -+} -+ -+cleanup() { -+ for p in $(seq 1 10); do -+ ip -n peer0 link del veth${p} 2>/dev/null || true -+ done -+ for p in $(seq 0 10); do -+ ip netns exec peer${p} ${OVPN_CLI} del_iface tun${p} 2>/dev/null || true -+ ip netns del peer${p} 2>/dev/null || true -+ done -+} -+ -+if [ "${PROTO}" == "UDP" ]; then -+ NUM_PEERS=${NUM_PEERS:-$(wc -l ${UDP_PEERS_FILE} | awk '{print $1}')} -+else -+ NUM_PEERS=${NUM_PEERS:-$(wc -l ${TCP_PEERS_FILE} | awk '{print $1}')} -+fi -+ -+cleanup -+ -+modprobe -q ovpn || true -+ -+for p in $(seq 0 ${NUM_PEERS}); do -+ create_ns ${p} -+done -+ -+for p in $(seq 0 ${NUM_PEERS}); do -+ setup_ns ${p} 5.5.5.$((${p} + 1))/24 -+done -+ -+for p in $(seq 0 ${NUM_PEERS}); do -+ add_peer ${p} -+done -+ -+for p in $(seq 1 ${NUM_PEERS}); do -+ ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120 -+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 60 120 -+done -+ -+for p in $(seq 1 ${NUM_PEERS}); do -+ ip netns exec peer0 ping -qfc 1000 -w 5 5.5.5.$((${p} + 1)) -+done -+ -+if [ "$FLOAT" == "1" ]; then -+ # make clients float.. 
-+ for p in $(seq 1 ${NUM_PEERS}); do -+ ip -n peer${p} addr del 10.10.${p}.2/24 dev veth${p} -+ ip -n peer${p} addr add 10.10.${p}.3/24 dev veth${p} -+ done -+ for p in $(seq 1 ${NUM_PEERS}); do -+ ip netns exec peer${p} ping -qfc 1000 -w 5 5.5.5.1 -+ done -+fi -+ -+ip netns exec peer0 iperf3 -1 -s & -+sleep 1 -+ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1 -+ -+echo "Adding secondary key and then swap:" -+for p in $(seq 1 ${NUM_PEERS}); do -+ ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 2 1 ${ALG} 0 data64.key -+ ip netns exec peer${p} ${OVPN_CLI} new_key tun${p} ${p} 2 1 ${ALG} 1 data64.key -+ ip netns exec peer${p} ${OVPN_CLI} swap_keys tun${p} ${p} -+done -+ -+sleep 1 -+echo "Querying all peers:" -+ip netns exec peer0 ${OVPN_CLI} get_peer tun0 -+ip netns exec peer1 ${OVPN_CLI} get_peer tun1 -+ -+echo "Querying peer 1:" -+ip netns exec peer0 ${OVPN_CLI} get_peer tun0 1 -+ -+echo "Querying non-existent peer 10:" -+ip netns exec peer0 ${OVPN_CLI} get_peer tun0 10 || true -+ -+echo "Deleting peer 1:" -+ip netns exec peer0 ${OVPN_CLI} del_peer tun0 1 -+ip netns exec peer1 ${OVPN_CLI} del_peer tun1 1 -+ -+echo "Querying keys:" -+for p in $(seq 2 ${NUM_PEERS}); do -+ ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} ${p} 1 -+ ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} ${p} 2 -+done -+ -+echo "Deleting keys:" -+for p in $(seq 2 ${NUM_PEERS}); do -+ ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} ${p} 1 -+ ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} ${p} 2 -+done -+ -+echo "Setting timeout to 10s MP:" -+# bring ifaces down to prevent traffic being sent -+for p in $(seq 0 ${NUM_PEERS}); do -+ ip -n peer${p} link set tun${p} down -+done -+# set short timeout -+for p in $(seq 2 ${NUM_PEERS}); do -+ ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 10 10 || true -+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 0 0 -+done -+# wait for peers to timeout -+sleep 15 -+ -+echo "Setting timeout to 10s P2P:" -+for p in $(seq 2 ${NUM_PEERS}); do -+ ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 10 10 -+done -+sleep 15 -+ -+cleanup -+ -+modprobe -r ovpn || true -diff --git a/tools/testing/selftests/net/ovpn/udp_peers.txt b/tools/testing/selftests/net/ovpn/udp_peers.txt -new file mode 100644 -index 000000000000..32f14bd9347a ---- /dev/null -+++ b/tools/testing/selftests/net/ovpn/udp_peers.txt -@@ -0,0 +1,5 @@ -+1 10.10.1.2 1 5.5.5.2 -+2 10.10.2.2 1 5.5.5.3 -+3 10.10.3.2 1 5.5.5.4 -+4 10.10.4.2 1 5.5.5.5 -+5 10.10.5.2 1 5.5.5.6 --- -2.47.0 - -From df5fd664d8ea01bad5af3ff4c6575bda0917383c Mon Sep 17 00:00:00 2001 -From: Peter Jung -Date: Mon, 11 Nov 2024 09:22:05 +0100 -Subject: [PATCH 11/13] perf-per-core +Date: Mon, 18 Nov 2024 13:25:32 +0100 +Subject: [PATCH 09/12] perf-per-core Signed-off-by: Peter Jung --- @@ -29135,10 +19593,443 @@ index 8277c64f88db..b5a5e1411469 100644 -- 2.47.0 -From 063003cdcfa118a0e75173a1c02094c2978bc532 Mon Sep 17 00:00:00 2001 +From fe6edd05ea01c2a1b677489dfe6a1b5a4d6bd4c6 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:22:19 +0100 -Subject: [PATCH 12/13] t2 +Date: Mon, 18 Nov 2024 13:26:10 +0100 +Subject: [PATCH 10/12] pksm + +Signed-off-by: Peter Jung +--- + arch/alpha/kernel/syscalls/syscall.tbl | 3 + + arch/arm/tools/syscall.tbl | 3 + + arch/m68k/kernel/syscalls/syscall.tbl | 3 + + arch/microblaze/kernel/syscalls/syscall.tbl | 3 + + arch/mips/kernel/syscalls/syscall_n32.tbl | 3 + + arch/mips/kernel/syscalls/syscall_n64.tbl | 3 + + arch/mips/kernel/syscalls/syscall_o32.tbl | 3 + + 
arch/parisc/kernel/syscalls/syscall.tbl | 3 + + arch/powerpc/kernel/syscalls/syscall.tbl | 3 + + arch/s390/kernel/syscalls/syscall.tbl | 3 + + arch/sh/kernel/syscalls/syscall.tbl | 3 + + arch/sparc/kernel/syscalls/syscall.tbl | 3 + + arch/x86/entry/syscalls/syscall_32.tbl | 3 + + arch/x86/entry/syscalls/syscall_64.tbl | 3 + + arch/xtensa/kernel/syscalls/syscall.tbl | 3 + + include/linux/syscalls.h | 3 + + include/uapi/asm-generic/unistd.h | 9 +- + kernel/sys.c | 138 ++++++++++++++++++ + kernel/sys_ni.c | 3 + + scripts/syscall.tbl | 3 + + .../arch/powerpc/entry/syscalls/syscall.tbl | 3 + + .../perf/arch/s390/entry/syscalls/syscall.tbl | 3 + + 22 files changed, 206 insertions(+), 1 deletion(-) + +diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl +index 74720667fe09..e6a11f3c0a2e 100644 +--- a/arch/alpha/kernel/syscalls/syscall.tbl ++++ b/arch/alpha/kernel/syscalls/syscall.tbl +@@ -502,3 +502,6 @@ + 570 common lsm_set_self_attr sys_lsm_set_self_attr + 571 common lsm_list_modules sys_lsm_list_modules + 572 common mseal sys_mseal ++573 common process_ksm_enable sys_process_ksm_enable ++574 common process_ksm_disable sys_process_ksm_disable ++575 common process_ksm_status sys_process_ksm_status +diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl +index 23c98203c40f..10a3099decbe 100644 +--- a/arch/arm/tools/syscall.tbl ++++ b/arch/arm/tools/syscall.tbl +@@ -477,3 +477,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl +index 22a3cbd4c602..12d2c7594bf0 100644 +--- a/arch/m68k/kernel/syscalls/syscall.tbl ++++ b/arch/m68k/kernel/syscalls/syscall.tbl +@@ -462,3 +462,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl +index 2b81a6bd78b2..e2a93c856eed 100644 +--- a/arch/microblaze/kernel/syscalls/syscall.tbl ++++ b/arch/microblaze/kernel/syscalls/syscall.tbl +@@ -468,3 +468,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl +index 953f5b7dc723..b921fbf56fa6 100644 +--- a/arch/mips/kernel/syscalls/syscall_n32.tbl ++++ b/arch/mips/kernel/syscalls/syscall_n32.tbl +@@ -401,3 +401,6 @@ + 460 n32 lsm_set_self_attr sys_lsm_set_self_attr + 461 n32 lsm_list_modules sys_lsm_list_modules + 462 n32 mseal sys_mseal ++463 n32 process_ksm_enable sys_process_ksm_enable ++464 n32 process_ksm_disable sys_process_ksm_disable ++465 n32 process_ksm_status sys_process_ksm_status +diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl +index 1464c6be6eb3..8d7f9ddd66f4 100644 +--- 
a/arch/mips/kernel/syscalls/syscall_n64.tbl ++++ b/arch/mips/kernel/syscalls/syscall_n64.tbl +@@ -377,3 +377,6 @@ + 460 n64 lsm_set_self_attr sys_lsm_set_self_attr + 461 n64 lsm_list_modules sys_lsm_list_modules + 462 n64 mseal sys_mseal ++463 n64 process_ksm_enable sys_process_ksm_enable ++464 n64 process_ksm_disable sys_process_ksm_disable ++465 n64 process_ksm_status sys_process_ksm_status +diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl +index 2439a2491cff..9d6142739954 100644 +--- a/arch/mips/kernel/syscalls/syscall_o32.tbl ++++ b/arch/mips/kernel/syscalls/syscall_o32.tbl +@@ -450,3 +450,6 @@ + 460 o32 lsm_set_self_attr sys_lsm_set_self_attr + 461 o32 lsm_list_modules sys_lsm_list_modules + 462 o32 mseal sys_mseal ++463 o32 process_ksm_enable sys_process_ksm_enable ++464 o32 process_ksm_disable sys_process_ksm_disable ++465 o32 process_ksm_status sys_process_ksm_status +diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl +index 66dc406b12e4..9d46476fd908 100644 +--- a/arch/parisc/kernel/syscalls/syscall.tbl ++++ b/arch/parisc/kernel/syscalls/syscall.tbl +@@ -461,3 +461,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl +index ebae8415dfbb..16f71bc2f6f0 100644 +--- a/arch/powerpc/kernel/syscalls/syscall.tbl ++++ b/arch/powerpc/kernel/syscalls/syscall.tbl +@@ -553,3 +553,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl +index 01071182763e..7394bad8178e 100644 +--- a/arch/s390/kernel/syscalls/syscall.tbl ++++ b/arch/s390/kernel/syscalls/syscall.tbl +@@ -465,3 +465,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status sys_process_ksm_status +diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl +index c55fd7696d40..b9fc31221b87 100644 +--- a/arch/sh/kernel/syscalls/syscall.tbl ++++ b/arch/sh/kernel/syscalls/syscall.tbl +@@ -466,3 +466,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl +index cfdfb3707c16..0d79fd772854 100644 +--- a/arch/sparc/kernel/syscalls/syscall.tbl ++++ b/arch/sparc/kernel/syscalls/syscall.tbl +@@ -508,3 +508,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 
461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl +index 534c74b14fab..c546a30575f1 100644 +--- a/arch/x86/entry/syscalls/syscall_32.tbl ++++ b/arch/x86/entry/syscalls/syscall_32.tbl +@@ -468,3 +468,6 @@ + 460 i386 lsm_set_self_attr sys_lsm_set_self_attr + 461 i386 lsm_list_modules sys_lsm_list_modules + 462 i386 mseal sys_mseal ++463 i386 process_ksm_enable sys_process_ksm_enable ++464 i386 process_ksm_disable sys_process_ksm_disable ++465 i386 process_ksm_status sys_process_ksm_status +diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl +index 7093ee21c0d1..0fcd10ba8dfe 100644 +--- a/arch/x86/entry/syscalls/syscall_64.tbl ++++ b/arch/x86/entry/syscalls/syscall_64.tbl +@@ -386,6 +386,9 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status + + # + # Due to a historical design error, certain syscalls are numbered differently +diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl +index 67083fc1b2f5..c1aecee4ad9b 100644 +--- a/arch/xtensa/kernel/syscalls/syscall.tbl ++++ b/arch/xtensa/kernel/syscalls/syscall.tbl +@@ -433,3 +433,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h +index 5758104921e6..cc9c4fac2412 100644 +--- a/include/linux/syscalls.h ++++ b/include/linux/syscalls.h +@@ -818,6 +818,9 @@ asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); + asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, + size_t vlen, int behavior, unsigned int flags); + asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); ++asmlinkage long sys_process_ksm_enable(int pidfd, unsigned int flags); ++asmlinkage long sys_process_ksm_disable(int pidfd, unsigned int flags); ++asmlinkage long sys_process_ksm_status(int pidfd, unsigned int flags); + asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); +diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h +index 5bf6148cac2b..613e559ad6e0 100644 +--- a/include/uapi/asm-generic/unistd.h ++++ b/include/uapi/asm-generic/unistd.h +@@ -841,8 +841,15 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) + #define __NR_mseal 462 + __SYSCALL(__NR_mseal, sys_mseal) + ++#define __NR_process_ksm_enable 463 ++__SYSCALL(__NR_process_ksm_enable, sys_process_ksm_enable) ++#define __NR_process_ksm_disable 464 ++__SYSCALL(__NR_process_ksm_disable, sys_process_ksm_disable) ++#define __NR_process_ksm_status 465 ++__SYSCALL(__NR_process_ksm_status, sys_process_ksm_status) ++ + #undef __NR_syscalls +-#define __NR_syscalls 463 ++#define __NR_syscalls 466 + + /* + * 32 bit 
systems traditionally used different +diff --git a/kernel/sys.c b/kernel/sys.c +index 4da31f28fda8..fcd3aeaddd05 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -2791,6 +2791,144 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, + return error; + } + ++#ifdef CONFIG_KSM ++enum pkc_action { ++ PKSM_ENABLE = 0, ++ PKSM_DISABLE, ++ PKSM_STATUS, ++}; ++ ++static long do_process_ksm_control(int pidfd, enum pkc_action action) ++{ ++ long ret; ++ struct task_struct *task; ++ struct mm_struct *mm; ++ unsigned int f_flags; ++ ++ task = pidfd_get_task(pidfd, &f_flags); ++ if (IS_ERR(task)) { ++ ret = PTR_ERR(task); ++ goto out; ++ } ++ ++ /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ ++ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); ++ if (IS_ERR_OR_NULL(mm)) { ++ ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; ++ goto release_task; ++ } ++ ++ /* Require CAP_SYS_NICE for influencing process performance. */ ++ if (!capable(CAP_SYS_NICE)) { ++ ret = -EPERM; ++ goto release_mm; ++ } ++ ++ if (mmap_write_lock_killable(mm)) { ++ ret = -EINTR; ++ goto release_mm; ++ } ++ ++ switch (action) { ++ case PKSM_ENABLE: ++ ret = ksm_enable_merge_any(mm); ++ break; ++ case PKSM_DISABLE: ++ ret = ksm_disable_merge_any(mm); ++ break; ++ case PKSM_STATUS: ++ ret = !!test_bit(MMF_VM_MERGE_ANY, &mm->flags); ++ break; ++ } ++ ++ mmap_write_unlock(mm); ++ ++release_mm: ++ mmput(mm); ++release_task: ++ put_task_struct(task); ++out: ++ return ret; ++} ++#endif /* CONFIG_KSM */ ++ ++SYSCALL_DEFINE2(process_ksm_enable, int, pidfd, unsigned int, flags) ++{ ++#ifdef CONFIG_KSM ++ if (flags != 0) ++ return -EINVAL; ++ ++ return do_process_ksm_control(pidfd, PKSM_ENABLE); ++#else /* CONFIG_KSM */ ++ return -ENOSYS; ++#endif /* CONFIG_KSM */ ++} ++ ++SYSCALL_DEFINE2(process_ksm_disable, int, pidfd, unsigned int, flags) ++{ ++#ifdef CONFIG_KSM ++ if (flags != 0) ++ return -EINVAL; ++ ++ return do_process_ksm_control(pidfd, PKSM_DISABLE); ++#else /* CONFIG_KSM */ ++ return -ENOSYS; ++#endif /* CONFIG_KSM */ ++} ++ ++SYSCALL_DEFINE2(process_ksm_status, int, pidfd, unsigned int, flags) ++{ ++#ifdef CONFIG_KSM ++ if (flags != 0) ++ return -EINVAL; ++ ++ return do_process_ksm_control(pidfd, PKSM_STATUS); ++#else /* CONFIG_KSM */ ++ return -ENOSYS; ++#endif /* CONFIG_KSM */ ++} ++ ++#ifdef CONFIG_KSM ++static ssize_t process_ksm_enable_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", __NR_process_ksm_enable); ++} ++static struct kobj_attribute process_ksm_enable_attr = __ATTR_RO(process_ksm_enable); ++ ++static ssize_t process_ksm_disable_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", __NR_process_ksm_disable); ++} ++static struct kobj_attribute process_ksm_disable_attr = __ATTR_RO(process_ksm_disable); ++ ++static ssize_t process_ksm_status_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", __NR_process_ksm_status); ++} ++static struct kobj_attribute process_ksm_status_attr = __ATTR_RO(process_ksm_status); ++ ++static struct attribute *process_ksm_sysfs_attrs[] = { ++ &process_ksm_enable_attr.attr, ++ &process_ksm_disable_attr.attr, ++ &process_ksm_status_attr.attr, ++ NULL, ++}; ++ ++static const struct attribute_group process_ksm_sysfs_attr_group = { ++ .attrs = process_ksm_sysfs_attrs, ++ .name = "process_ksm", ++}; ++ ++static int __init process_ksm_sysfs_init(void) ++{ ++ return sysfs_create_group(kernel_kobj, 
&process_ksm_sysfs_attr_group); ++} ++subsys_initcall(process_ksm_sysfs_init); ++#endif /* CONFIG_KSM */ ++ + SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, + struct getcpu_cache __user *, unused) + { +diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c +index c00a86931f8c..d82213d68522 100644 +--- a/kernel/sys_ni.c ++++ b/kernel/sys_ni.c +@@ -186,6 +186,9 @@ COND_SYSCALL(mincore); + COND_SYSCALL(madvise); + COND_SYSCALL(process_madvise); + COND_SYSCALL(process_mrelease); ++COND_SYSCALL(process_ksm_enable); ++COND_SYSCALL(process_ksm_disable); ++COND_SYSCALL(process_ksm_status); + COND_SYSCALL(remap_file_pages); + COND_SYSCALL(mbind); + COND_SYSCALL(get_mempolicy); +diff --git a/scripts/syscall.tbl b/scripts/syscall.tbl +index 845e24eb372e..227d9cc12365 100644 +--- a/scripts/syscall.tbl ++++ b/scripts/syscall.tbl +@@ -403,3 +403,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +index ebae8415dfbb..16f71bc2f6f0 100644 +--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl ++++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +@@ -553,3 +553,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status +diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl +index 01071182763e..7394bad8178e 100644 +--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl ++++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl +@@ -465,3 +465,6 @@ + 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr + 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules + 462 common mseal sys_mseal sys_mseal ++463 common process_ksm_enable sys_process_ksm_enable sys_process_ksm_enable ++464 common process_ksm_disable sys_process_ksm_disable sys_process_ksm_disable ++465 common process_ksm_status sys_process_ksm_status sys_process_ksm_status +-- +2.47.0 + +From 770d2e8fd2693929d69488c27b284466c2cda390 Mon Sep 17 00:00:00 2001 +From: Peter Jung +Date: Mon, 18 Nov 2024 13:26:31 +0100 +Subject: [PATCH 11/12] t2 Signed-off-by: Peter Jung --- @@ -29293,7 +20184,7 @@ index 14e093da3ccd..ccd7bd29a6d6 100644 ---- diff --git a/MAINTAINERS b/MAINTAINERS -index f509050e63ed..a3bbf3d5fb9e 100644 +index 889e074c143b..b737a7b5e767 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7013,6 +7013,12 @@ S: Supported @@ -29310,10 +20201,10 @@ index f509050e63ed..a3bbf3d5fb9e 100644 S: Orphan T: git https://gitlab.freedesktop.org/drm/misc/kernel.git diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index f6a6fc6a4f5c..e71b6dfad958 100644 +index ebc13f056153..b232245cf6b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -@@ -2260,6 +2260,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, +@@ -2262,6 +2262,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, int ret, retry = 0, i; bool supports_atomic = false; @@ -39384,10 
+30275,10 @@ index 4427572b2477..b60c99d61882 100755 -- 2.47.0 -From 126ef40989e28bba3ff5a4bb41333942de1c9dbf Mon Sep 17 00:00:00 2001 +From e2b61e68148654e850dc0dc004907522f3f6eea1 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Mon, 11 Nov 2024 09:22:31 +0100 -Subject: [PATCH 13/13] zstd +Date: Mon, 18 Nov 2024 13:26:48 +0100 +Subject: [PATCH 12/12] zstd Signed-off-by: Peter Jung ---
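
Usage note on the pksm patch above: each of the three new syscalls takes (pidfd, flags), rejects any non-zero flags with -EINVAL, resolves the target via pidfd_get_task() and mm_access(PTRACE_MODE_READ_FSCREDS), and requires CAP_SYS_NICE (otherwise -EPERM). The sysfs group the patch registers publishes the syscall numbers under /sys/kernel/process_ksm/, which matters because the alpha table assigns 573-575 while every other patched table uses 463-465. The following is a minimal userspace sketch, not part of the patch: the read_nr() helper and the hard-coded x86_64 fallback numbers are illustrative assumptions; only the sysfs paths and the calling convention come from the patch itself.

/* Sketch: enable KSM merging for a target process and query the result.
 * Assumes a kernel with CONFIG_KSM=y and the pksm patch applied. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>

static long read_nr(const char *name, long fallback)
{
	char path[128];
	long nr = fallback;
	FILE *f;

	/* the patch exposes each syscall number via a sysfs attribute group */
	snprintf(path, sizeof(path), "/sys/kernel/process_ksm/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%ld", &nr) != 1)
			nr = fallback;
		fclose(f);
	}
	return nr;
}

int main(int argc, char *argv[])
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
	long nr_enable = read_nr("process_ksm_enable", 463);  /* x86_64 fallback */
	long nr_status = read_nr("process_ksm_status", 465);  /* x86_64 fallback */
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	long ret;

	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* flags must be 0 (-EINVAL otherwise); expect -EPERM without
	 * CAP_SYS_NICE, and the mm_access() ptrace-read check must pass */
	ret = syscall(nr_enable, pidfd, 0);
	if (ret < 0)
		perror("process_ksm_enable");

	/* returns the MMF_VM_MERGE_ANY bit: 1 after a successful enable */
	ret = syscall(nr_status, pidfd, 0);
	printf("MMF_VM_MERGE_ANY for pid %d: %ld\n", (int)pid, ret);

	close(pidfd);
	return 0;
}

Run it as root (or with CAP_SYS_NICE) against a PID you are allowed to ptrace-read; reading the numbers from sysfs rather than hard-coding them keeps the caller portable across the architecture tables patched above.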