From 1b2f74c827224475d930974fe902bc8dec51b35d Mon Sep 17 00:00:00 2001 From: ferreo Date: Mon, 10 Mar 2025 12:03:05 +0100 Subject: [PATCH] Update patches/0001-cachyos-base-all.patch --- patches/0001-cachyos-base-all.patch | 10169 +++++++++++++++++++++----- 1 file changed, 8203 insertions(+), 1966 deletions(-) diff --git a/patches/0001-cachyos-base-all.patch b/patches/0001-cachyos-base-all.patch index 735adf0..3974df3 100644 --- a/patches/0001-cachyos-base-all.patch +++ b/patches/0001-cachyos-base-all.patch @@ -1,6 +1,6 @@ -From 521dea9a496313ed35debb9af0dcf2c0faeef35d Mon Sep 17 00:00:00 2001 +From 573ceb814e8c895d9c2c2c9f9055ca06d9cd8fcf Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:37:58 +0100 +Date: Fri, 7 Mar 2025 19:27:14 +0100 Subject: [PATCH 01/12] amd-pstate Signed-off-by: Peter Jung @@ -883,31 +883,31 @@ index cd573bc6b6db..9747e3be6cee 100644 -- 2.48.0.rc1 -From 31240ebeb2bb55bec0be0f5a3d1949980a0d5531 Mon Sep 17 00:00:00 2001 +From f657f25b4d37648fef98078084d2a59c4a7efad8 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:45:47 +0100 +Date: Fri, 7 Mar 2025 19:27:28 +0100 Subject: [PATCH 02/12] amd-tlb-broadcast Signed-off-by: Peter Jung --- arch/x86/Kconfig | 2 +- - arch/x86/Kconfig.cpu | 5 + + arch/x86/Kconfig.cpu | 4 + arch/x86/hyperv/mmu.c | 1 - arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/disabled-features.h | 8 +- - arch/x86/include/asm/invlpgb.h | 107 +++++ - arch/x86/include/asm/mmu.h | 6 + - arch/x86/include/asm/mmu_context.h | 14 + + arch/x86/include/asm/mmu.h | 12 + + arch/x86/include/asm/mmu_context.h | 10 +- arch/x86/include/asm/msr-index.h | 2 + arch/x86/include/asm/paravirt.h | 5 - arch/x86/include/asm/paravirt_types.h | 2 - - arch/x86/include/asm/tlbflush.h | 100 ++++- + arch/x86/include/asm/tlb.h | 138 +++++++ + arch/x86/include/asm/tlbflush.h | 69 ++++ arch/x86/kernel/alternative.c | 10 +- - arch/x86/kernel/cpu/amd.c | 12 + + arch/x86/kernel/cpu/amd.c | 10 + arch/x86/kernel/kvm.c | 1 - arch/x86/kernel/paravirt.c | 6 - arch/x86/mm/pgtable.c | 16 +- - arch/x86/mm/tlb.c | 518 +++++++++++++++++++++-- + arch/x86/mm/tlb.c | 450 ++++++++++++++++++++--- arch/x86/xen/mmu_pv.c | 1 - include/linux/mm_types.h | 1 + mm/memory.c | 1 - @@ -915,11 +915,10 @@ Signed-off-by: Peter Jung mm/swap_state.c | 1 - mm/vma.c | 2 - tools/arch/x86/include/asm/msr-index.h | 2 + - 25 files changed, 732 insertions(+), 94 deletions(-) - create mode 100644 arch/x86/include/asm/invlpgb.h + 25 files changed, 668 insertions(+), 89 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index c2fb8fe86a45..2a4653d19299 100644 +index 757333fe82c7..3d143bd2c054 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -273,7 +273,7 @@ config X86 @@ -932,28 +931,20 @@ index c2fb8fe86a45..2a4653d19299 100644 select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu -index 2a7279d80460..abe013a1b076 100644 +index 2a7279d80460..25c55cc17c5e 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu -@@ -395,6 +395,10 @@ config X86_VMX_FEATURE_NAMES - def_bool y - depends on IA32_FEAT_CTL +@@ -401,6 +401,10 @@ menuconfig PROCESSOR_SELECT + This lets you choose what x86 vendor support code your kernel + will include. 
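Editorial aside, not part of the patch content: the hunks just below gate the new broadcast-TLB code behind CONFIG_BROADCAST_TLB_FLUSH by folding X86_FEATURE_INVLPGB into a compile-time "disabled features" mask, so the runtime feature check collapses to a constant false when the option is off. A minimal, runnable C sketch of that idea follows; the names (FEATURE_INVLPGB_BIT, DISABLED_MASK, feature_enabled) and the use of #if instead of #ifdef are simplified stand-ins, not the kernel's real helpers.

```c
/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * disabled-features pattern used in the hunks below: when the Kconfig
 * option is off, the feature bit is folded into a compile-time mask and
 * the runtime check becomes a constant false.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FEATURE_INVLPGB_BIT   3            /* bit within the feature word (illustrative) */

/* Toggle to model CONFIG_BROADCAST_TLB_FLUSH=y (1) or =n (0). */
#define CONFIG_BROADCAST_TLB_FLUSH 1

#if CONFIG_BROADCAST_TLB_FLUSH
#define DISABLE_INVLPGB 0
#else
#define DISABLE_INVLPGB (1u << FEATURE_INVLPGB_BIT)
#endif

#define DISABLED_MASK (DISABLE_INVLPGB /* | other disabled bits */)

static bool feature_enabled(uint32_t cpuid_word, unsigned int bit)
{
	/* A build-time disabled bit is masked out before the runtime test. */
	if (DISABLED_MASK & (1u << bit))
		return false;
	return cpuid_word & (1u << bit);
}

int main(void)
{
	uint32_t cpuid_word = 1u << FEATURE_INVLPGB_BIT; /* pretend CPUID reports INVLPGB */

	printf("INVLPGB usable: %s\n",
	       feature_enabled(cpuid_word, FEATURE_INVLPGB_BIT) ? "yes" : "no");
	return 0;
}
```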
-+config X86_BROADCAST_TLB_FLUSH ++config BROADCAST_TLB_FLUSH + def_bool y + depends on CPU_SUP_AMD && 64BIT + - menuconfig PROCESSOR_SELECT - bool "Supported processor vendors" if EXPERT - help -@@ -431,6 +435,7 @@ config CPU_SUP_CYRIX_32 - config CPU_SUP_AMD + config CPU_SUP_INTEL default y - bool "Support AMD processors" if PROCESSOR_SELECT -+ select X86_BROADCAST_TLB_FLUSH - help - This enables detection, tunings and quirks for AMD processors - + bool "Support Intel processors" if PROCESSOR_SELECT diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 1cc113200ff5..cbe6c71e17c1 100644 --- a/arch/x86/hyperv/mmu.c @@ -965,26 +956,26 @@ index 1cc113200ff5..cbe6c71e17c1 100644 - pv_ops.mmu.tlb_remove_table = tlb_remove_table; } diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index 645aa360628d..989e4c9cad2e 100644 +index 645aa360628d..bf727839326f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -338,6 +338,7 @@ #define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */ #define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */ #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */ -+#define X86_FEATURE_INVLPGB (13*32+ 3) /* INVLPGB and TLBSYNC instruction supported. */ ++#define X86_FEATURE_INVLPGB (13*32+ 3) /* INVLPGB and TLBSYNC instructions supported */ #define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */ #define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */ #define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h -index c492bdc97b05..625a89259968 100644 +index c492bdc97b05..be8c38855068 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -129,6 +129,12 @@ #define DISABLE_SEV_SNP (1 << (X86_FEATURE_SEV_SNP & 31)) #endif -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH ++#ifdef CONFIG_BROADCAST_TLB_FLUSH +#define DISABLE_INVLPGB 0 +#else +#define DISABLE_INVLPGB (1 << (X86_FEATURE_INVLPGB & 31)) @@ -1002,172 +993,75 @@ index c492bdc97b05..625a89259968 100644 #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \ -diff --git a/arch/x86/include/asm/invlpgb.h b/arch/x86/include/asm/invlpgb.h -new file mode 100644 -index 000000000000..220aba708b72 ---- /dev/null -+++ b/arch/x86/include/asm/invlpgb.h -@@ -0,0 +1,107 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+#ifndef _ASM_X86_INVLPGB -+#define _ASM_X86_INVLPGB -+ -+#include -+#include -+#include -+ -+/* -+ * INVLPGB does broadcast TLB invalidation across all the CPUs in the system. -+ * -+ * The INVLPGB instruction is weakly ordered, and a batch of invalidations can -+ * be done in a parallel fashion. -+ * -+ * TLBSYNC is used to ensure that pending INVLPGB invalidations initiated from -+ * this CPU have completed. -+ */ -+static inline void __invlpgb(unsigned long asid, unsigned long pcid, -+ unsigned long addr, u16 extra_count, -+ bool pmd_stride, u8 flags) -+{ -+ u32 edx = (pcid << 16) | asid; -+ u32 ecx = (pmd_stride << 31) | extra_count; -+ u64 rax = addr | flags; -+ -+ /* The low bits in rax are for flags. Verify addr is clean. */ -+ VM_WARN_ON_ONCE(addr & ~PAGE_MASK); -+ -+ /* INVLPGB; supported in binutils >= 2.36. 
*/ -+ asm volatile(".byte 0x0f, 0x01, 0xfe" : : "a" (rax), "c" (ecx), "d" (edx)); -+} -+ -+/* Wait for INVLPGB originated by this CPU to complete. */ -+static inline void __tlbsync(void) -+{ -+ cant_migrate(); -+ /* TLBSYNC: supported in binutils >= 0.36. */ -+ asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory"); -+} -+ -+/* -+ * INVLPGB can be targeted by virtual address, PCID, ASID, or any combination -+ * of the three. For example: -+ * - INVLPGB_VA | INVLPGB_INCLUDE_GLOBAL: invalidate all TLB entries at the address -+ * - INVLPGB_PCID: invalidate all TLB entries matching the PCID -+ * -+ * The first can be used to invalidate (kernel) mappings at a particular -+ * address across all processes. -+ * -+ * The latter invalidates all TLB entries matching a PCID. -+ */ -+#define INVLPGB_VA BIT(0) -+#define INVLPGB_PCID BIT(1) -+#define INVLPGB_ASID BIT(2) -+#define INVLPGB_INCLUDE_GLOBAL BIT(3) -+#define INVLPGB_FINAL_ONLY BIT(4) -+#define INVLPGB_INCLUDE_NESTED BIT(5) -+ -+/* Flush all mappings for a given pcid and addr, not including globals. */ -+static inline void invlpgb_flush_user(unsigned long pcid, -+ unsigned long addr) -+{ -+ __invlpgb(0, pcid, addr, 0, 0, INVLPGB_PCID | INVLPGB_VA); -+ __tlbsync(); -+} -+ -+static inline void __invlpgb_flush_user_nr_nosync(unsigned long pcid, -+ unsigned long addr, -+ u16 nr, -+ bool pmd_stride, -+ bool freed_tables) -+{ -+ u8 flags = INVLPGB_PCID | INVLPGB_VA; -+ -+ if (!freed_tables) -+ flags |= INVLPGB_FINAL_ONLY; -+ -+ __invlpgb(0, pcid, addr, nr - 1, pmd_stride, flags); -+} -+ -+/* Flush all mappings for a given PCID, not including globals. */ -+static inline void __invlpgb_flush_single_pcid_nosync(unsigned long pcid) -+{ -+ __invlpgb(0, pcid, 0, 0, 0, INVLPGB_PCID); -+} -+ -+/* Flush all mappings, including globals, for all PCIDs. */ -+static inline void invlpgb_flush_all(void) -+{ -+ __invlpgb(0, 0, 0, 0, 0, INVLPGB_INCLUDE_GLOBAL); -+ __tlbsync(); -+} -+ -+/* Flush addr, including globals, for all PCIDs. */ -+static inline void __invlpgb_flush_addr_nosync(unsigned long addr, u16 nr) -+{ -+ __invlpgb(0, 0, addr, nr - 1, 0, INVLPGB_INCLUDE_GLOBAL); -+} -+ -+/* Flush all mappings for all PCIDs except globals. */ -+static inline void invlpgb_flush_all_nonglobals(void) -+{ -+ __invlpgb(0, 0, 0, 0, 0, 0); -+ __tlbsync(); -+} -+ -+#endif /* _ASM_X86_INVLPGB */ diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h -index 3b496cdcb74b..d71cd599fec4 100644 +index 3b496cdcb74b..8b8055a8eb9e 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h -@@ -69,6 +69,12 @@ typedef struct { +@@ -69,6 +69,18 @@ typedef struct { u16 pkey_allocation_map; s16 execute_only_pkey; #endif + -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH ++#ifdef CONFIG_BROADCAST_TLB_FLUSH ++ /* ++ * The global ASID will be a non-zero value when the process has ++ * the same ASID across all CPUs, allowing it to make use of ++ * hardware-assisted remote TLB invalidation like AMD INVLPGB. ++ */ + u16 global_asid; ++ ++ /* The process is transitioning to a new global ASID number. 
*/ + bool asid_transition; +#endif -+ } mm_context_t; #define INIT_MM_CONTEXT(mm) \ diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h -index 795fdd53bd0a..d670699d32c2 100644 +index 795fdd53bd0a..2398058b6e83 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h -@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm) +@@ -2,7 +2,6 @@ + #ifndef _ASM_X86_MMU_CONTEXT_H + #define _ASM_X86_MMU_CONTEXT_H + +-#include + #include + #include + #include +@@ -13,6 +12,7 @@ + #include + #include + #include ++#include + + extern atomic64_t last_mm_ctx_id; + +@@ -139,6 +139,11 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm) #define enter_lazy_tlb enter_lazy_tlb extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); -+extern void destroy_context_free_global_asid(struct mm_struct *mm); ++#define mm_init_global_asid mm_init_global_asid ++extern void mm_init_global_asid(struct mm_struct *mm); ++ ++extern void mm_free_global_asid(struct mm_struct *mm); + /* * Init a new mm. Used on mm copies, like at fork() * and on mm's that are brand-new, like at execve(). -@@ -161,6 +163,14 @@ static inline int init_new_context(struct task_struct *tsk, +@@ -161,6 +166,8 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.execute_only_pkey = -1; } #endif + -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH -+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) { -+ mm->context.global_asid = 0; -+ mm->context.asid_transition = false; -+ } -+#endif -+ ++ mm_init_global_asid(mm); mm_reset_untag_mask(mm); init_new_context_ldt(mm); return 0; -@@ -170,6 +180,10 @@ static inline int init_new_context(struct task_struct *tsk, +@@ -170,6 +177,7 @@ static inline int init_new_context(struct task_struct *tsk, static inline void destroy_context(struct mm_struct *mm) { destroy_context_ldt(mm); -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH -+ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) -+ destroy_context_free_global_asid(mm); -+#endif ++ mm_free_global_asid(mm); } extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, @@ -1220,11 +1114,165 @@ index 8d4fbe1be489..13405959e4db 100644 /* Hook for intercepting the destruction of an mm_struct. */ void (*exit_mmap)(struct mm_struct *mm); void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc); +diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h +index 4d3c9d00d6b6..a74b57512761 100644 +--- a/arch/x86/include/asm/tlb.h ++++ b/arch/x86/include/asm/tlb.h +@@ -6,6 +6,9 @@ + static inline void tlb_flush(struct mmu_gather *tlb); + + #include ++#include ++#include ++#include + + static inline void tlb_flush(struct mmu_gather *tlb) + { +@@ -38,4 +41,139 @@ static inline void invlpg(unsigned long addr) + { + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + } ++enum addr_stride { ++ PTE_STRIDE = 0, ++ PMD_STRIDE = 1 ++}; ++ ++/* ++ * INVLPGB can be targeted by virtual address, PCID, ASID, or any combination ++ * of the three. For example: ++ * - FLAG_VA | FLAG_INCLUDE_GLOBAL: invalidate all TLB entries at the address ++ * - FLAG_PCID: invalidate all TLB entries matching the PCID ++ * ++ * The first is used to invalidate (kernel) mappings at a particular ++ * address across all processes. ++ * ++ * The latter invalidates all TLB entries matching a PCID. 
++ */ ++#define INVLPGB_FLAG_VA BIT(0) ++#define INVLPGB_FLAG_PCID BIT(1) ++#define INVLPGB_FLAG_ASID BIT(2) ++#define INVLPGB_FLAG_INCLUDE_GLOBAL BIT(3) ++#define INVLPGB_FLAG_FINAL_ONLY BIT(4) ++#define INVLPGB_FLAG_INCLUDE_NESTED BIT(5) ++ ++/* The implied mode when all bits are clear: */ ++#define INVLPGB_MODE_ALL_NONGLOBALS 0UL ++ ++#ifdef CONFIG_BROADCAST_TLB_FLUSH ++/* ++ * INVLPGB does broadcast TLB invalidation across all the CPUs in the system. ++ * ++ * The INVLPGB instruction is weakly ordered, and a batch of invalidations can ++ * be done in a parallel fashion. ++ * ++ * The instruction takes the number of extra pages to invalidate, beyond the ++ * first page, while __invlpgb gets the more human readable number of pages to ++ * invalidate. ++ * ++ * The bits in rax[0:2] determine respectively which components of the address ++ * (VA, PCID, ASID) get compared when flushing. If neither bits are set, *any* ++ * address in the specified range matches. ++ * ++ * Since it is desired to only flush TLB entries for the ASID that is executing ++ * the instruction (a host/hypervisor or a guest), the ASID valid bit should ++ * always be set. On a host/hypervisor, the hardware will use the ASID value ++ * specified in EDX[15:0] (which should be 0). On a guest, the hardware will ++ * use the actual ASID value of the guest. ++ * ++ * TLBSYNC is used to ensure that pending INVLPGB invalidations initiated from ++ * this CPU have completed. ++ */ ++static inline void __invlpgb(unsigned long asid, unsigned long pcid, ++ unsigned long addr, u16 nr_pages, ++ enum addr_stride stride, u8 flags) ++{ ++ u64 rax = addr | flags | INVLPGB_FLAG_ASID; ++ u32 ecx = (stride << 31) | (nr_pages - 1); ++ u32 edx = (pcid << 16) | asid; ++ ++ /* The low bits in rax are for flags. Verify addr is clean. */ ++ VM_WARN_ON_ONCE(addr & ~PAGE_MASK); ++ ++ /* INVLPGB; supported in binutils >= 2.36. */ ++ asm volatile(".byte 0x0f, 0x01, 0xfe" :: "a" (rax), "c" (ecx), "d" (edx)); ++} ++ ++static inline void __invlpgb_all(unsigned long asid, unsigned long pcid, u8 flags) ++{ ++ __invlpgb(asid, pcid, 0, 1, 0, flags); ++} ++ ++static inline void __tlbsync(void) ++{ ++ /* ++ * TLBSYNC waits for INVLPGB instructions originating on the same CPU ++ * to have completed. Print a warning if the task has been migrated, ++ * and might not be waiting on all the INVLPGBs issued during this TLB ++ * invalidation sequence. ++ */ ++ cant_migrate(); ++ ++ /* TLBSYNC: supported in binutils >= 0.36. */ ++ asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory"); ++} ++#else ++/* Some compilers (I'm looking at you clang!) simply can't do DCE */ ++static inline void __invlpgb(unsigned long asid, unsigned long pcid, ++ unsigned long addr, u16 nr_pages, ++ enum addr_stride s, u8 flags) { } ++static inline void __invlpgb_all(unsigned long asid, unsigned long pcid, u8 flags) { } ++static inline void __tlbsync(void) { } ++#endif ++ ++static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid, ++ unsigned long addr, ++ u16 nr, bool stride) ++{ ++ enum addr_stride str = stride ? PMD_STRIDE : PTE_STRIDE; ++ u8 flags = INVLPGB_FLAG_PCID | INVLPGB_FLAG_VA; ++ ++ __invlpgb(0, pcid, addr, nr, str, flags); ++} ++ ++/* Flush all mappings for a given PCID, not including globals. */ ++static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid) ++{ ++ __invlpgb_all(0, pcid, INVLPGB_FLAG_PCID); ++} ++ ++/* Flush all mappings, including globals, for all PCIDs. 
*/ ++static inline void invlpgb_flush_all(void) ++{ ++ /* ++ * TLBSYNC at the end needs to make sure all flushes done on the ++ * current CPU have been executed system-wide. Therefore, make ++ * sure nothing gets migrated in-between but disable preemption ++ * as it is cheaper. ++ */ ++ guard(preempt)(); ++ __invlpgb_all(0, 0, INVLPGB_FLAG_INCLUDE_GLOBAL); ++ __tlbsync(); ++} ++ ++/* Flush addr, including globals, for all PCIDs. */ ++static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr) ++{ ++ __invlpgb(0, 0, addr, nr, PTE_STRIDE, INVLPGB_FLAG_INCLUDE_GLOBAL); ++} ++ ++/* Flush all mappings for all PCIDs except globals. */ ++static inline void invlpgb_flush_all_nonglobals(void) ++{ ++ guard(preempt)(); ++ __invlpgb_all(0, 0, INVLPGB_MODE_ALL_NONGLOBALS); ++ __tlbsync(); ++} + #endif /* _ASM_X86_TLB_H */ diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h -index 02fc2aa06e9e..89dddbcd1322 100644 +index 02fc2aa06e9e..0bc91488c9c2 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h -@@ -6,10 +6,12 @@ +@@ -6,6 +6,7 @@ #include #include @@ -1232,45 +1280,22 @@ index 02fc2aa06e9e..89dddbcd1322 100644 #include #include #include - #include -+#include - #include - #include - #include -@@ -104,6 +106,9 @@ struct tlb_state { - * need to be invalidated. - */ - bool invalidate_other; -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH -+ bool need_tlbsync; -+#endif - - #ifdef CONFIG_ADDRESS_MASKING - /* -@@ -183,6 +188,13 @@ static inline void cr4_init_shadow(void) +@@ -183,6 +184,9 @@ static inline void cr4_init_shadow(void) extern unsigned long mmu_cr4_features; extern u32 *trampoline_cr4_features; -+/* How many pages can we invalidate with one INVLPGB. */ -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH ++/* How many pages can be invalidated with one INVLPGB. */ +extern u16 invlpgb_count_max; -+#else -+#define invlpgb_count_max 1 -+#endif + extern void initialize_tlbstate_and_flush(void); /* -@@ -231,6 +243,82 @@ void flush_tlb_one_kernel(unsigned long addr); +@@ -231,6 +235,71 @@ void flush_tlb_one_kernel(unsigned long addr); void flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info); -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH +static inline bool is_dyn_asid(u16 asid) +{ -+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) -+ return true; -+ + return asid < TLB_NR_DYN_ASIDS; +} + @@ -1279,14 +1304,7 @@ index 02fc2aa06e9e..89dddbcd1322 100644 + return !is_dyn_asid(asid); +} + -+static inline bool in_asid_transition(struct mm_struct *mm) -+{ -+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) -+ return false; -+ -+ return mm && READ_ONCE(mm->context.asid_transition); -+} -+ ++#ifdef CONFIG_BROADCAST_TLB_FLUSH +static inline u16 mm_global_asid(struct mm_struct *mm) +{ + u16 asid; @@ -1301,74 +1319,49 @@ index 02fc2aa06e9e..89dddbcd1322 100644 + + return asid; +} ++ ++static inline void mm_init_global_asid(struct mm_struct *mm) ++{ ++ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) { ++ mm->context.global_asid = 0; ++ mm->context.asid_transition = false; ++ } ++} ++ ++static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) ++{ ++ /* ++ * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() -> ++ * finish_asid_transition() needs to observe asid_transition = true ++ * once it observes global_asid. 
++ */ ++ mm->context.asid_transition = true; ++ smp_store_release(&mm->context.global_asid, asid); ++} ++ ++static inline void mm_clear_asid_transition(struct mm_struct *mm) ++{ ++ WRITE_ONCE(mm->context.asid_transition, false); ++} ++ ++static inline bool mm_in_asid_transition(struct mm_struct *mm) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) ++ return false; ++ ++ return mm && READ_ONCE(mm->context.asid_transition); ++} +#else -+static inline bool is_dyn_asid(u16 asid) -+{ -+ return true; -+} -+ -+static inline bool is_global_asid(u16 asid) -+{ -+ return false; -+} -+ -+static inline bool in_asid_transition(struct mm_struct *mm) -+{ -+ return false; -+} -+ -+static inline u16 mm_global_asid(struct mm_struct *mm) -+{ -+ return 0; -+} -+ -+static inline bool needs_global_asid_reload(struct mm_struct *next, u16 prev_asid) -+{ -+ return false; -+} -+ -+static inline void broadcast_tlb_flush(struct flush_tlb_info *info) -+{ -+ VM_WARN_ON_ONCE(1); -+} -+ -+static inline void consider_global_asid(struct mm_struct *mm) -+{ -+} -+ -+static inline void tlbsync(void) -+{ -+} -+#endif ++static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; } ++static inline void mm_init_global_asid(struct mm_struct *mm) { } ++static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { } ++static inline void mm_clear_asid_transition(struct mm_struct *mm) { } ++static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; } ++#endif /* CONFIG_BROADCAST_TLB_FLUSH */ + #ifdef CONFIG_PARAVIRT #include #endif -@@ -278,21 +366,15 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) - return atomic64_inc_return(&mm->context.tlb_gen); - } - --static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, -- struct mm_struct *mm, -- unsigned long uaddr) --{ -- inc_mm_tlb_gen(mm); -- cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); -- mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); --} -- - static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm) - { - flush_tlb_mm(mm); - } - - extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); -+extern void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, -+ struct mm_struct *mm, -+ unsigned long uaddr); - - static inline bool pte_flags_need_flush(unsigned long oldflags, - unsigned long newflags, diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 243843e44e89..c71b575bf229 100644 --- a/arch/x86/kernel/alternative.c @@ -1403,7 +1396,7 @@ index 243843e44e89..c71b575bf229 100644 { memcpy(dst, src, len); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 79d2e17f6582..21076252a491 100644 +index 79d2e17f6582..05ca61b66461 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -29,6 +29,8 @@ @@ -1421,21 +1414,19 @@ index 79d2e17f6582..21076252a491 100644 clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); + + /* Enable Translation Cache Extension */ -+ if (cpu_feature_enabled(X86_FEATURE_TCE)) ++ if (cpu_has(c, X86_FEATURE_TCE)) + msr_set_bit(MSR_EFER, _EFER_TCE); } #ifdef CONFIG_X86_32 -@@ -1135,6 +1141,12 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) +@@ -1135,6 +1141,10 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) tlb_lli_2m[ENTRIES] = eax & mask; tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; + + /* Max number of pages INVLPGB can invalidate in one shot */ -+ if (boot_cpu_has(X86_FEATURE_INVLPGB)) { -+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx); -+ 
invlpgb_count_max = (edx & 0xffff) + 1; -+ } ++ if (cpu_has(c, X86_FEATURE_INVLPGB)) ++ invlpgb_count_max = (cpuid_edx(0x80000008) & 0xffff) + 1; } static const struct cpu_dev amd_cpu_dev = { @@ -1530,7 +1521,7 @@ index 5745a354a241..3dc4af1f7868 100644 #endif /* CONFIG_PGTABLE_LEVELS > 4 */ #endif /* CONFIG_PGTABLE_LEVELS > 3 */ diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c -index 90a9e4740913..482b7def3677 100644 +index 90a9e4740913..7505c2d94bc0 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -74,13 +74,15 @@ @@ -1557,10 +1548,10 @@ index 90a9e4740913..482b7def3677 100644 } + /* -+ * TLB consistency for global ASIDs is maintained with broadcast TLB -+ * flushing. The TLB is never outdated, and does not need flushing. ++ * TLB consistency for global ASIDs is maintained with hardware assisted ++ * remote TLB flushing. Global ASIDs are always up to date. + */ -+ if (static_cpu_has(X86_FEATURE_INVLPGB)) { ++ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) { + u16 global_asid = mm_global_asid(next); + + if (global_asid) { @@ -1573,62 +1564,63 @@ index 90a9e4740913..482b7def3677 100644 if (this_cpu_read(cpu_tlbstate.invalidate_other)) clear_asid_other(); -@@ -251,6 +267,301 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, +@@ -251,6 +267,268 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, *need_flush = true; } -+#ifdef CONFIG_X86_BROADCAST_TLB_FLUSH +/* -+ * Logic for broadcast TLB invalidation. ++ * Global ASIDs are allocated for multi-threaded processes that are ++ * active on multiple CPUs simultaneously, giving each of those ++ * processes the same PCID on every CPU, for use with hardware-assisted ++ * TLB shootdown on remote CPUs, like AMD INVLPGB or Intel RAR. ++ * ++ * These global ASIDs are held for the lifetime of the process. + */ +static DEFINE_RAW_SPINLOCK(global_asid_lock); +static u16 last_global_asid = MAX_ASID_AVAILABLE; -+static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE) = { 0 }; -+static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE) = { 0 }; ++static DECLARE_BITMAP(global_asid_used, MAX_ASID_AVAILABLE); ++static DECLARE_BITMAP(global_asid_freed, MAX_ASID_AVAILABLE); +static int global_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1; + ++/* ++ * When the search for a free ASID in the global ASID space reaches ++ * MAX_ASID_AVAILABLE, a global TLB flush guarantees that previously ++ * freed global ASIDs are safe to re-use. ++ * ++ * This way the global flush only needs to happen at ASID rollover ++ * time, and not at ASID allocation time. ++ */ +static void reset_global_asid_space(void) +{ + lockdep_assert_held(&global_asid_lock); + -+ /* -+ * A global TLB flush guarantees that any stale entries from -+ * previously freed global ASIDs get flushed from the TLB -+ * everywhere, making these global ASIDs safe to reuse. -+ */ + invlpgb_flush_all_nonglobals(); + + /* -+ * Clear all the previously freed global ASIDs from the -+ * broadcast_asid_used bitmap, now that the global TLB flush -+ * has made them actually available for re-use. ++ * The TLB flush above makes it safe to re-use the previously ++ * freed global ASIDs. + */ + bitmap_andnot(global_asid_used, global_asid_used, + global_asid_freed, MAX_ASID_AVAILABLE); + bitmap_clear(global_asid_freed, 0, MAX_ASID_AVAILABLE); + -+ /* -+ * ASIDs 0-TLB_NR_DYN_ASIDS are used for CPU-local ASID -+ * assignments, for tasks doing IPI based TLB shootdowns. -+ * Restart the search from the start of the global ASID space. 
-+ */ ++ /* Restart the search from the start of global ASID space. */ + last_global_asid = TLB_NR_DYN_ASIDS; +} + -+static u16 get_global_asid(void) ++static u16 allocate_global_asid(void) +{ -+ + u16 asid; + + lockdep_assert_held(&global_asid_lock); + -+ /* The previous allocated ASID is at the top of the address space. */ ++ /* The previous allocation hit the edge of available address space */ + if (last_global_asid >= MAX_ASID_AVAILABLE - 1) + reset_global_asid_space(); + + asid = find_next_zero_bit(global_asid_used, MAX_ASID_AVAILABLE, last_global_asid); + -+ if (asid >= MAX_ASID_AVAILABLE) { ++ if (asid >= MAX_ASID_AVAILABLE && !global_asid_available) { + /* This should never happen. */ + VM_WARN_ONCE(1, "Unable to allocate global ASID despite %d available\n", + global_asid_available); @@ -1643,40 +1635,7 @@ index 90a9e4740913..482b7def3677 100644 +} + +/* -+ * Returns true if the mm is transitioning from a CPU-local ASID to a global -+ * (INVLPGB) ASID, or the other way around. -+ */ -+static bool needs_global_asid_reload(struct mm_struct *next, u16 prev_asid) -+{ -+ u16 global_asid = mm_global_asid(next); -+ -+ /* Process is transitioning to a global ASID */ -+ if (global_asid && prev_asid != global_asid) -+ return true; -+ -+ /* Transition from global->local ASID does not currently happen. */ -+ if (!global_asid && is_global_asid(prev_asid)) -+ return true; -+ -+ return false; -+} -+ -+void destroy_context_free_global_asid(struct mm_struct *mm) -+{ -+ if (!mm->context.global_asid) -+ return; -+ -+ guard(raw_spinlock_irqsave)(&global_asid_lock); -+ -+ /* The global ASID can be re-used only after flush at wrap-around. */ -+ __set_bit(mm->context.global_asid, global_asid_freed); -+ -+ mm->context.global_asid = 0; -+ global_asid_available++; -+} -+ -+/* -+ * Check whether a process is currently active on more than "threshold" CPUs. ++ * Check whether a process is currently active on more than @threshold CPUs. + * This is a cheap estimation on whether or not it may make sense to assign + * a global ASID to this process, and use broadcast TLB invalidation. + */ @@ -1715,46 +1674,78 @@ index 90a9e4740913..482b7def3677 100644 + guard(raw_spinlock_irqsave)(&global_asid_lock); + + /* This process is already using broadcast TLB invalidation. */ -+ if (mm->context.global_asid) ++ if (mm_global_asid(mm)) + return; + -+ /* The last global ASID was consumed while waiting for the lock. */ ++ /* ++ * The last global ASID was consumed while waiting for the lock. ++ * ++ * If this fires, a more aggressive ASID reuse scheme might be ++ * needed. ++ */ + if (!global_asid_available) { + VM_WARN_ONCE(1, "Ran out of global ASIDs\n"); + return; + } + -+ asid = get_global_asid(); ++ asid = allocate_global_asid(); + if (!asid) + return; + -+ /* -+ * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() -> -+ * finish_asid_transition() needs to observe asid_transition = true -+ * once it observes global_asid. -+ */ -+ mm->context.asid_transition = true; -+ smp_store_release(&mm->context.global_asid, asid); ++ mm_assign_global_asid(mm, asid); ++} ++ ++void mm_free_global_asid(struct mm_struct *mm) ++{ ++ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) ++ return; ++ ++ if (!mm_global_asid(mm)) ++ return; ++ ++ guard(raw_spinlock_irqsave)(&global_asid_lock); ++ ++ /* The global ASID can be re-used only after flush at wrap-around. 
*/ ++#ifdef CONFIG_BROADCAST_TLB_FLUSH ++ __set_bit(mm->context.global_asid, global_asid_freed); ++ ++ mm->context.global_asid = 0; ++ global_asid_available++; ++#endif +} + +/* -+ * x86 has 4k ASIDs (2k when compiled with KPTI), but the largest -+ * x86 systems have over 8k CPUs. Because of this potential ASID -+ * shortage, global ASIDs are handed out to processes that have -+ * frequent TLB flushes and are active on 4 or more CPUs simultaneously. ++ * Is the mm transitioning from a CPU-local ASID to a global ASID? ++ */ ++static bool mm_needs_global_asid(struct mm_struct *mm, u16 asid) ++{ ++ u16 global_asid = mm_global_asid(mm); ++ ++ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) ++ return false; ++ ++ /* Process is transitioning to a global ASID */ ++ if (global_asid && asid != global_asid) ++ return true; ++ ++ return false; ++} ++ ++/* ++ * x86 has 4k ASIDs (2k when compiled with KPTI), but the largest x86 ++ * systems have over 8k CPUs. Because of this potential ASID shortage, ++ * global ASIDs are handed out to processes that have frequent TLB ++ * flushes and are active on 4 or more CPUs simultaneously. + */ +static void consider_global_asid(struct mm_struct *mm) +{ -+ if (!static_cpu_has(X86_FEATURE_INVLPGB)) ++ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) + return; + + /* Check every once in a while. */ + if ((current->pid & 0x1f) != (jiffies & 0x1f)) + return; + -+ if (!READ_ONCE(global_asid_available)) -+ return; -+ + /* + * Assign a global ASID if the process is active on + * 4 or more CPUs simultaneously. @@ -1769,7 +1760,7 @@ index 90a9e4740913..482b7def3677 100644 + int bc_asid = mm_global_asid(mm); + int cpu; + -+ if (!READ_ONCE(mm->context.asid_transition)) ++ if (!mm_in_asid_transition(mm)) + return; + + for_each_cpu(cpu, mm_cpumask(mm)) { @@ -1799,45 +1790,13 @@ index 90a9e4740913..482b7def3677 100644 + } + + /* All the CPUs running this process are using the global ASID. */ -+ WRITE_ONCE(mm->context.asid_transition, false); -+} -+ -+static inline void tlbsync(void) -+{ -+ if (!this_cpu_read(cpu_tlbstate.need_tlbsync)) -+ return; -+ __tlbsync(); -+ this_cpu_write(cpu_tlbstate.need_tlbsync, false); -+} -+ -+static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid, -+ unsigned long addr, -+ u16 nr, bool pmd_stride, -+ bool freed_tables) -+{ -+ __invlpgb_flush_user_nr_nosync(pcid, addr, nr, pmd_stride, freed_tables); -+ if (!this_cpu_read(cpu_tlbstate.need_tlbsync)) -+ this_cpu_write(cpu_tlbstate.need_tlbsync, true); -+} -+ -+static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid) -+{ -+ __invlpgb_flush_single_pcid_nosync(pcid); -+ if (!this_cpu_read(cpu_tlbstate.need_tlbsync)) -+ this_cpu_write(cpu_tlbstate.need_tlbsync, true); -+} -+ -+static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr) -+{ -+ __invlpgb_flush_addr_nosync(addr, nr); -+ if (!this_cpu_read(cpu_tlbstate.need_tlbsync)) -+ this_cpu_write(cpu_tlbstate.need_tlbsync, true); ++ mm_clear_asid_transition(mm); +} + +static void broadcast_tlb_flush(struct flush_tlb_info *info) +{ + bool pmd = info->stride_shift == PMD_SHIFT; -+ unsigned long asid = info->mm->context.global_asid; ++ unsigned long asid = mm_global_asid(info->mm); + unsigned long addr = info->start; + + /* @@ -1848,7 +1807,7 @@ index 90a9e4740913..482b7def3677 100644 + if (info->end == TLB_FLUSH_ALL) { + invlpgb_flush_single_pcid_nosync(kern_pcid(asid)); + /* Do any CPUs supporting INVLPGB need PTI? 
*/ -+ if (static_cpu_has(X86_FEATURE_PTI)) ++ if (cpu_feature_enabled(X86_FEATURE_PTI)) + invlpgb_flush_single_pcid_nosync(user_pcid(asid)); + } else do { + unsigned long nr = 1; @@ -1858,9 +1817,9 @@ index 90a9e4740913..482b7def3677 100644 + nr = clamp_val(nr, 1, invlpgb_count_max); + } + -+ invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd, info->freed_tables); -+ if (static_cpu_has(X86_FEATURE_PTI)) -+ invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd, info->freed_tables); ++ invlpgb_flush_user_nr_nosync(kern_pcid(asid), addr, nr, pmd); ++ if (cpu_feature_enabled(X86_FEATURE_PTI)) ++ invlpgb_flush_user_nr_nosync(user_pcid(asid), addr, nr, pmd); + + addr += nr << info->stride_shift; + } while (addr < info->end); @@ -1868,50 +1827,35 @@ index 90a9e4740913..482b7def3677 100644 + finish_asid_transition(info); + + /* Wait for the INVLPGBs kicked off above to finish. */ -+ tlbsync(); ++ __tlbsync(); +} -+#endif /* CONFIG_X86_BROADCAST_TLB_FLUSH */ + /* * Given an ASID, flush the corresponding user ASID. We can delay this * until the next time we switch to it. -@@ -512,6 +823,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, - if (IS_ENABLED(CONFIG_PROVE_LOCKING)) - WARN_ON_ONCE(!irqs_disabled()); - -+ tlbsync(); -+ - /* - * Verify that CR3 is what we think it is. This will catch - * hypothetical buggy code that directly switches to swapper_pg_dir -@@ -556,8 +869,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, +@@ -556,7 +834,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, */ if (prev == next) { /* Not actually switching mm's */ - VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != -- next->context.ctx_id); + VM_WARN_ON(is_dyn_asid(prev_asid) && -+ this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != -+ next->context.ctx_id); ++ this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != + next->context.ctx_id); /* - * If this races with another thread that enables lam, 'new_lam' -@@ -573,6 +887,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, +@@ -573,6 +852,20 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, !cpumask_test_cpu(cpu, mm_cpumask(next)))) cpumask_set_cpu(cpu, mm_cpumask(next)); -+ /* -+ * Check if the current mm is transitioning to a new ASID. -+ */ -+ if (needs_global_asid_reload(next, prev_asid)) { ++ /* Check if the current mm is transitioning to a global ASID */ ++ if (mm_needs_global_asid(next, prev_asid)) { + next_tlb_gen = atomic64_read(&next->context.tlb_gen); -+ + choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); + goto reload_tlb; + } + + /* -+ * Broadcast TLB invalidation keeps this PCID up to date ++ * Broadcast TLB invalidation keeps this ASID up to date + * all the time. + */ + if (is_global_asid(prev_asid)) @@ -1920,7 +1864,7 @@ index 90a9e4740913..482b7def3677 100644 /* * If the CPU is not in lazy TLB mode, we are just switching * from one thread in a process to another thread in the same -@@ -607,30 +938,32 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, +@@ -607,30 +900,32 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, cond_mitigation(tsk); /* @@ -1928,7 +1872,7 @@ index 90a9e4740913..482b7def3677 100644 - * Skip kernel threads; we never send init_mm TLB flushing IPIs, - * but the bitmap manipulation can cause cache line contention. 
+ * Let nmi_uaccess_okay() and finish_asid_transition() -+ * know that we're changing CR3. ++ * know that CR3 is changing. */ - if (prev != &init_mm) { - VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, @@ -1966,16 +1910,7 @@ index 90a9e4740913..482b7def3677 100644 this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); load_new_mm_cr3(next->pgd, new_asid, new_lam, true); -@@ -671,6 +1004,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next, - */ - void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) - { -+ tlbsync(); -+ - if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) - return; - -@@ -749,7 +1084,7 @@ static void flush_tlb_func(void *info) +@@ -749,7 +1044,7 @@ static void flush_tlb_func(void *info) const struct flush_tlb_info *f = info; struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); @@ -1984,7 +1919,7 @@ index 90a9e4740913..482b7def3677 100644 bool local = smp_processor_id() == f->initiating_cpu; unsigned long nr_invalidate = 0; u64 mm_tlb_gen; -@@ -760,15 +1095,28 @@ static void flush_tlb_func(void *info) +@@ -760,15 +1055,28 @@ static void flush_tlb_func(void *info) if (!local) { inc_irq_stat(irq_tlb_count); count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); @@ -2004,7 +1939,7 @@ index 90a9e4740913..482b7def3677 100644 return; + /* Reload the ASID if transitioning into or out of a global ASID */ -+ if (needs_global_asid_reload(loaded_mm, loaded_mm_asid)) { ++ if (mm_needs_global_asid(loaded_mm, loaded_mm_asid)) { + switch_mm_irqs_off(NULL, loaded_mm, NULL); + loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); + } @@ -2016,7 +1951,7 @@ index 90a9e4740913..482b7def3677 100644 VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != loaded_mm->context.ctx_id); -@@ -786,6 +1134,8 @@ static void flush_tlb_func(void *info) +@@ -786,6 +1094,8 @@ static void flush_tlb_func(void *info) return; } @@ -2025,32 +1960,32 @@ index 90a9e4740913..482b7def3677 100644 if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID && f->new_tlb_gen <= local_tlb_gen)) { /* -@@ -953,7 +1303,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask, +@@ -953,7 +1263,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask, * up on the new contents of what used to be page tables, while * doing a speculative memory access. */ - if (info->freed_tables) -+ if (info->freed_tables || in_asid_transition(info->mm)) ++ if (info->freed_tables || mm_in_asid_transition(info->mm)) on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true); else on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func, -@@ -1009,6 +1359,15 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, - info->initiating_cpu = smp_processor_id(); - info->trim_cpumask = 0; +@@ -1000,6 +1310,15 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, + BUG_ON(this_cpu_inc_return(flush_tlb_info_idx) != 1); + #endif + /* + * If the number of flushes is so large that a full flush + * would be faster, do a full flush. 
+ */ + if ((end - start) >> stride_shift > tlb_single_page_flush_ceiling) { -+ info->start = 0; -+ info->end = TLB_FLUSH_ALL; ++ start = 0; ++ end = TLB_FLUSH_ALL; + } + - return info; - } - -@@ -1026,17 +1385,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + info->start = start; + info->end = end; + info->mm = mm; +@@ -1026,17 +1345,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, bool freed_tables) { struct flush_tlb_info *info; @@ -2069,7 +2004,7 @@ index 90a9e4740913..482b7def3677 100644 /* This is also a barrier that synchronizes with switch_mm(). */ new_tlb_gen = inc_mm_tlb_gen(mm); -@@ -1049,9 +1399,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, +@@ -1049,9 +1359,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, * a local TLB flush is needed. Optimize this use-case by calling * flush_tlb_func_local() directly in this case. */ @@ -2083,145 +2018,110 @@ index 90a9e4740913..482b7def3677 100644 } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { lockdep_assert_irqs_enabled(); local_irq_disable(); -@@ -1065,6 +1418,16 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, +@@ -1064,7 +1377,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end); } - -+static bool broadcast_flush_tlb_all(void) -+{ -+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) -+ return false; -+ -+ guard(preempt)(); -+ invlpgb_flush_all(); -+ return true; -+} -+ +- static void do_flush_tlb_all(void *info) { count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); -@@ -1073,10 +1436,34 @@ static void do_flush_tlb_all(void *info) - +@@ -1074,7 +1386,32 @@ static void do_flush_tlb_all(void *info) void flush_tlb_all(void) { -+ if (broadcast_flush_tlb_all()) -+ return; count_vm_tlb_event(NR_TLB_REMOTE_FLUSH); - on_each_cpu(do_flush_tlb_all, NULL, 1); - } - -+static bool broadcast_kernel_range_flush(struct flush_tlb_info *info) -+{ -+ unsigned long addr; -+ unsigned long nr; +- on_each_cpu(do_flush_tlb_all, NULL, 1); + -+ if (!cpu_feature_enabled(X86_FEATURE_INVLPGB)) -+ return false; -+ -+ if (info->end == TLB_FLUSH_ALL) { ++ /* First try (faster) hardware-assisted TLB invalidation. */ ++ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) + invlpgb_flush_all(); -+ return true; -+ } ++ else ++ /* Fall back to the IPI-based invalidation. */ ++ on_each_cpu(do_flush_tlb_all, NULL, 1); ++} ++ ++/* Flush an arbitrarily large range of memory with INVLPGB. */ ++static void invlpgb_kernel_range_flush(struct flush_tlb_info *info) ++{ ++ unsigned long addr, nr; + + for (addr = info->start; addr < info->end; addr += nr << PAGE_SHIFT) { + nr = (info->end - addr) >> PAGE_SHIFT; ++ ++ /* ++ * INVLPGB has a limit on the size of ranges it can ++ * flush. Break up large flushes. 
++ */ + nr = clamp_val(nr, 1, invlpgb_count_max); ++ + invlpgb_flush_addr_nosync(addr, nr); + } -+ tlbsync(); -+ return true; -+} -+ - static void do_kernel_range_flush(void *info) - { - struct flush_tlb_info *f = info; -@@ -1089,22 +1476,21 @@ static void do_kernel_range_flush(void *info) ++ __tlbsync(); + } - void flush_tlb_kernel_range(unsigned long start, unsigned long end) + static void do_kernel_range_flush(void *info) +@@ -1087,24 +1424,37 @@ static void do_kernel_range_flush(void *info) + flush_tlb_one_kernel(addr); + } + +-void flush_tlb_kernel_range(unsigned long start, unsigned long end) ++static void kernel_tlb_flush_all(struct flush_tlb_info *info) { - /* Balance as user space task's flush, a bit conservative */ - if (end == TLB_FLUSH_ALL || - (end - start) > tlb_single_page_flush_ceiling << PAGE_SHIFT) { -- on_each_cpu(do_flush_tlb_all, NULL, 1); ++ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) ++ invlpgb_flush_all(); ++ else + on_each_cpu(do_flush_tlb_all, NULL, 1); - } else { - struct flush_tlb_info *info; -+ struct flush_tlb_info *info; -+ -+ guard(preempt)(); - +- - preempt_disable(); - info = get_flush_tlb_info(NULL, start, end, 0, false, - TLB_GENERATION_INVALID); -+ info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false, -+ TLB_GENERATION_INVALID); ++} -+ if (broadcast_kernel_range_flush(info)) -+ ; /* Fall through. */ -+ else if (info->end == TLB_FLUSH_ALL) -+ on_each_cpu(do_flush_tlb_all, NULL, 1); ++static void kernel_tlb_flush_range(struct flush_tlb_info *info) ++{ ++ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) ++ invlpgb_kernel_range_flush(info); + else on_each_cpu(do_kernel_range_flush, info, 1); ++} - put_flush_tlb_info(); - preempt_enable(); - } ++void flush_tlb_kernel_range(unsigned long start, unsigned long end) ++{ ++ struct flush_tlb_info *info; ++ ++ guard(preempt)(); ++ ++ info = get_flush_tlb_info(NULL, start, end, PAGE_SHIFT, false, ++ TLB_GENERATION_INVALID); ++ ++ if (info->end == TLB_FLUSH_ALL) ++ kernel_tlb_flush_all(info); ++ else ++ kernel_tlb_flush_range(info); ++ + put_flush_tlb_info(); } /* -@@ -1292,12 +1678,52 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) - local_irq_enable(); - } - -+ /* -+ * If we issued (asynchronous) INVLPGB flushes, wait for them here. -+ * The cpumask above contains only CPUs that were running tasks -+ * not using broadcast TLB flushing. -+ */ -+ tlbsync(); -+ - cpumask_clear(&batch->cpumask); - - put_flush_tlb_info(); - put_cpu(); - } - -+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, -+ struct mm_struct *mm, -+ unsigned long uaddr) -+{ -+ u16 asid = mm_global_asid(mm); -+ -+ if (asid) { -+ invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false, false); -+ /* Do any CPUs supporting INVLPGB need PTI? */ -+ if (static_cpu_has(X86_FEATURE_PTI)) -+ invlpgb_flush_user_nr_nosync(user_pcid(asid), uaddr, 1, false, false); -+ -+ /* -+ * Some CPUs might still be using a local ASID for this -+ * process, and require IPIs, while others are using the -+ * global ASID. -+ * -+ * In this corner case we need to do both the broadcast -+ * TLB invalidation, and send IPIs. The IPIs will help -+ * stragglers transition to the broadcast ASID. 
-+ */ -+ if (in_asid_transition(mm)) -+ asid = 0; -+ } -+ -+ if (!asid) { -+ inc_mm_tlb_gen(mm); -+ cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); -+ } -+ -+ mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); -+} -+ - /* - * Blindly accessing user memory from NMI context can be dangerous - * if we're in the middle of switching the current user task or +@@ -1283,7 +1633,9 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) + * a local TLB flush is needed. Optimize this use-case by calling + * flush_tlb_func_local() directly in this case. + */ +- if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) { ++ if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) { ++ invlpgb_flush_all_nonglobals(); ++ } else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) { + flush_tlb_multi(&batch->cpumask, info); + } else if (cpumask_test_cpu(cpu, &batch->cpumask)) { + lockdep_assert_irqs_enabled(); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index d078de2c952b..38971c6dcd4b 100644 --- a/arch/x86/xen/mmu_pv.c @@ -2235,10 +2135,10 @@ index d078de2c952b..38971c6dcd4b 100644 .pgd_alloc = xen_pgd_alloc, .pgd_free = xen_pgd_free, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 332cee285662..29e6d8e6d0e5 100644 +index 14fc1b39c0cf..a199e299b0d4 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h -@@ -1401,6 +1401,7 @@ enum tlb_flush_reason { +@@ -1402,6 +1402,7 @@ enum tlb_flush_reason { TLB_LOCAL_SHOOTDOWN, TLB_LOCAL_MM_SHOOTDOWN, TLB_REMOTE_SEND_IPI, @@ -2333,9 +2233,9 @@ index 3ae84c3b8e6d..dc1c1057f26e 100644 -- 2.48.0.rc1 -From 29126d387284698bd160aeea6086ed8bafc53134 Mon Sep 17 00:00:00 2001 +From c6c1051263666de07c75e583032d94f8638a52b8 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:39:56 +0100 +Date: Fri, 7 Mar 2025 19:27:38 +0100 Subject: [PATCH 03/12] bbr3 Signed-off-by: Peter Jung @@ -2389,10 +2289,10 @@ index c7f42844c79a..170250145598 100644 #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ diff --git a/include/net/tcp.h b/include/net/tcp.h -index e9b37b76e894..419fda8c64e5 100644 +index bc04599547c3..1ac0efa5a854 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h -@@ -375,6 +375,8 @@ static inline void tcp_dec_quickack_mode(struct sock *sk) +@@ -376,6 +376,8 @@ static inline void tcp_dec_quickack_mode(struct sock *sk) #define TCP_ECN_QUEUE_CWR 2 #define TCP_ECN_DEMAND_CWR 4 #define TCP_ECN_SEEN 8 @@ -2401,7 +2301,7 @@ index e9b37b76e894..419fda8c64e5 100644 enum tcp_tw_status { TCP_TW_SUCCESS = 0, -@@ -779,6 +781,15 @@ static inline void tcp_fast_path_check(struct sock *sk) +@@ -793,6 +795,15 @@ static inline void tcp_fast_path_check(struct sock *sk) u32 tcp_delack_max(const struct sock *sk); @@ -2417,7 +2317,7 @@ index e9b37b76e894..419fda8c64e5 100644 /* Compute the actual rto_min value */ static inline u32 tcp_rto_min(const struct sock *sk) { -@@ -884,6 +895,11 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) +@@ -898,6 +909,11 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) return max_t(s64, t1 - t0, 0); } @@ -2429,7 +2329,7 @@ index e9b37b76e894..419fda8c64e5 100644 /* provide the departure time in us unit */ static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb) { -@@ -973,9 +989,14 @@ struct tcp_skb_cb { +@@ -987,9 +1003,14 @@ struct tcp_skb_cb { /* pkts S/ACKed so far upon tx of skb, incl retrans: */ __u32 delivered; /* start of send pipeline phase */ @@ -2446,7 +2346,7 @@ index e9b37b76e894..419fda8c64e5 100644 } tx; /* only used for outgoing skbs 
*/ union { struct inet_skb_parm h4; -@@ -1088,6 +1109,7 @@ enum tcp_ca_event { +@@ -1102,6 +1123,7 @@ enum tcp_ca_event { CA_EVENT_LOSS, /* loss timeout */ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ @@ -2454,7 +2354,7 @@ index e9b37b76e894..419fda8c64e5 100644 }; /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ -@@ -1110,7 +1132,11 @@ enum tcp_ca_ack_event_flags { +@@ -1124,7 +1146,11 @@ enum tcp_ca_ack_event_flags { #define TCP_CONG_NON_RESTRICTED 0x1 /* Requires ECN/ECT set on all packets */ #define TCP_CONG_NEEDS_ECN 0x2 @@ -2467,7 +2367,7 @@ index e9b37b76e894..419fda8c64e5 100644 union tcp_cc_info; -@@ -1130,10 +1156,13 @@ struct ack_sample { +@@ -1144,10 +1170,13 @@ struct ack_sample { */ struct rate_sample { u64 prior_mstamp; /* starting timestamp for interval */ @@ -2482,7 +2382,7 @@ index e9b37b76e894..419fda8c64e5 100644 long interval_us; /* time for tp->delivered to incr "delivered" */ u32 snd_interval_us; /* snd interval for delivered packets */ u32 rcv_interval_us; /* rcv interval for delivered packets */ -@@ -1144,7 +1173,9 @@ struct rate_sample { +@@ -1158,7 +1187,9 @@ struct rate_sample { u32 last_end_seq; /* end_seq of most recently ACKed packet */ bool is_app_limited; /* is sample from packet with bubble in pipe? */ bool is_retrans; /* is sample from retransmission? */ @@ -2492,7 +2392,7 @@ index e9b37b76e894..419fda8c64e5 100644 }; struct tcp_congestion_ops { -@@ -1168,8 +1199,11 @@ struct tcp_congestion_ops { +@@ -1182,8 +1213,11 @@ struct tcp_congestion_ops { /* hook for packet ack accounting (optional) */ void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); @@ -2506,7 +2406,7 @@ index e9b37b76e894..419fda8c64e5 100644 /* call when packets are delivered to update cwnd and pacing rate, * after all the ca_state processing. (optional) -@@ -1235,6 +1269,14 @@ static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer) +@@ -1249,6 +1283,14 @@ static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer) } #endif @@ -2521,7 +2421,7 @@ index e9b37b76e894..419fda8c64e5 100644 static inline bool tcp_ca_needs_ecn(const struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); -@@ -1254,6 +1296,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) +@@ -1268,6 +1310,7 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) void tcp_set_ca_state(struct sock *sk, const u8 ca_state); /* From tcp_rate.c */ @@ -2529,7 +2429,7 @@ index e9b37b76e894..419fda8c64e5 100644 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb); void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, struct rate_sample *rs); -@@ -1266,6 +1309,21 @@ static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2) +@@ -1280,6 +1323,21 @@ static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2) return t1 > t2 || (t1 == t2 && after(seq1, seq2)); } @@ -2551,7 +2451,7 @@ index e9b37b76e894..419fda8c64e5 100644 /* These functions determine how the current flow behaves in respect of SACK * handling. SACK is negotiated with the peer, and therefore it can vary * between different flows. 
-@@ -2417,7 +2475,7 @@ struct tcp_plb_state { +@@ -2431,7 +2489,7 @@ struct tcp_plb_state { u8 consec_cong_rounds:5, /* consecutive congested rounds */ unused:3; u32 pause_until; /* jiffies32 when PLB can resume rerouting */ @@ -2690,10 +2590,10 @@ index 554804774628..2279e6e7bc9c 100644 .undo_cwnd = bpf_tcp_ca_undo_cwnd, .sndbuf_expand = bpf_tcp_ca_sndbuf_expand, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index 0d704bda6c41..d652078f6aec 100644 +index d74281eca14f..61aa756120ad 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c -@@ -3385,6 +3385,7 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -3379,6 +3379,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->rx_opt.dsack = 0; tp->rx_opt.num_sacks = 0; tp->rcv_ooopack = 0; @@ -2701,7 +2601,7 @@ index 0d704bda6c41..d652078f6aec 100644 /* Clean up fastopen related fields */ -@@ -4111,6 +4112,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) +@@ -4105,6 +4106,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_options |= TCPI_OPT_ECN; if (tp->ecn_flags & TCP_ECN_SEEN) info->tcpi_options |= TCPI_OPT_ECN_SEEN; @@ -5367,10 +5267,10 @@ index df758adbb445..e98e5dbc050e 100644 icsk->icsk_ca_ops->init(sk); if (tcp_ca_needs_ecn(sk)) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c -index 4811727b8a02..ba8b714fb693 100644 +index 0ee22e10fcfa..492c143aed1b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c -@@ -370,7 +370,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) +@@ -376,7 +376,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) tcp_enter_quickack_mode(sk, 2); break; case INET_ECN_CE: @@ -5379,7 +5279,7 @@ index 4811727b8a02..ba8b714fb693 100644 tcp_ca_event(sk, CA_EVENT_ECN_IS_CE); if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { -@@ -381,7 +381,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) +@@ -387,7 +387,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) tp->ecn_flags |= TCP_ECN_SEEN; break; default: @@ -5388,7 +5288,7 @@ index 4811727b8a02..ba8b714fb693 100644 tcp_ca_event(sk, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; break; -@@ -1120,7 +1120,12 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) +@@ -1126,7 +1126,12 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) */ static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb) { @@ -5401,7 +5301,7 @@ index 4811727b8a02..ba8b714fb693 100644 } void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb) -@@ -1501,6 +1506,17 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, +@@ -1507,6 +1512,17 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev, WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); @@ -5419,7 +5319,7 @@ index 4811727b8a02..ba8b714fb693 100644 /* When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK * code can come after this skb later on it's better to keep -@@ -3826,7 +3842,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +@@ -3832,7 +3848,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) /* This routine deals with acks during a TLP episode and ends an episode by * resetting tlp_high_seq. 
Ref: TLP algorithm in draft-ietf-tcpm-rack */ @@ -5429,7 +5329,7 @@ index 4811727b8a02..ba8b714fb693 100644 { struct tcp_sock *tp = tcp_sk(sk); -@@ -3843,6 +3860,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) +@@ -3849,6 +3866,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) /* ACK advances: there was a loss, so reduce cwnd. Reset * tlp_high_seq in tcp_init_cwnd_reduction() */ @@ -5437,7 +5337,7 @@ index 4811727b8a02..ba8b714fb693 100644 tcp_init_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_CWR); tcp_end_cwnd_reduction(sk); -@@ -3853,6 +3871,11 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) +@@ -3859,6 +3877,11 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) FLAG_NOT_DUP | FLAG_DATA_SACKED))) { /* Pure dupack: original and TLP probe arrived; no loss */ tp->tlp_high_seq = 0; @@ -5449,7 +5349,7 @@ index 4811727b8a02..ba8b714fb693 100644 } } -@@ -3961,6 +3984,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) +@@ -3967,6 +3990,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; rs.prior_in_flight = tcp_packets_in_flight(tp); @@ -5457,7 +5357,7 @@ index 4811727b8a02..ba8b714fb693 100644 /* ts_recent update must be made after we are sure that the packet * is in window. -@@ -4035,7 +4059,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) +@@ -4041,7 +4065,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_rack_update_reo_wnd(sk, &rs); if (tp->tlp_high_seq) @@ -5466,7 +5366,7 @@ index 4811727b8a02..ba8b714fb693 100644 if (tcp_ack_is_dubious(sk, flag)) { if (!(flag & (FLAG_SND_UNA_ADVANCED | -@@ -4059,6 +4083,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) +@@ -4065,6 +4089,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) delivered = tcp_newly_delivered(sk, delivered, flag); lost = tp->lost - lost; /* freshly marked lost */ rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED); @@ -5474,7 +5374,7 @@ index 4811727b8a02..ba8b714fb693 100644 tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate); tcp_cong_control(sk, ack, delivered, flag, sack_state.rate); tcp_xmit_recovery(sk, rexmit); -@@ -4078,7 +4103,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) +@@ -4084,7 +4109,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_ack_probe(sk); if (tp->tlp_high_seq) @@ -5483,7 +5383,7 @@ index 4811727b8a02..ba8b714fb693 100644 return 1; old_ack: -@@ -5752,13 +5777,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) +@@ -5758,13 +5783,14 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) /* More than one full frame received... 
*/ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && @@ -5501,7 +5401,7 @@ index 4811727b8a02..ba8b714fb693 100644 tcp_in_quickack_mode(sk) || /* Protocol state mandates a one-time immediate ACK */ diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c -index 7121d8573928..696afe8cfda8 100644 +index 789e495d3bd6..dea9123e5c5d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -466,6 +466,8 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) @@ -5719,9 +5619,9 @@ index b412ed88ccd9..d70f8b742b21 100644 -- 2.48.0.rc1 -From a0e342962d38af24849200d7089cbd54d6576fe8 Mon Sep 17 00:00:00 2001 +From ae9742e9f4e37e8cdd4bb80d00bc91e1a69e2eaf Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:40:07 +0100 +Date: Fri, 7 Mar 2025 19:28:39 +0100 Subject: [PATCH 04/12] cachy Signed-off-by: Peter Jung @@ -5734,6 +5634,9 @@ Signed-off-by: Peter Jung arch/x86/include/asm/pci.h | 6 + arch/x86/include/asm/vermagic.h | 72 + arch/x86/pci/common.c | 7 +- + block/Kconfig.iosched | 9 + + block/Makefile | 8 + + block/adios.c | 1352 +++++++ block/elevator.c | 8 + drivers/Makefile | 13 +- drivers/ata/ahci.c | 23 +- @@ -5798,7 +5701,8 @@ Signed-off-by: Peter Jung mm/vmpressure.c | 4 + mm/vmscan.c | 143 + net/ipv4/inet_connection_sock.c | 2 +- - 72 files changed, 6714 insertions(+), 93 deletions(-) + 75 files changed, 8083 insertions(+), 93 deletions(-) + create mode 100644 block/adios.c create mode 100644 drivers/media/v4l2-core/v4l2loopback.c create mode 100644 drivers/media/v4l2-core/v4l2loopback.h create mode 100644 drivers/media/v4l2-core/v4l2loopback_formats.h @@ -5935,7 +5839,7 @@ index f48eaa98d22d..fc777c14cff6 100644 unprivileged_userfaultfd ======================== diff --git a/Makefile b/Makefile -index c436a6e64971..c6bd6363ed96 100644 +index f49182f3bae1..5da1041d0860 100644 --- a/Makefile +++ b/Makefile @@ -860,11 +860,19 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks @@ -5959,7 +5863,7 @@ index c436a6e64971..c6bd6363ed96 100644 # depends on `opt-level` and `debug-assertions`, respectively. KBUILD_RUSTFLAGS += -Cdebug-assertions=$(if $(CONFIG_RUST_DEBUG_ASSERTIONS),y,n) diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu -index abe013a1b076..b4ee329777ae 100644 +index 25c55cc17c5e..990c4e91e551 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -155,9 +155,8 @@ config MPENTIUM4 @@ -6646,6 +6550,1408 @@ index ddb798603201..7c20387d8202 100644 return dev; } -#endif +diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched +index 27f11320b8d1..dd9af8cd7bca 100644 +--- a/block/Kconfig.iosched ++++ b/block/Kconfig.iosched +@@ -16,6 +16,15 @@ config MQ_IOSCHED_KYBER + synchronous writes, it will self-tune queue depths to achieve that + goal. + ++config MQ_IOSCHED_ADIOS ++ tristate "Adaptive Deadline I/O scheduler" ++ default y ++ help ++ ADIOS is a multi-queue I/O scheduler for the Linux kernel, based on ++ mq-deadline and Kyber, with learning-based adaptive latency control. ++ It aims to provide low latency for synchronous requests while ++ maintaining high throughput for asynchronous requests and bulk I/O. 
++ + config IOSCHED_BFQ + tristate "BFQ I/O scheduler" + select BLK_ICQ +diff --git a/block/Makefile b/block/Makefile +index ddfd21c1a9ff..ebe14e94d123 100644 +--- a/block/Makefile ++++ b/block/Makefile +@@ -23,6 +23,7 @@ obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o + obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o + obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o + obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o ++obj-$(CONFIG_MQ_IOSCHED_ADIOS) += adios.o + bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o + obj-$(CONFIG_IOSCHED_BFQ) += bfq.o + +@@ -38,3 +39,10 @@ obj-$(CONFIG_BLK_INLINE_ENCRYPTION) += blk-crypto.o blk-crypto-profile.o \ + blk-crypto-sysfs.o + obj-$(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) += blk-crypto-fallback.o + obj-$(CONFIG_BLOCK_HOLDER_DEPRECATED) += holder.o ++ ++all: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules ++ ++clean: ++ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean ++ +diff --git a/block/adios.c b/block/adios.c +new file mode 100644 +index 000000000000..301beb812ebb +--- /dev/null ++++ b/block/adios.c +@@ -0,0 +1,1352 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * The Adaptive Deadline I/O Scheduler (ADIOS) ++ * Based on mq-deadline and Kyber, ++ * with learning-based adaptive latency control ++ * ++ * Copyright (C) 2025 Masahito Suzuki ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "elevator.h" ++#include "blk.h" ++#include "blk-mq.h" ++#include "blk-mq-sched.h" ++ ++#define ADIOS_VERSION "1.5.3" ++ ++// Define operation types supported by ADIOS ++enum adios_op_type { ++ ADIOS_READ = 0, ++ ADIOS_WRITE = 1, ++ ADIOS_DISCARD = 2, ++ ADIOS_OTHER = 3, ++ ADIOS_OPTYPES = 4, ++}; ++ ++// Global variable to control the latency ++static u64 default_global_latency_window = 16000000ULL; ++// Ratio below which batch queues should be refilled ++static u8 default_bq_refill_below_ratio = 15; ++ ++// Dynamic thresholds for shrinkage ++static u32 default_lm_shrink_at_kreqs = 10000; ++static u32 default_lm_shrink_at_gbytes = 100; ++static u32 default_lm_shrink_resist = 2; ++ ++// Latency targets for each operation type ++static u64 default_latency_target[ADIOS_OPTYPES] = { ++ [ADIOS_READ] = 1ULL * NSEC_PER_MSEC, ++ [ADIOS_WRITE] = 2000ULL * NSEC_PER_MSEC, ++ [ADIOS_DISCARD] = 8000ULL * NSEC_PER_MSEC, ++ [ADIOS_OTHER] = 0ULL * NSEC_PER_MSEC, ++}; ++ ++// Maximum batch size limits for each operation type ++static u32 default_batch_limit[ADIOS_OPTYPES] = { ++ [ADIOS_READ] = 24, ++ [ADIOS_WRITE] = 48, ++ [ADIOS_DISCARD] = 1, ++ [ADIOS_OTHER] = 1, ++}; ++ ++static u32 default_dl_prio[2] = { ++ [0] = 7, ++ [1] = 0, ++}; ++ ++// Thresholds for latency model control ++#define LM_BLOCK_SIZE_THRESHOLD 4096 ++#define LM_SAMPLES_THRESHOLD 1024 ++#define LM_INTERVAL_THRESHOLD 1500 ++#define LM_OUTLIER_PERCENTILE 99 ++#define LM_LAT_BUCKET_COUNT 64 ++ ++// Structure to hold latency bucket data for small requests ++struct latency_bucket_small { ++ u64 sum_latency; ++ u32 count; ++}; ++ ++// Structure to hold latency bucket data for large requests ++struct latency_bucket_large { ++ u64 sum_latency; ++ u64 sum_block_size; ++ u32 count; ++}; ++ ++// Structure to hold the latency model context data ++struct latency_model { ++ spinlock_t lock; ++ u64 base; ++ u64 slope; ++ u64 small_sum_delay; ++ u64 small_count; ++ u64 large_sum_delay; ++ u64 large_sum_bsize; ++ u64 last_update_jiffies; ++ ++ spinlock_t buckets_lock; ++ struct 
latency_bucket_small small_bucket[LM_LAT_BUCKET_COUNT]; ++ struct latency_bucket_large large_bucket[LM_LAT_BUCKET_COUNT]; ++ ++ u32 lm_shrink_at_kreqs; ++ u32 lm_shrink_at_gbytes; ++ u8 lm_shrink_resist; ++}; ++ ++#define ADIOS_BQ_PAGES 2 ++ ++// Adios scheduler data ++struct adios_data { ++ spinlock_t pq_lock; ++ struct list_head prio_queue; ++ ++ struct rb_root_cached dl_tree[2]; ++ spinlock_t lock; ++ u8 dl_queued; ++ s64 dl_bias; ++ s32 dl_prio[2]; ++ ++ u64 global_latency_window; ++ u64 latency_target[ADIOS_OPTYPES]; ++ u32 batch_limit[ADIOS_OPTYPES]; ++ u32 batch_actual_max_size[ADIOS_OPTYPES]; ++ u32 batch_actual_max_total; ++ u32 async_depth; ++ u8 bq_refill_below_ratio; ++ ++ u8 bq_page; ++ bool more_bq_ready; ++ struct list_head batch_queue[ADIOS_BQ_PAGES][ADIOS_OPTYPES]; ++ u32 batch_count[ADIOS_BQ_PAGES][ADIOS_OPTYPES]; ++ spinlock_t bq_lock; ++ ++ struct latency_model latency_model[ADIOS_OPTYPES]; ++ struct timer_list update_timer; ++ ++ atomic64_t total_pred_lat; ++ ++ struct kmem_cache *rq_data_pool; ++ struct kmem_cache *dl_group_pool; ++}; ++ ++// List of requests with the same deadline in the deadline-sorted tree ++struct dl_group { ++ struct rb_node node; ++ struct list_head rqs; ++ u64 deadline; ++} __attribute__((aligned(64))); ++ ++// Structure to hold scheduler-specific data for each request ++struct adios_rq_data { ++ struct list_head *dl_group; ++ struct list_head dl_node; ++ ++ struct request *rq; ++ u64 deadline; ++ u64 pred_lat; ++ u32 block_size; ++} __attribute__((aligned(64))); ++ ++static const int adios_prio_to_weight[40] = { ++ /* -20 */ 88761, 71755, 56483, 46273, 36291, ++ /* -15 */ 29154, 23254, 18705, 14949, 11916, ++ /* -10 */ 9548, 7620, 6100, 4904, 3906, ++ /* -5 */ 3121, 2501, 1991, 1586, 1277, ++ /* 0 */ 1024, 820, 655, 526, 423, ++ /* 5 */ 335, 272, 215, 172, 137, ++ /* 10 */ 110, 87, 70, 56, 45, ++ /* 15 */ 36, 29, 23, 18, 15, ++}; ++ ++// Count the number of entries in small buckets ++static u32 lm_count_small_entries(struct latency_model *model) { ++ u32 total_count = 0; ++ for (u8 i = 0; i < LM_LAT_BUCKET_COUNT; i++) ++ total_count += model->small_bucket[i].count; ++ return total_count; ++} ++ ++// Update the small buckets in the latency model ++static bool lm_update_small_buckets(struct latency_model *model, ++ u32 total_count, bool count_all) { ++ u64 sum_latency = 0; ++ u32 sum_count = 0; ++ u32 cumulative_count = 0, threshold_count = 0; ++ u8 outlier_threshold_bucket = 0; ++ u8 outlier_percentile = LM_OUTLIER_PERCENTILE; ++ u8 reduction; ++ ++ if (count_all) ++ outlier_percentile = 100; ++ ++ // Calculate the threshold count for outlier detection ++ threshold_count = (total_count * outlier_percentile) / 100; ++ ++ // Identify the bucket that corresponds to the outlier threshold ++ for (u8 i = 0; i < LM_LAT_BUCKET_COUNT; i++) { ++ cumulative_count += model->small_bucket[i].count; ++ if (cumulative_count >= threshold_count) { ++ outlier_threshold_bucket = i; ++ break; ++ } ++ } ++ ++ // Calculate the average latency, excluding outliers ++ for (u8 i = 0; i <= outlier_threshold_bucket; i++) { ++ struct latency_bucket_small *bucket = &model->small_bucket[i]; ++ if (i < outlier_threshold_bucket) { ++ sum_latency += bucket->sum_latency; ++ sum_count += bucket->count; ++ } else { ++ // The threshold bucket's contribution is proportional ++ u64 remaining_count = ++ threshold_count - (cumulative_count - bucket->count); ++ if (bucket->count > 0) { ++ sum_latency += ++ (bucket->sum_latency * remaining_count) / bucket->count; ++ sum_count += 
remaining_count; ++ } ++ } ++ } ++ ++ // Shrink the model if it reaches at the readjustment threshold ++ if (model->small_count >= 1000ULL * model->lm_shrink_at_kreqs) { ++ reduction = model->lm_shrink_resist; ++ if (model->small_count >> reduction) { ++ model->small_sum_delay -= model->small_sum_delay >> reduction; ++ model->small_count -= model->small_count >> reduction; ++ } ++ } ++ ++ // Accumulate the average latency into the statistics ++ model->small_sum_delay += sum_latency; ++ model->small_count += sum_count; ++ ++ // Reset small bucket information ++ memset(model->small_bucket, 0, ++ sizeof(model->small_bucket[0]) * LM_LAT_BUCKET_COUNT); ++ ++ return true; ++} ++ ++// Count the number of entries in large buckets ++static u32 lm_count_large_entries(struct latency_model *model) { ++ u32 total_count = 0; ++ for (u8 i = 0; i < LM_LAT_BUCKET_COUNT; i++) ++ total_count += model->large_bucket[i].count; ++ return total_count; ++} ++ ++// Update the large buckets in the latency model ++static bool lm_update_large_buckets( ++ struct latency_model *model, ++ u32 total_count, bool count_all) { ++ s64 sum_latency = 0; ++ u64 sum_block_size = 0, intercept; ++ u32 cumulative_count = 0, threshold_count = 0; ++ u8 outlier_threshold_bucket = 0; ++ u8 outlier_percentile = LM_OUTLIER_PERCENTILE; ++ u8 reduction; ++ ++ if (count_all) ++ outlier_percentile = 100; ++ ++ // Calculate the threshold count for outlier detection ++ threshold_count = (total_count * outlier_percentile) / 100; ++ ++ // Identify the bucket that corresponds to the outlier threshold ++ for (u8 i = 0; i < LM_LAT_BUCKET_COUNT; i++) { ++ cumulative_count += model->large_bucket[i].count; ++ if (cumulative_count >= threshold_count) { ++ outlier_threshold_bucket = i; ++ break; ++ } ++ } ++ ++ // Calculate the average latency and block size, excluding outliers ++ for (u8 i = 0; i <= outlier_threshold_bucket; i++) { ++ struct latency_bucket_large *bucket = &model->large_bucket[i]; ++ if (i < outlier_threshold_bucket) { ++ sum_latency += bucket->sum_latency; ++ sum_block_size += bucket->sum_block_size; ++ } else { ++ // The threshold bucket's contribution is proportional ++ u64 remaining_count = ++ threshold_count - (cumulative_count - bucket->count); ++ if (bucket->count > 0) { ++ sum_latency += ++ (bucket->sum_latency * remaining_count) / bucket->count; ++ sum_block_size += ++ (bucket->sum_block_size * remaining_count) / bucket->count; ++ } ++ } ++ } ++ ++ // Shrink the model if it reaches at the readjustment threshold ++ if (model->large_sum_bsize >= 0x40000000ULL * model->lm_shrink_at_gbytes) { ++ reduction = model->lm_shrink_resist; ++ if (model->large_sum_bsize >> reduction) { ++ model->large_sum_delay -= model->large_sum_delay >> reduction; ++ model->large_sum_bsize -= model->large_sum_bsize >> reduction; ++ } ++ } ++ ++ // Accumulate the average delay into the statistics ++ intercept = model->base * threshold_count; ++ if (sum_latency > intercept) ++ sum_latency -= intercept; ++ ++ model->large_sum_delay += sum_latency; ++ model->large_sum_bsize += sum_block_size; ++ ++ // Reset large bucket information ++ memset(model->large_bucket, 0, ++ sizeof(model->large_bucket[0]) * LM_LAT_BUCKET_COUNT); ++ ++ return true; ++} ++ ++// Update the latency model parameters and statistics ++static void latency_model_update(struct latency_model *model) { ++ unsigned long flags; ++ u64 now; ++ u32 small_count, large_count; ++ bool time_elapsed; ++ bool small_processed = false, large_processed = false; ++ ++ guard(spinlock_irqsave)(&model->lock); 
++ ++ spin_lock_irqsave(&model->buckets_lock, flags); ++ ++ // Whether enough time has elapsed since the last update ++ now = jiffies; ++ time_elapsed = unlikely(!model->base) || model->last_update_jiffies + ++ msecs_to_jiffies(LM_INTERVAL_THRESHOLD) <= now; ++ ++ // Count the number of entries in buckets ++ small_count = lm_count_small_entries(model); ++ large_count = lm_count_large_entries(model); ++ ++ // Update small buckets ++ if (small_count && (time_elapsed || ++ LM_SAMPLES_THRESHOLD <= small_count || !model->base)) ++ small_processed = lm_update_small_buckets( ++ model, small_count, !model->base); ++ // Update large buckets ++ if (large_count && (time_elapsed || ++ LM_SAMPLES_THRESHOLD <= large_count || !model->slope)) ++ large_processed = lm_update_large_buckets( ++ model, large_count, !model->slope); ++ ++ spin_unlock_irqrestore(&model->buckets_lock, flags); ++ ++ // Update the base parameter if small bucket was processed ++ if (small_processed && likely(model->small_count)) ++ model->base = div_u64(model->small_sum_delay, model->small_count); ++ ++ // Update the slope parameter if large bucket was processed ++ if (large_processed && likely(model->large_sum_bsize)) ++ model->slope = div_u64(model->large_sum_delay, ++ DIV_ROUND_UP_ULL(model->large_sum_bsize, 1024)); ++ ++ // Reset statistics and update last updated jiffies if time has elapsed ++ if (time_elapsed) ++ model->last_update_jiffies = now; ++} ++ ++// Determine the bucket index for a given measured and predicted latency ++static u8 lm_input_bucket_index( ++ struct latency_model *model, u64 measured, u64 predicted) { ++ u8 bucket_index; ++ ++ if (measured < predicted * 2) ++ bucket_index = (measured * 20) / predicted; ++ else if (measured < predicted * 5) ++ bucket_index = (measured * 10) / predicted + 20; ++ else ++ bucket_index = (measured * 3) / predicted + 40; ++ ++ return bucket_index; ++} ++ ++// Input latency data into the latency model ++static void latency_model_input(struct latency_model *model, ++ u32 block_size, u64 latency, u64 pred_lat) { ++ unsigned long flags; ++ u8 bucket_index; ++ ++ spin_lock_irqsave(&model->buckets_lock, flags); ++ ++ if (block_size <= LM_BLOCK_SIZE_THRESHOLD) { ++ // Handle small requests ++ ++ bucket_index = ++ lm_input_bucket_index(model, latency, (model->base ?: 1)); ++ ++ if (bucket_index >= LM_LAT_BUCKET_COUNT) ++ bucket_index = LM_LAT_BUCKET_COUNT - 1; ++ ++ model->small_bucket[bucket_index].count++; ++ model->small_bucket[bucket_index].sum_latency += latency; ++ ++ if (unlikely(!model->base)) { ++ spin_unlock_irqrestore(&model->buckets_lock, flags); ++ latency_model_update(model); ++ return; ++ } ++ } else { ++ // Handle large requests ++ if (!model->base || !pred_lat) { ++ spin_unlock_irqrestore(&model->buckets_lock, flags); ++ return; ++ } ++ ++ bucket_index = ++ lm_input_bucket_index(model, latency, pred_lat); ++ ++ if (bucket_index >= LM_LAT_BUCKET_COUNT) ++ bucket_index = LM_LAT_BUCKET_COUNT - 1; ++ ++ model->large_bucket[bucket_index].count++; ++ model->large_bucket[bucket_index].sum_latency += latency; ++ model->large_bucket[bucket_index].sum_block_size += block_size; ++ } ++ ++ spin_unlock_irqrestore(&model->buckets_lock, flags); ++} ++ ++// Predict the latency for a given block size using the latency model ++static u64 latency_model_predict(struct latency_model *model, u32 block_size) { ++ u64 result; ++ ++ guard(spinlock_irqsave)(&model->lock); ++ // Predict latency based on the model ++ result = model->base; ++ if (block_size > LM_BLOCK_SIZE_THRESHOLD) ++ result += 
model->slope * ++ DIV_ROUND_UP_ULL(block_size - LM_BLOCK_SIZE_THRESHOLD, 1024); ++ ++ return result; ++} ++ ++// Determine the type of operation based on request flags ++static u8 adios_optype(struct request *rq) { ++ blk_opf_t opf = rq->cmd_flags; ++ switch (opf & REQ_OP_MASK) { ++ case REQ_OP_READ: ++ return ADIOS_READ; ++ case REQ_OP_WRITE: ++ return ADIOS_WRITE; ++ case REQ_OP_DISCARD: ++ return ADIOS_DISCARD; ++ default: ++ return ADIOS_OTHER; ++ } ++} ++ ++static inline u8 adios_optype_not_read(struct request *rq) { ++ return (rq->cmd_flags & REQ_OP_MASK) != REQ_OP_READ; ++} ++ ++// Helper function to retrieve adios_rq_data from a request ++static inline struct adios_rq_data *get_rq_data(struct request *rq) { ++ return (struct adios_rq_data *)rq->elv.priv[0]; ++} ++ ++// Add a request to the deadline-sorted red-black tree ++static void add_to_dl_tree( ++ struct adios_data *ad, bool dl_idx, struct request *rq) { ++ struct rb_root_cached *root = &ad->dl_tree[dl_idx]; ++ struct rb_node **link = &(root->rb_root.rb_node), *parent = NULL; ++ bool leftmost = true; ++ struct adios_rq_data *rd = get_rq_data(rq); ++ struct dl_group *dlg; ++ ++ rd->block_size = blk_rq_bytes(rq); ++ u8 optype = adios_optype(rq); ++ rd->pred_lat = ++ latency_model_predict(&ad->latency_model[optype], rd->block_size); ++ rd->deadline = ++ rq->start_time_ns + ad->latency_target[optype] + rd->pred_lat; ++ ++ while (*link) { ++ dlg = rb_entry(*link, struct dl_group, node); ++ s64 diff = rd->deadline - dlg->deadline; ++ ++ parent = *link; ++ if (diff < 0) { ++ link = &((*link)->rb_left); ++ } else if (diff > 0) { ++ link = &((*link)->rb_right); ++ leftmost = false; ++ } else { // diff == 0 ++ goto found; ++ } ++ } ++ ++ dlg = rb_entry_safe(parent, struct dl_group, node); ++ if (!dlg || dlg->deadline != rd->deadline) { ++ dlg = kmem_cache_zalloc(ad->dl_group_pool, GFP_ATOMIC); ++ if (!dlg) ++ return; ++ dlg->deadline = rd->deadline; ++ INIT_LIST_HEAD(&dlg->rqs); ++ rb_link_node(&dlg->node, parent, link); ++ rb_insert_color_cached(&dlg->node, root, leftmost); ++ } ++found: ++ list_add_tail(&rd->dl_node, &dlg->rqs); ++ rd->dl_group = &dlg->rqs; ++ ad->dl_queued |= 1 << dl_idx; ++} ++ ++// Remove a request from the deadline-sorted red-black tree ++static void del_from_dl_tree( ++ struct adios_data *ad, bool dl_idx, struct request *rq) { ++ struct rb_root_cached *root = &ad->dl_tree[dl_idx]; ++ struct adios_rq_data *rd = get_rq_data(rq); ++ struct dl_group *dlg = container_of(rd->dl_group, struct dl_group, rqs); ++ ++ list_del_init(&rd->dl_node); ++ if (list_empty(&dlg->rqs)) { ++ rb_erase_cached(&dlg->node, root); ++ kmem_cache_free(ad->dl_group_pool, dlg); ++ } ++ rd->dl_group = NULL; ++ ++ if (RB_EMPTY_ROOT(&ad->dl_tree[dl_idx].rb_root)) ++ ad->dl_queued &= ~(1 << dl_idx); ++} ++ ++// Remove a request from the scheduler ++static void remove_request(struct adios_data *ad, struct request *rq) { ++ bool dl_idx = adios_optype_not_read(rq); ++ struct request_queue *q = rq->q; ++ struct adios_rq_data *rd = get_rq_data(rq); ++ ++ list_del_init(&rq->queuelist); ++ ++ // We might not be on the rbtree, if we are doing an insert merge ++ if (rd->dl_group) ++ del_from_dl_tree(ad, dl_idx, rq); ++ ++ elv_rqhash_del(q, rq); ++ if (q->last_merge == rq) ++ q->last_merge = NULL; ++} ++ ++// Convert a queue depth to the corresponding word depth for shallow allocation ++static int to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth) { ++ struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags; ++ const unsigned int nrr = 
hctx->queue->nr_requests; ++ ++ return ((qdepth << bt->sb.shift) + nrr - 1) / nrr; ++} ++ ++// Limit the depth of request allocation for asynchronous and write requests ++static void adios_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) { ++ struct adios_data *ad = data->q->elevator->elevator_data; ++ ++ // Do not throttle synchronous reads ++ if (op_is_sync(opf) && !op_is_write(opf)) ++ return; ++ ++ data->shallow_depth = to_word_depth(data->hctx, ad->async_depth); ++} ++ ++// Update async_depth when the number of requests in the queue changes ++static void adios_depth_updated(struct blk_mq_hw_ctx *hctx) { ++ struct request_queue *q = hctx->queue; ++ struct adios_data *ad = q->elevator->elevator_data; ++ struct blk_mq_tags *tags = hctx->sched_tags; ++ ++ ad->async_depth = q->nr_requests; ++ ++ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1); ++} ++ ++// Handle request merging after a merge operation ++static void adios_request_merged(struct request_queue *q, struct request *req, ++ enum elv_merge type) { ++ bool dl_idx = adios_optype_not_read(req); ++ struct adios_data *ad = q->elevator->elevator_data; ++ ++ // if the merge was a front merge, we need to reposition request ++ if (type == ELEVATOR_FRONT_MERGE) { ++ del_from_dl_tree(ad, dl_idx, req); ++ add_to_dl_tree(ad, dl_idx, req); ++ } ++} ++ ++// Handle merging of requests after one has been merged into another ++static void adios_merged_requests(struct request_queue *q, struct request *req, ++ struct request *next) { ++ struct adios_data *ad = q->elevator->elevator_data; ++ ++ lockdep_assert_held(&ad->lock); ++ ++ // kill knowledge of next, this one is a goner ++ remove_request(ad, next); ++} ++ ++// Try to merge a bio into an existing rq before associating it with an rq ++static bool adios_bio_merge(struct request_queue *q, struct bio *bio, ++ unsigned int nr_segs) { ++ unsigned long flags; ++ struct adios_data *ad = q->elevator->elevator_data; ++ struct request *free = NULL; ++ bool ret; ++ ++ spin_lock_irqsave(&ad->lock, flags); ++ ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free); ++ spin_unlock_irqrestore(&ad->lock, flags); ++ ++ if (free) ++ blk_mq_free_request(free); ++ ++ return ret; ++} ++ ++// Insert a request into the scheduler ++static void insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, ++ blk_insert_t insert_flags, struct list_head *free) { ++ unsigned long flags; ++ bool dl_idx = adios_optype_not_read(rq); ++ struct request_queue *q = hctx->queue; ++ struct adios_data *ad = q->elevator->elevator_data; ++ ++ lockdep_assert_held(&ad->lock); ++ ++ if (insert_flags & BLK_MQ_INSERT_AT_HEAD) { ++ spin_lock_irqsave(&ad->pq_lock, flags); ++ list_add(&rq->queuelist, &ad->prio_queue); ++ spin_unlock_irqrestore(&ad->pq_lock, flags); ++ return; ++ } ++ ++ if (blk_mq_sched_try_insert_merge(q, rq, free)) ++ return; ++ ++ add_to_dl_tree(ad, dl_idx, rq); ++ ++ if (rq_mergeable(rq)) { ++ elv_rqhash_add(q, rq); ++ if (!q->last_merge) ++ q->last_merge = rq; ++ } ++} ++ ++// Insert multiple requests into the scheduler ++static void adios_insert_requests(struct blk_mq_hw_ctx *hctx, ++ struct list_head *list, ++ blk_insert_t insert_flags) { ++ unsigned long flags; ++ struct request_queue *q = hctx->queue; ++ struct adios_data *ad = q->elevator->elevator_data; ++ LIST_HEAD(free); ++ ++ spin_lock_irqsave(&ad->lock, flags); ++ while (!list_empty(list)) { ++ struct request *rq; ++ ++ rq = list_first_entry(list, struct request, queuelist); ++ list_del_init(&rq->queuelist); ++ insert_request(hctx, rq, 
insert_flags, &free); ++ } ++ spin_unlock_irqrestore(&ad->lock, flags); ++ ++ blk_mq_free_requests(&free); ++} ++ ++// Prepare a request before it is inserted into the scheduler ++static void adios_prepare_request(struct request *rq) { ++ struct adios_data *ad = rq->q->elevator->elevator_data; ++ struct adios_rq_data *rd; ++ ++ rq->elv.priv[0] = NULL; ++ ++ /* Allocate adios_rq_data from the memory pool */ ++ rd = kmem_cache_zalloc(ad->rq_data_pool, GFP_ATOMIC); ++ if (WARN(!rd, "adios_prepare_request: " ++ "Failed to allocate memory from rq_data_pool. rd is NULL\n")) ++ return; ++ ++ rd->rq = rq; ++ rq->elv.priv[0] = rd; ++} ++ ++static struct adios_rq_data *get_dl_first_rd(struct adios_data *ad, bool idx) { ++ struct rb_root_cached *root = &ad->dl_tree[idx]; ++ struct rb_node *first = rb_first_cached(root); ++ struct dl_group *dl_group = rb_entry(first, struct dl_group, node); ++ struct adios_rq_data *rd = ++ list_first_entry(&dl_group->rqs, struct adios_rq_data, dl_node); ++ ++ return rd; ++} ++ ++// Select the next request to dispatch from the deadline-sorted red-black tree ++static struct request *next_request(struct adios_data *ad) { ++ struct adios_rq_data *rd; ++ bool dl_idx, bias_idx, reduce_bias; ++ ++ if (!ad->dl_queued) ++ return NULL; ++ ++ dl_idx = ad->dl_queued >> 1; ++ rd = get_dl_first_rd(ad, dl_idx); ++ ++ bias_idx = ad->dl_bias < 0; ++ reduce_bias = (bias_idx == dl_idx); ++ ++ if (ad->dl_queued == 0x3) { ++ struct adios_rq_data *trd[2]; ++ trd[0] = get_dl_first_rd(ad, 0); ++ trd[1] = rd; ++ ++ rd = trd[bias_idx]; ++ ++ reduce_bias = ++ (trd[bias_idx]->deadline > trd[((u8)bias_idx + 1) % 2]->deadline); ++ } ++ ++ if (reduce_bias) { ++ s64 sign = ((int)bias_idx << 1) - 1; ++ if (unlikely(!rd->pred_lat)) ++ ad->dl_bias = sign; ++ else { ++ ad->dl_bias += sign * (s64)((rd->pred_lat * ++ adios_prio_to_weight[ad->dl_prio[bias_idx] + 20]) >> 10); ++ } ++ } ++ ++ return rd->rq; ++} ++ ++// Reset the batch queue counts for a given page ++static void reset_batch_counts(struct adios_data *ad, u8 page) { ++ memset(&ad->batch_count[page], 0, sizeof(ad->batch_count[page])); ++} ++ ++// Initialize all batch queues ++static void init_batch_queues(struct adios_data *ad) { ++ for (u8 page = 0; page < ADIOS_BQ_PAGES; page++) { ++ reset_batch_counts(ad, page); ++ ++ for (u8 optype = 0; optype < ADIOS_OPTYPES; optype++) ++ INIT_LIST_HEAD(&ad->batch_queue[page][optype]); ++ } ++} ++ ++// Fill the batch queues with requests from the deadline-sorted red-black tree ++static bool fill_batch_queues(struct adios_data *ad, u64 current_lat) { ++ unsigned long flags; ++ u32 count = 0; ++ u32 optype_count[ADIOS_OPTYPES]; ++ memset(optype_count, 0, sizeof(optype_count)); ++ u8 page = (ad->bq_page + 1) % ADIOS_BQ_PAGES; ++ ++ reset_batch_counts(ad, page); ++ ++ spin_lock_irqsave(&ad->lock, flags); ++ while (true) { ++ struct request *rq = next_request(ad); ++ if (!rq) ++ break; ++ ++ struct adios_rq_data *rd = get_rq_data(rq); ++ u8 optype = adios_optype(rq); ++ current_lat += rd->pred_lat; ++ ++ // Check batch size and total predicted latency ++ if (count && (!ad->latency_model[optype].base || ++ ad->batch_count[page][optype] >= ad->batch_limit[optype] || ++ current_lat > ad->global_latency_window)) { ++ break; ++ } ++ ++ remove_request(ad, rq); ++ ++ // Add request to the corresponding batch queue ++ list_add_tail(&rq->queuelist, &ad->batch_queue[page][optype]); ++ ad->batch_count[page][optype]++; ++ atomic64_add(rd->pred_lat, &ad->total_pred_lat); ++ optype_count[optype]++; ++ count++; ++ } ++ 
spin_unlock_irqrestore(&ad->lock, flags); ++ ++ if (count) { ++ ad->more_bq_ready = true; ++ for (u8 optype = 0; optype < ADIOS_OPTYPES; optype++) { ++ if (ad->batch_actual_max_size[optype] < optype_count[optype]) ++ ad->batch_actual_max_size[optype] = optype_count[optype]; ++ } ++ if (ad->batch_actual_max_total < count) ++ ad->batch_actual_max_total = count; ++ } ++ return count; ++} ++ ++// Flip to the next batch queue page ++static void flip_bq_page(struct adios_data *ad) { ++ ad->more_bq_ready = false; ++ ad->bq_page = (ad->bq_page + 1) % ADIOS_BQ_PAGES; ++} ++ ++// Dispatch a request from the batch queues ++static struct request *dispatch_from_bq(struct adios_data *ad) { ++ struct request *rq = NULL; ++ u64 tpl; ++ ++ guard(spinlock_irqsave)(&ad->bq_lock); ++ ++ tpl = atomic64_read(&ad->total_pred_lat); ++ ++ if (!ad->more_bq_ready && (!tpl || ++ tpl < ad->global_latency_window * ad->bq_refill_below_ratio / 100)) ++ fill_batch_queues(ad, tpl); ++ ++again: ++ // Check if there are any requests in the batch queues ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) { ++ if (!list_empty(&ad->batch_queue[ad->bq_page][i])) { ++ rq = list_first_entry(&ad->batch_queue[ad->bq_page][i], ++ struct request, queuelist); ++ list_del_init(&rq->queuelist); ++ return rq; ++ } ++ } ++ ++ // If there's more batch queue page available, flip to it and retry ++ if (ad->more_bq_ready) { ++ flip_bq_page(ad); ++ goto again; ++ } ++ ++ return NULL; ++} ++ ++// Dispatch a request from the priority queue ++static struct request *dispatch_from_pq(struct adios_data *ad) { ++ struct request *rq = NULL; ++ ++ guard(spinlock_irqsave)(&ad->pq_lock); ++ ++ if (!list_empty(&ad->prio_queue)) { ++ rq = list_first_entry(&ad->prio_queue, struct request, queuelist); ++ list_del_init(&rq->queuelist); ++ } ++ return rq; ++} ++ ++// Dispatch a request to the hardware queue ++static struct request *adios_dispatch_request(struct blk_mq_hw_ctx *hctx) { ++ struct adios_data *ad = hctx->queue->elevator->elevator_data; ++ struct request *rq; ++ ++ rq = dispatch_from_pq(ad); ++ if (rq) goto found; ++ rq = dispatch_from_bq(ad); ++ if (!rq) return NULL; ++found: ++ rq->rq_flags |= RQF_STARTED; ++ return rq; ++} ++ ++// Timer callback function to periodically update latency models ++static void update_timer_callback(struct timer_list *t) { ++ struct adios_data *ad = from_timer(ad, t, update_timer); ++ ++ for (u8 optype = 0; optype < ADIOS_OPTYPES; optype++) ++ latency_model_update(&ad->latency_model[optype]); ++} ++ ++// Handle the completion of a request ++static void adios_completed_request(struct request *rq, u64 now) { ++ struct adios_data *ad = rq->q->elevator->elevator_data; ++ struct adios_rq_data *rd = get_rq_data(rq); ++ ++ atomic64_sub(rd->pred_lat, &ad->total_pred_lat); ++ ++ if (!rq->io_start_time_ns || !rd->block_size) ++ return; ++ u64 latency = now - rq->io_start_time_ns; ++ u8 optype = adios_optype(rq); ++ latency_model_input(&ad->latency_model[optype], ++ rd->block_size, latency, rd->pred_lat); ++ timer_reduce(&ad->update_timer, jiffies + msecs_to_jiffies(100)); ++} ++ ++// Clean up after a request is finished ++static void adios_finish_request(struct request *rq) { ++ struct adios_data *ad = rq->q->elevator->elevator_data; ++ ++ if (rq->elv.priv[0]) { ++ // Free adios_rq_data back to the memory pool ++ kmem_cache_free(ad->rq_data_pool, get_rq_data(rq)); ++ rq->elv.priv[0] = NULL; ++ } ++} ++ ++static inline bool pq_has_work(struct adios_data *ad) { ++ guard(spinlock_irqsave)(&ad->pq_lock); ++ return !list_empty(&ad->prio_queue); 
++} ++ ++static inline bool bq_has_work(struct adios_data *ad) { ++ guard(spinlock_irqsave)(&ad->bq_lock); ++ ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) ++ if (!list_empty(&ad->batch_queue[ad->bq_page][i])) ++ return true; ++ ++ return ad->more_bq_ready; ++} ++ ++static inline bool dl_tree_has_work(struct adios_data *ad) { ++ guard(spinlock_irqsave)(&ad->lock); ++ return ad->dl_queued; ++} ++ ++// Check if there are any requests available for dispatch ++static bool adios_has_work(struct blk_mq_hw_ctx *hctx) { ++ struct adios_data *ad = hctx->queue->elevator->elevator_data; ++ ++ return pq_has_work(ad) || bq_has_work(ad) || dl_tree_has_work(ad); ++} ++ ++// Initialize the scheduler-specific data for a hardware queue ++static int adios_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) { ++ adios_depth_updated(hctx); ++ return 0; ++} ++ ++// Initialize the scheduler-specific data when initializing the request queue ++static int adios_init_sched(struct request_queue *q, struct elevator_type *e) { ++ struct adios_data *ad; ++ struct elevator_queue *eq; ++ int ret = -ENOMEM; ++ ++ eq = elevator_alloc(q, e); ++ if (!eq) ++ return ret; ++ ++ ad = kzalloc_node(sizeof(*ad), GFP_KERNEL, q->node); ++ if (!ad) ++ goto put_eq; ++ ++ // Create a memory pool for adios_rq_data ++ ad->rq_data_pool = kmem_cache_create("rq_data_pool", ++ sizeof(struct adios_rq_data), ++ 0, SLAB_HWCACHE_ALIGN, NULL); ++ if (!ad->rq_data_pool) { ++ pr_err("adios: Failed to create rq_data_pool\n"); ++ goto free_ad; ++ } ++ ++ /* Create a memory pool for dl_group */ ++ ad->dl_group_pool = kmem_cache_create("dl_group_pool", ++ sizeof(struct dl_group), ++ 0, SLAB_HWCACHE_ALIGN, NULL); ++ if (!ad->dl_group_pool) { ++ pr_err("adios: Failed to create dl_group_pool\n"); ++ goto destroy_rq_data_pool; ++ } ++ ++ eq->elevator_data = ad; ++ ++ ad->global_latency_window = default_global_latency_window; ++ ad->bq_refill_below_ratio = default_bq_refill_below_ratio; ++ ++ INIT_LIST_HEAD(&ad->prio_queue); ++ for (u8 i = 0; i < 2; i++) ++ ad->dl_tree[i] = RB_ROOT_CACHED; ++ ad->dl_bias = 0; ++ ad->dl_queued = 0x0; ++ for (u8 i = 0; i < 2; i++) ++ ad->dl_prio[i] = default_dl_prio[i]; ++ ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) { ++ struct latency_model *model = &ad->latency_model[i]; ++ spin_lock_init(&model->lock); ++ spin_lock_init(&model->buckets_lock); ++ memset(model->small_bucket, 0, ++ sizeof(model->small_bucket[0]) * LM_LAT_BUCKET_COUNT); ++ memset(model->large_bucket, 0, ++ sizeof(model->large_bucket[0]) * LM_LAT_BUCKET_COUNT); ++ model->last_update_jiffies = jiffies; ++ model->lm_shrink_at_kreqs = default_lm_shrink_at_kreqs; ++ model->lm_shrink_at_gbytes = default_lm_shrink_at_gbytes; ++ model->lm_shrink_resist = default_lm_shrink_resist; ++ ++ ad->latency_target[i] = default_latency_target[i]; ++ ad->batch_limit[i] = default_batch_limit[i]; ++ } ++ timer_setup(&ad->update_timer, update_timer_callback, 0); ++ init_batch_queues(ad); ++ ++ spin_lock_init(&ad->lock); ++ spin_lock_init(&ad->pq_lock); ++ spin_lock_init(&ad->bq_lock); ++ ++ /* We dispatch from request queue wide instead of hw queue */ ++ blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q); ++ ++ q->elevator = eq; ++ return 0; ++ ++destroy_rq_data_pool: ++ kmem_cache_destroy(ad->rq_data_pool); ++free_ad: ++ kfree(ad); ++put_eq: ++ kobject_put(&eq->kobj); ++ return ret; ++} ++ ++// Clean up and free resources when exiting the scheduler ++static void adios_exit_sched(struct elevator_queue *e) { ++ struct adios_data *ad = e->elevator_data; ++ ++ 
timer_shutdown_sync(&ad->update_timer); ++ ++ WARN_ON_ONCE(!list_empty(&ad->prio_queue)); ++ ++ if (ad->rq_data_pool) ++ kmem_cache_destroy(ad->rq_data_pool); ++ ++ if (ad->dl_group_pool) ++ kmem_cache_destroy(ad->dl_group_pool); ++ ++ kfree(ad); ++} ++ ++// Define sysfs attributes for read operation latency model ++#define SYSFS_OPTYPE_DECL(name, optype) \ ++static ssize_t adios_lat_model_##name##_show( \ ++ struct elevator_queue *e, char *page) { \ ++ struct adios_data *ad = e->elevator_data; \ ++ struct latency_model *model = &ad->latency_model[optype]; \ ++ ssize_t len = 0; \ ++ guard(spinlock_irqsave)(&model->lock); \ ++ len += sprintf(page, "base : %llu ns\n", model->base); \ ++ len += sprintf(page + len, "slope: %llu ns/KiB\n", model->slope);\ ++ return len; \ ++} \ ++static ssize_t adios_lat_target_##name##_store( \ ++ struct elevator_queue *e, const char *page, size_t count) { \ ++ struct adios_data *ad = e->elevator_data; \ ++ unsigned long nsec; \ ++ int ret; \ ++ ret = kstrtoul(page, 10, &nsec); \ ++ if (ret) \ ++ return ret; \ ++ ad->latency_model[optype].base = 0ULL; \ ++ ad->latency_target[optype] = nsec; \ ++ return count; \ ++} \ ++static ssize_t adios_lat_target_##name##_show( \ ++ struct elevator_queue *e, char *page) { \ ++ struct adios_data *ad = e->elevator_data; \ ++ return sprintf(page, "%llu\n", ad->latency_target[optype]); \ ++} \ ++static ssize_t adios_batch_limit_##name##_store( \ ++ struct elevator_queue *e, const char *page, size_t count) { \ ++ unsigned long max_batch; \ ++ int ret; \ ++ ret = kstrtoul(page, 10, &max_batch); \ ++ if (ret || max_batch == 0) \ ++ return -EINVAL; \ ++ struct adios_data *ad = e->elevator_data; \ ++ ad->batch_limit[optype] = max_batch; \ ++ return count; \ ++} \ ++static ssize_t adios_batch_limit_##name##_show( \ ++ struct elevator_queue *e, char *page) { \ ++ struct adios_data *ad = e->elevator_data; \ ++ return sprintf(page, "%u\n", ad->batch_limit[optype]); \ ++} ++ ++SYSFS_OPTYPE_DECL(read, ADIOS_READ); ++SYSFS_OPTYPE_DECL(write, ADIOS_WRITE); ++SYSFS_OPTYPE_DECL(discard, ADIOS_DISCARD); ++ ++// Show the maximum batch size actually achieved for each operation type ++static ssize_t adios_batch_actual_max_show( ++ struct elevator_queue *e, char *page) { ++ struct adios_data *ad = e->elevator_data; ++ u32 total_count, read_count, write_count, discard_count; ++ ++ total_count = ad->batch_actual_max_total; ++ read_count = ad->batch_actual_max_size[ADIOS_READ]; ++ write_count = ad->batch_actual_max_size[ADIOS_WRITE]; ++ discard_count = ad->batch_actual_max_size[ADIOS_DISCARD]; ++ ++ return sprintf(page, ++ "Total : %u\nDiscard: %u\nRead : %u\nWrite : %u\n", ++ total_count, discard_count, read_count, write_count); ++} ++ ++// Set the global latency window ++static ssize_t adios_global_latency_window_store( ++ struct elevator_queue *e, const char *page, size_t count) { ++ struct adios_data *ad = e->elevator_data; ++ unsigned long nsec; ++ int ret; ++ ++ ret = kstrtoul(page, 10, &nsec); ++ if (ret) ++ return ret; ++ ++ ad->global_latency_window = nsec; ++ ++ return count; ++} ++ ++// Show the global latency window ++static ssize_t adios_global_latency_window_show( ++ struct elevator_queue *e, char *page) { ++ struct adios_data *ad = e->elevator_data; ++ return sprintf(page, "%llu\n", ad->global_latency_window); ++} ++ ++// Show the bq_refill_below_ratio ++static ssize_t adios_bq_refill_below_ratio_show( ++ struct elevator_queue *e, char *page) { ++ struct adios_data *ad = e->elevator_data; ++ return sprintf(page, "%d\n", 
ad->bq_refill_below_ratio); ++} ++ ++// Set the bq_refill_below_ratio ++static ssize_t adios_bq_refill_below_ratio_store( ++ struct elevator_queue *e, const char *page, size_t count) { ++ struct adios_data *ad = e->elevator_data; ++ int ratio; ++ int ret; ++ ++ ret = kstrtoint(page, 10, &ratio); ++ if (ret || ratio < 0 || ratio > 100) ++ return -EINVAL; ++ ++ ad->bq_refill_below_ratio = ratio; ++ ++ return count; ++} ++ ++// Show the read priority ++static ssize_t adios_read_priority_show( ++ struct elevator_queue *e, char *page) { ++ struct adios_data *ad = e->elevator_data; ++ return sprintf(page, "%d\n", ad->dl_prio[0]); ++} ++ ++// Set the read priority ++static ssize_t adios_read_priority_store( ++ struct elevator_queue *e, const char *page, size_t count) { ++ struct adios_data *ad = e->elevator_data; ++ int prio; ++ int ret; ++ ++ ret = kstrtoint(page, 10, &prio); ++ if (ret || prio < -20 || prio > 19) ++ return -EINVAL; ++ ++ guard(spinlock_irqsave)(&ad->lock); ++ ad->dl_prio[0] = prio; ++ ad->dl_bias = 0; ++ ++ return count; ++} ++ ++// Reset batch queue statistics ++static ssize_t adios_reset_bq_stats_store( ++ struct elevator_queue *e, const char *page, size_t count) { ++ struct adios_data *ad = e->elevator_data; ++ unsigned long val; ++ int ret; ++ ++ ret = kstrtoul(page, 10, &val); ++ if (ret || val != 1) ++ return -EINVAL; ++ ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) ++ ad->batch_actual_max_size[i] = 0; ++ ++ ad->batch_actual_max_total = 0; ++ ++ return count; ++} ++ ++// Reset the latency model parameters ++static ssize_t adios_reset_lat_model_store( ++ struct elevator_queue *e, const char *page, size_t count) { ++ struct adios_data *ad = e->elevator_data; ++ unsigned long val; ++ int ret; ++ ++ ret = kstrtoul(page, 10, &val); ++ if (ret || val != 1) ++ return -EINVAL; ++ ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) { ++ struct latency_model *model = &ad->latency_model[i]; ++ unsigned long flags; ++ spin_lock_irqsave(&model->lock, flags); ++ model->base = 0ULL; ++ model->slope = 0ULL; ++ model->small_sum_delay = 0ULL; ++ model->small_count = 0ULL; ++ model->large_sum_delay = 0ULL; ++ model->large_sum_bsize = 0ULL; ++ spin_unlock_irqrestore(&model->lock, flags); ++ } ++ ++ return count; ++} ++ ++// Show the ADIOS version ++static ssize_t adios_version_show(struct elevator_queue *e, char *page) { ++ return sprintf(page, "%s\n", ADIOS_VERSION); ++} ++ ++// Define sysfs attributes for dynamic thresholds ++#define SHRINK_THRESHOLD_ATTR_RW(name, model_field, min_value, max_value) \ ++static ssize_t adios_shrink_##name##_store( \ ++ struct elevator_queue *e, const char *page, size_t count) { \ ++ struct adios_data *ad = e->elevator_data; \ ++ unsigned long val; \ ++ int ret; \ ++ ret = kstrtoul(page, 10, &val); \ ++ if (ret || val < min_value || val > max_value) \ ++ return -EINVAL; \ ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) { \ ++ struct latency_model *model = &ad->latency_model[i]; \ ++ unsigned long flags; \ ++ spin_lock_irqsave(&model->lock, flags); \ ++ model->model_field = val; \ ++ spin_unlock_irqrestore(&model->lock, flags); \ ++ } \ ++ return count; \ ++} \ ++static ssize_t adios_shrink_##name##_show( \ ++ struct elevator_queue *e, char *page) { \ ++ struct adios_data *ad = e->elevator_data; \ ++ u32 val = 0; \ ++ for (u8 i = 0; i < ADIOS_OPTYPES; i++) { \ ++ struct latency_model *model = &ad->latency_model[i]; \ ++ unsigned long flags; \ ++ spin_lock_irqsave(&model->lock, flags); \ ++ val = model->model_field; \ ++ spin_unlock_irqrestore(&model->lock, flags); \ ++ } \ ++ 
return sprintf(page, "%u\n", val); \ ++} ++ ++SHRINK_THRESHOLD_ATTR_RW(at_kreqs, lm_shrink_at_kreqs, 1, 100000) ++SHRINK_THRESHOLD_ATTR_RW(at_gbytes, lm_shrink_at_gbytes, 1, 1000) ++SHRINK_THRESHOLD_ATTR_RW(resist, lm_shrink_resist, 1, 3) ++ ++// Define sysfs attributes ++#define AD_ATTR(name, show_func, store_func) \ ++ __ATTR(name, 0644, show_func, store_func) ++#define AD_ATTR_RW(name) \ ++ __ATTR(name, 0644, adios_##name##_show, adios_##name##_store) ++#define AD_ATTR_RO(name) \ ++ __ATTR(name, 0644, adios_##name##_show, NULL) ++#define AD_ATTR_WO(name) \ ++ __ATTR(name, 0644, NULL, adios_##name##_store) ++ ++// Define sysfs attributes for ADIOS scheduler ++static struct elv_fs_entry adios_sched_attrs[] = { ++ AD_ATTR_RO(batch_actual_max), ++ AD_ATTR_RW(bq_refill_below_ratio), ++ AD_ATTR_RW(global_latency_window), ++ ++ AD_ATTR_RW(batch_limit_read), ++ AD_ATTR_RW(batch_limit_write), ++ AD_ATTR_RW(batch_limit_discard), ++ ++ AD_ATTR_RO(lat_model_read), ++ AD_ATTR_RO(lat_model_write), ++ AD_ATTR_RO(lat_model_discard), ++ ++ AD_ATTR_RW(lat_target_read), ++ AD_ATTR_RW(lat_target_write), ++ AD_ATTR_RW(lat_target_discard), ++ ++ AD_ATTR_RW(shrink_at_kreqs), ++ AD_ATTR_RW(shrink_at_gbytes), ++ AD_ATTR_RW(shrink_resist), ++ ++ AD_ATTR_RW(read_priority), ++ ++ AD_ATTR_WO(reset_bq_stats), ++ AD_ATTR_WO(reset_lat_model), ++ AD_ATTR(adios_version, adios_version_show, NULL), ++ ++ __ATTR_NULL ++}; ++ ++// Define the ADIOS scheduler type ++static struct elevator_type mq_adios = { ++ .ops = { ++ .next_request = elv_rb_latter_request, ++ .former_request = elv_rb_former_request, ++ .limit_depth = adios_limit_depth, ++ .depth_updated = adios_depth_updated, ++ .request_merged = adios_request_merged, ++ .requests_merged = adios_merged_requests, ++ .bio_merge = adios_bio_merge, ++ .insert_requests = adios_insert_requests, ++ .prepare_request = adios_prepare_request, ++ .dispatch_request = adios_dispatch_request, ++ .completed_request = adios_completed_request, ++ .finish_request = adios_finish_request, ++ .has_work = adios_has_work, ++ .init_hctx = adios_init_hctx, ++ .init_sched = adios_init_sched, ++ .exit_sched = adios_exit_sched, ++ }, ++#ifdef CONFIG_BLK_DEBUG_FS ++#endif ++ .elevator_attrs = adios_sched_attrs, ++ .elevator_name = "adios", ++ .elevator_owner = THIS_MODULE, ++}; ++MODULE_ALIAS("mq-adios-iosched"); ++ ++#define ADIOS_PROGNAME "Adaptive Deadline I/O Scheduler" ++#define ADIOS_AUTHOR "Masahito Suzuki" ++ ++// Initialize the ADIOS scheduler module ++static int __init adios_init(void) { ++ printk(KERN_INFO "%s %s by %s\n", ++ ADIOS_PROGNAME, ADIOS_VERSION, ADIOS_AUTHOR); ++ return elv_register(&mq_adios); ++} ++ ++// Exit the ADIOS scheduler module ++static void __exit adios_exit(void) { ++ elv_unregister(&mq_adios); ++} ++ ++module_init(adios_init); ++module_exit(adios_exit); ++ ++MODULE_AUTHOR(ADIOS_AUTHOR); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION(ADIOS_PROGNAME); +\ No newline at end of file diff --git a/block/elevator.c b/block/elevator.c index 7c3ba80e5ff4..06e974eb6594 100644 --- a/block/elevator.c @@ -6891,10 +8197,10 @@ index 0e16432d9a72..867bc5c5ce67 100644 bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index cd4fac120834..1ab433d774cc 100644 +index 3780d50fd3ae..6f622c9944e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -@@ -4461,8 +4461,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, 
+@@ -4468,8 +4468,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } /* init i2c buses */ @@ -6904,7 +8210,7 @@ index cd4fac120834..1ab433d774cc 100644 } } -@@ -4724,8 +4723,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) +@@ -4731,8 +4730,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) amdgpu_reset_fini(adev); /* free i2c buses */ @@ -6915,10 +8221,10 @@ index cd4fac120834..1ab433d774cc 100644 if (amdgpu_emu_mode != 1) amdgpu_atombios_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index e63efe5c5b75..14a959a4f270 100644 +index 91a874bb0e24..32298a4a7fa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -@@ -138,6 +138,7 @@ enum AMDGPU_DEBUG_MASK { +@@ -139,6 +139,7 @@ enum AMDGPU_DEBUG_MASK { }; unsigned int amdgpu_vram_limit = UINT_MAX; @@ -6926,7 +8232,7 @@ index e63efe5c5b75..14a959a4f270 100644 int amdgpu_vis_vram_limit; int amdgpu_gart_size = -1; /* auto */ int amdgpu_gtt_size = -1; /* auto */ -@@ -256,6 +257,15 @@ struct amdgpu_watchdog_timer amdgpu_watchdog_timer = { +@@ -257,6 +258,15 @@ struct amdgpu_watchdog_timer amdgpu_watchdog_timer = { .period = 0x0, /* default to 0x0 (timeout disable) */ }; @@ -7012,7 +8318,7 @@ index 11e3f2f3b174..7b1bd69dc29e 100644 + endmenu diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -index f7c0d7625ff1..02e915268f33 100644 +index ca6b9a585aba..ddcb2c66c916 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -178,6 +178,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev); @@ -7024,7 +8330,7 @@ index f7c0d7625ff1..02e915268f33 100644 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { -@@ -2842,6 +2844,33 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) +@@ -2897,6 +2899,33 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) return 0; } @@ -7058,7 +8364,7 @@ index f7c0d7625ff1..02e915268f33 100644 /** * dm_hw_init() - Initialize DC device * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
-@@ -2873,6 +2902,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block) +@@ -2928,6 +2957,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block) return r; amdgpu_dm_hpd_init(adev); @@ -7069,7 +8375,7 @@ index f7c0d7625ff1..02e915268f33 100644 return 0; } -@@ -2888,6 +2921,8 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) +@@ -2943,6 +2976,8 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -7078,7 +8384,7 @@ index f7c0d7625ff1..02e915268f33 100644 amdgpu_dm_hpd_fini(adev); amdgpu_dm_irq_fini(adev); -@@ -4524,7 +4559,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) +@@ -4579,7 +4614,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) return r; } @@ -7087,7 +8393,7 @@ index f7c0d7625ff1..02e915268f33 100644 if (amdgpu_dm_create_color_properties(adev)) { dc_state_release(state->context); kfree(state); -@@ -8222,7 +8257,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, +@@ -8290,7 +8325,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; @@ -7096,7 +8402,7 @@ index f7c0d7625ff1..02e915268f33 100644 return result; cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); -@@ -8241,11 +8276,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, +@@ -8309,11 +8344,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, cmd.payloads[i].data = msgs[i].buf; } @@ -7120,7 +8426,7 @@ index f7c0d7625ff1..02e915268f33 100644 kfree(cmd.payloads); return result; -@@ -8262,9 +8304,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { +@@ -8330,9 +8372,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { }; static struct amdgpu_i2c_adapter * @@ -7131,7 +8437,7 @@ index f7c0d7625ff1..02e915268f33 100644 { struct amdgpu_device *adev = ddc_service->ctx->driver_context; struct amdgpu_i2c_adapter *i2c; -@@ -8275,9 +8315,14 @@ create_i2c(struct ddc_service *ddc_service, +@@ -8343,9 +8383,14 @@ create_i2c(struct ddc_service *ddc_service, i2c->base.owner = THIS_MODULE; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; @@ -7147,7 +8453,7 @@ index f7c0d7625ff1..02e915268f33 100644 return i2c; } -@@ -8302,7 +8347,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, +@@ -8370,7 +8415,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, link->priv = aconnector; @@ -13331,7 +14637,7 @@ index 7d0a05660e5e..3a3116dca89c 100644 #ifdef CONFIG_NUMA_BALANCING /* Restrict the NUMA promotion throughput (MB/s) for each target node. 
*/ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 66744d60904d..4b3fffa1d5f5 100644 +index f3e121888d05..18f3955ddb8f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2820,7 +2820,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); @@ -13912,9 +15218,9 @@ index 6872b5aff73e..1910fe1b2471 100644 -- 2.48.0.rc1 -From c92d718c7ad95a5eae66eb820b6d7879fa127443 Mon Sep 17 00:00:00 2001 +From a0c98ac11e5dd2142d30fa11748f183adf4bbb49 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:40:38 +0100 +Date: Fri, 7 Mar 2025 19:28:58 +0100 Subject: [PATCH 05/12] crypto Signed-off-by: Peter Jung @@ -14686,22 +15992,25 @@ index fbf43482e1f5..11e95fc62636 100644 -- 2.48.0.rc1 -From 3cb3e2023181b4be0e5a454b75e86ecddca9646a Mon Sep 17 00:00:00 2001 +From 7ca1fe99c4ce6e9ee34af69f45ad68d3d7bdd9b6 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:41:10 +0100 +Date: Fri, 7 Mar 2025 19:31:39 +0100 Subject: [PATCH 06/12] fixes Signed-off-by: Peter Jung --- - arch/Kconfig | 4 ++-- - drivers/gpu/drm/drm_edid.c | 47 +++++++++++++++++++++++++++++++++++--- - drivers/hid/hid-ids.h | 1 + - fs/fuse/file.c | 6 +++-- - kernel/fork.c | 9 ++++---- - kernel/kprobes.c | 23 +++++++++---------- - kernel/sched/ext.c | 7 +++--- - scripts/package/PKGBUILD | 5 ++++ - 8 files changed, 76 insertions(+), 26 deletions(-) + arch/Kconfig | 4 +-- + arch/x86/tools/insn_decoder_test.c | 2 +- + drivers/edac/igen6_edac.c | 21 +++++++++---- + drivers/gpu/drm/drm_edid.c | 47 ++++++++++++++++++++++++++++-- + drivers/hid/hid-ids.h | 1 + + drivers/misc/mei/vsc-tp.c | 2 +- + drivers/usb/host/xhci.c | 6 +++- + kernel/fork.c | 9 +++--- + kernel/kprobes.c | 23 +++++++-------- + kernel/sched/ext.c | 7 +++-- + scripts/package/PKGBUILD | 5 ++++ + 11 files changed, 94 insertions(+), 33 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 6682b2a53e34..fe54298ae05c 100644 @@ -14725,6 +16034,52 @@ index 6682b2a53e34..fe54298ae05c 100644 depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS help This value can be used to select the number of bits to use to +diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c +index 472540aeabc2..366e07546344 100644 +--- a/arch/x86/tools/insn_decoder_test.c ++++ b/arch/x86/tools/insn_decoder_test.c +@@ -106,7 +106,7 @@ static void parse_args(int argc, char **argv) + } + } + +-#define BUFSIZE 256 ++#define BUFSIZE 4096 + + int main(int argc, char **argv) + { +diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c +index fdf3a84fe698..595908af9e5c 100644 +--- a/drivers/edac/igen6_edac.c ++++ b/drivers/edac/igen6_edac.c +@@ -785,13 +785,22 @@ static u64 ecclog_read_and_clear(struct igen6_imc *imc) + { + u64 ecclog = readq(imc->window + ECC_ERROR_LOG_OFFSET); + +- if (ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE)) { +- /* Clear CE/UE bits by writing 1s */ +- writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET); +- return ecclog; +- } ++ /* ++ * Quirk: The ECC_ERROR_LOG register of certain SoCs may contain ++ * the invalid value ~0. This will result in a flood of invalid ++ * error reports in polling mode. Skip it. ++ */ ++ if (ecclog == ~0) ++ return 0; + +- return 0; ++ /* Neither a CE nor a UE. 
Skip it.*/ ++ if (!(ecclog & (ECC_ERROR_LOG_CE | ECC_ERROR_LOG_UE))) ++ return 0; ++ ++ /* Clear CE/UE bits by writing 1s */ ++ writeq(ecclog, imc->window + ECC_ERROR_LOG_OFFSET); ++ ++ return ecclog; + } + + static void errsts_clear(struct igen6_imc *imc) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 13bc4c290b17..9b741e6262bc 100644 --- a/drivers/gpu/drm/drm_edid.c @@ -14830,31 +16185,37 @@ index d1d479ca50a2..d1ab021e4a6a 100644 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY 0x1abe #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c #define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b -diff --git a/fs/fuse/file.c b/fs/fuse/file.c -index 7d92a5479998..a40d65ffb94d 100644 ---- a/fs/fuse/file.c -+++ b/fs/fuse/file.c -@@ -955,8 +955,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, - fuse_invalidate_atime(inode); - } +diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c +index 35d349fee769..7be1649b1972 100644 +--- a/drivers/misc/mei/vsc-tp.c ++++ b/drivers/misc/mei/vsc-tp.c +@@ -502,7 +502,7 @@ static int vsc_tp_probe(struct spi_device *spi) + if (ret) + return ret; -- for (i = 0; i < ap->num_folios; i++) -+ for (i = 0; i < ap->num_folios; i++) { - folio_end_read(ap->folios[i], !err); -+ folio_put(ap->folios[i]); +- tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN); ++ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN); + if (IS_ERR(tp->wakeuphost)) + return PTR_ERR(tp->wakeuphost); + +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 5ebde8cae4fc..55086d63e7ef 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -779,8 +779,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) + struct xhci_segment *seg; + + ring = xhci->cmd_ring; +- xhci_for_each_ring_seg(ring->first_seg, seg) ++ xhci_for_each_ring_seg(ring->first_seg, seg) { ++ /* erase all TRBs before the link */ + memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); ++ /* clear link cycle bit */ ++ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); + } - if (ia->ff) - fuse_file_put(ia->ff, false); -@@ -1048,7 +1050,7 @@ static void fuse_readahead(struct readahead_control *rac) - ap = &ia->ap; - - while (ap->num_folios < cur_pages) { -- folio = readahead_folio(rac); -+ folio = __readahead_folio(rac); - ap->folios[ap->num_folios] = folio; - ap->descs[ap->num_folios].length = folio_size(folio); - ap->num_folios++; + xhci_initialize_ring_info(ring); + /* diff --git a/kernel/fork.c b/kernel/fork.c index 32bf5e30ba4a..e12de63bac76 100644 --- a/kernel/fork.c @@ -14946,10 +16307,10 @@ index b027a4030976..5cc750200f19 100644 return ret; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c -index c1dec2453af4..1a2553498f89 100644 +index 5ccd46124ff0..66be80b6a3f1 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c -@@ -5279,9 +5279,10 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, +@@ -5282,9 +5282,10 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, ops_state >> SCX_OPSS_QSEQ_SHIFT); @@ -14982,9 +16343,9 @@ index dca706617adc..89d3aef160b7 100644 -- 2.48.0.rc1 -From 3ff29239964e763e644499525f6abb1e6b5de3cb Mon Sep 17 00:00:00 2001 +From 4b8d17da9c0c68596966885f53993200e05a9033 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:41:23 +0100 +Date: Fri, 7 Mar 2025 
19:31:52 +0100 Subject: [PATCH 07/12] itmt-core-ranking Signed-off-by: Peter Jung @@ -15300,7 +16661,7 @@ index 3a3116dca89c..a27896a05103 100644 case group_misfit_task: /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 4b3fffa1d5f5..fa4c60eb4043 100644 +index 18f3955ddb8f..6a9efb0fd86f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2056,7 +2056,6 @@ struct sched_group { @@ -15347,9 +16708,9 @@ index 9748a4c8d668..59b8157cb114 100644 -- 2.48.0.rc1 -From b822a217d4e67f24160f4a0602b9d5bb66c1d5a8 Mon Sep 17 00:00:00 2001 +From ad509ec4d26e59c6a0f24dc77ad92aad6bb1ab84 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:42:10 +0100 +Date: Fri, 7 Mar 2025 19:32:02 +0100 Subject: [PATCH 08/12] ntsync Signed-off-by: Peter Jung @@ -18397,9 +19758,9 @@ index 000000000000..3aad311574c4 -- 2.48.0.rc1 -From 975a79a5278fd9b12781af42cab0ca820ee4598c Mon Sep 17 00:00:00 2001 +From ff19b7ba4e494393e0beffb3f2970984d67215d6 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:42:22 +0100 +Date: Fri, 7 Mar 2025 19:32:14 +0100 Subject: [PATCH 09/12] perf-per-core Signed-off-by: Peter Jung @@ -19295,9 +20656,9 @@ index 8277c64f88db..b5a5e1411469 100644 -- 2.48.0.rc1 -From 73a7a93279dfd9eca8703374f7e45340f2cd9f5b Mon Sep 17 00:00:00 2001 +From dc43bd082e690be56fe7c2d68564c54bb3fb63c8 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:42:40 +0100 +Date: Fri, 7 Mar 2025 19:32:25 +0100 Subject: [PATCH 10/12] pksm Signed-off-by: Peter Jung @@ -19728,9 +21089,9 @@ index e9115b4d8b63..2afc778f2d17 100644 -- 2.48.0.rc1 -From edfb199a3ed4ad6d5524c21801f40a406c3c85df Mon Sep 17 00:00:00 2001 +From f32d618f3014adc3a4df45c153fe5bb5d0cc41df Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:43:09 +0100 +Date: Fri, 7 Mar 2025 19:32:37 +0100 Subject: [PATCH 11/12] t2 Signed-off-by: Peter Jung @@ -19751,8 +21112,8 @@ Signed-off-by: Peter Jung drivers/gpu/vga/vga_switcheroo.c | 7 +- drivers/hid/Kconfig | 26 + drivers/hid/Makefile | 2 + - drivers/hid/hid-appletb-bl.c | 207 +++ - drivers/hid/hid-appletb-kbd.c | 506 ++++++++ + drivers/hid/hid-appletb-bl.c | 204 +++ + drivers/hid/hid-appletb-kbd.c | 507 ++++++++ drivers/hid/hid-multitouch.c | 60 +- drivers/hid/hid-quirks.c | 8 +- drivers/hwmon/applesmc.c | 1138 ++++++++++++----- @@ -19791,7 +21152,7 @@ Signed-off-by: Peter Jung lib/test_printf.c | 20 +- lib/vsprintf.c | 36 +- scripts/checkpatch.pl | 2 +- - 56 files changed, 8348 insertions(+), 336 deletions(-) + 56 files changed, 8346 insertions(+), 336 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd create mode 100644 drivers/gpu/drm/tiny/appletbdrm.c create mode 100644 drivers/hid/hid-appletb-bl.c @@ -19903,10 +21264,10 @@ index efecb59adfe6..16af42c68cca 100644 S: Orphan T: git https://gitlab.freedesktop.org/drm/misc/kernel.git diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index 14a959a4f270..e1a563cdb361 100644 +index 32298a4a7fa7..c70eed5585df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -@@ -2257,6 +2257,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, +@@ -2258,6 +2258,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, int ret, retry = 0, i; bool supports_atomic = false; @@ -19989,7 +21350,7 @@ index b1be458ed4dd..28c0e76a1e88 100644 drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); return 0; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c -index 49b5cc01ce40..1435f49f2ce6 100644 +index 943b57835b3a..aa11a611e79e 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4685,6 +4685,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port) @@ -20954,10 +22315,10 @@ index 24de45f3677d..1989288e0438 100644 obj-$(CONFIG_HID_AUREAL) += hid-aureal.o diff --git a/drivers/hid/hid-appletb-bl.c b/drivers/hid/hid-appletb-bl.c new file mode 100644 -index 000000000000..819157686e59 +index 000000000000..bad2aead8780 --- /dev/null +++ b/drivers/hid/hid-appletb-bl.c -@@ -0,0 +1,207 @@ +@@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple Touch Bar Backlight Driver @@ -21101,15 +22462,12 @@ index 000000000000..819157686e59 + bl->aux1_field = aux1_field; + bl->brightness_field = brightness_field; + -+ if (appletb_bl_def_brightness == 0) -+ ret = appletb_bl_set_brightness(bl, APPLETB_BL_OFF); -+ else if (appletb_bl_def_brightness == 1) -+ ret = appletb_bl_set_brightness(bl, APPLETB_BL_DIM); -+ else -+ ret = appletb_bl_set_brightness(bl, APPLETB_BL_ON); ++ ret = appletb_bl_set_brightness(bl, ++ appletb_bl_brightness_map[(appletb_bl_def_brightness > 2) ? 2 : appletb_bl_def_brightness]); + + if (ret) { -+ dev_err_probe(dev, ret, "Failed to set touch bar brightness to off\n"); ++ dev_err_probe(dev, ret, "Failed to set default touch bar brightness to %d\n", ++ appletb_bl_def_brightness); + goto close_hw; + } + @@ -21163,21 +22521,21 @@ index 000000000000..819157686e59 + +MODULE_AUTHOR("Ronald Tschalär"); +MODULE_AUTHOR("Kerem Karabay "); -+MODULE_DESCRIPTION("MacBookPro Touch Bar Backlight Driver"); ++MODULE_DESCRIPTION("MacBook Pro Touch Bar Backlight driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c new file mode 100644 -index 000000000000..fa28a691da6a +index 000000000000..d4b95aa3eecb --- /dev/null +++ b/drivers/hid/hid-appletb-kbd.c -@@ -0,0 +1,506 @@ +@@ -0,0 +1,507 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Apple Touch Bar Keyboard Mode Driver + * + * Copyright (c) 2017-2018 Ronald Tschalär + * Copyright (c) 2022-2023 Kerem Karabay -+ * Copyright (c) 2024 Aditya Garg ++ * Copyright (c) 2024-2025 Aditya Garg + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -21316,7 +22674,7 @@ index 000000000000..fa28a691da6a +} +static DEVICE_ATTR_RW(mode); + -+struct attribute *appletb_kbd_attrs[] = { ++static struct attribute *appletb_kbd_attrs[] = { + &dev_attr_mode.attr, + NULL +}; @@ -21409,13 +22767,13 @@ index 000000000000..fa28a691da6a + + reset_inactivity_timer(kbd); + -+ if (type == EV_KEY && code == KEY_FN && appletb_tb_fn_toggle) { ++ if (type == EV_KEY && code == KEY_FN && appletb_tb_fn_toggle && ++ (kbd->current_mode == APPLETB_KBD_MODE_SPCL || ++ kbd->current_mode == APPLETB_KBD_MODE_FN)) { + if (value == 1) { + kbd->saved_mode = kbd->current_mode; -+ if (kbd->current_mode == APPLETB_KBD_MODE_SPCL) -+ appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_FN); -+ else if (kbd->current_mode == APPLETB_KBD_MODE_FN) -+ appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_SPCL); ++ appletb_kbd_set_mode(kbd, kbd->current_mode == APPLETB_KBD_MODE_SPCL ++ ? 
APPLETB_KBD_MODE_FN : APPLETB_KBD_MODE_SPCL); + } else if (value == 0) { + if (kbd->saved_mode != kbd->current_mode) + appletb_kbd_set_mode(kbd, kbd->saved_mode); @@ -21575,13 +22933,13 @@ index 000000000000..fa28a691da6a + } + + kbd->backlight_dev = backlight_device_get_by_name("appletb_backlight"); -+ if (!kbd->backlight_dev) -+ dev_err_probe(dev, ret, "Failed to get backlight device\n"); -+ else { -+ backlight_device_set_brightness(kbd->backlight_dev, 2); -+ timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0); -+ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000)); -+ } ++ if (!kbd->backlight_dev) { ++ dev_err_probe(dev, -ENODEV, "Failed to get backlight device\n"); ++ } else { ++ backlight_device_set_brightness(kbd->backlight_dev, 2); ++ timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0); ++ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000)); ++ } + + kbd->inp_handler.event = appletb_kbd_inp_event; + kbd->inp_handler.connect = appletb_kbd_inp_connect; @@ -21670,12 +23028,13 @@ index 000000000000..fa28a691da6a +}; +module_hid_driver(appletb_kbd_hid_driver); + -+/* The backlight driver should be loaded before the keyboard driver is initialised*/ ++/* The backlight driver should be loaded before the keyboard driver is initialised */ +MODULE_SOFTDEP("pre: hid_appletb_bl"); + +MODULE_AUTHOR("Ronald Tschalär"); +MODULE_AUTHOR("Kerem Karabay "); -+MODULE_DESCRIPTION("MacBookPro Touch Bar Keyboard Mode Driver"); ++MODULE_AUTHOR("Aditya Garg "); ++MODULE_DESCRIPTION("MacBook Pro Touch Bar Keyboard Mode driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index e50887a6d22c..c436340331b4 100644 @@ -30055,77 +31414,81 @@ index 9eed3683ad76..7ddbf75f4c26 100755 -- 2.48.0.rc1 -From 047728cdbfdbf23f914674f8fb9cbae2bce866e0 Mon Sep 17 00:00:00 2001 +From 2a754c4a6f572d93d418d0894f0988d36e030f96 Mon Sep 17 00:00:00 2001 From: Peter Jung -Date: Fri, 21 Feb 2025 14:43:30 +0100 +Date: Fri, 7 Mar 2025 19:33:03 +0100 Subject: [PATCH 12/12] zstd Signed-off-by: Peter Jung --- - include/linux/zstd.h | 2 +- - include/linux/zstd_errors.h | 23 +- - include/linux/zstd_lib.h | 850 +++++-- - lib/zstd/Makefile | 2 +- + include/linux/zstd.h | 86 +- + include/linux/zstd_errors.h | 30 +- + include/linux/zstd_lib.h | 1123 ++++-- + lib/zstd/Makefile | 3 +- lib/zstd/common/allocations.h | 56 + - lib/zstd/common/bits.h | 149 ++ - lib/zstd/common/bitstream.h | 127 +- - lib/zstd/common/compiler.h | 134 +- + lib/zstd/common/bits.h | 150 + + lib/zstd/common/bitstream.h | 155 +- + lib/zstd/common/compiler.h | 151 +- lib/zstd/common/cpu.h | 3 +- lib/zstd/common/debug.c | 9 +- - lib/zstd/common/debug.h | 34 +- + lib/zstd/common/debug.h | 37 +- lib/zstd/common/entropy_common.c | 42 +- - lib/zstd/common/error_private.c | 12 +- - lib/zstd/common/error_private.h | 84 +- - lib/zstd/common/fse.h | 94 +- - lib/zstd/common/fse_decompress.c | 130 +- - lib/zstd/common/huf.h | 237 +- + lib/zstd/common/error_private.c | 13 +- + lib/zstd/common/error_private.h | 88 +- + lib/zstd/common/fse.h | 103 +- + lib/zstd/common/fse_decompress.c | 132 +- + lib/zstd/common/huf.h | 240 +- lib/zstd/common/mem.h | 3 +- - lib/zstd/common/portability_macros.h | 28 +- + lib/zstd/common/portability_macros.h | 45 +- lib/zstd/common/zstd_common.c | 38 +- lib/zstd/common/zstd_deps.h | 16 +- - lib/zstd/common/zstd_internal.h | 109 +- + lib/zstd/common/zstd_internal.h | 153 +- lib/zstd/compress/clevels.h 
| 3 +- lib/zstd/compress/fse_compress.c | 74 +- - lib/zstd/compress/hist.c | 3 +- - lib/zstd/compress/hist.h | 3 +- - lib/zstd/compress/huf_compress.c | 441 ++-- - lib/zstd/compress/zstd_compress.c | 2111 ++++++++++++----- - lib/zstd/compress/zstd_compress_internal.h | 359 ++- - lib/zstd/compress/zstd_compress_literals.c | 155 +- + lib/zstd/compress/hist.c | 13 +- + lib/zstd/compress/hist.h | 10 +- + lib/zstd/compress/huf_compress.c | 441 ++- + lib/zstd/compress/zstd_compress.c | 3289 ++++++++++++----- + lib/zstd/compress/zstd_compress_internal.h | 621 +++- + lib/zstd/compress/zstd_compress_literals.c | 157 +- lib/zstd/compress/zstd_compress_literals.h | 25 +- - lib/zstd/compress/zstd_compress_sequences.c | 7 +- - lib/zstd/compress/zstd_compress_sequences.h | 3 +- - lib/zstd/compress/zstd_compress_superblock.c | 376 ++- + lib/zstd/compress/zstd_compress_sequences.c | 21 +- + lib/zstd/compress/zstd_compress_sequences.h | 16 +- + lib/zstd/compress/zstd_compress_superblock.c | 394 +- lib/zstd/compress/zstd_compress_superblock.h | 3 +- - lib/zstd/compress/zstd_cwksp.h | 169 +- - lib/zstd/compress/zstd_double_fast.c | 143 +- - lib/zstd/compress/zstd_double_fast.h | 17 +- - lib/zstd/compress/zstd_fast.c | 596 +++-- - lib/zstd/compress/zstd_fast.h | 6 +- - lib/zstd/compress/zstd_lazy.c | 732 +++--- - lib/zstd/compress/zstd_lazy.h | 138 +- - lib/zstd/compress/zstd_ldm.c | 21 +- - lib/zstd/compress/zstd_ldm.h | 3 +- + lib/zstd/compress/zstd_cwksp.h | 222 +- + lib/zstd/compress/zstd_double_fast.c | 245 +- + lib/zstd/compress/zstd_double_fast.h | 27 +- + lib/zstd/compress/zstd_fast.c | 703 +++- + lib/zstd/compress/zstd_fast.h | 16 +- + lib/zstd/compress/zstd_lazy.c | 840 +++-- + lib/zstd/compress/zstd_lazy.h | 195 +- + lib/zstd/compress/zstd_ldm.c | 102 +- + lib/zstd/compress/zstd_ldm.h | 17 +- lib/zstd/compress/zstd_ldm_geartab.h | 3 +- - lib/zstd/compress/zstd_opt.c | 497 ++-- - lib/zstd/compress/zstd_opt.h | 41 +- - lib/zstd/decompress/huf_decompress.c | 887 ++++--- + lib/zstd/compress/zstd_opt.c | 571 +-- + lib/zstd/compress/zstd_opt.h | 55 +- + lib/zstd/compress/zstd_preSplit.c | 239 ++ + lib/zstd/compress/zstd_preSplit.h | 34 + + lib/zstd/decompress/huf_decompress.c | 887 +++-- lib/zstd/decompress/zstd_ddict.c | 9 +- lib/zstd/decompress/zstd_ddict.h | 3 +- - lib/zstd/decompress/zstd_decompress.c | 358 ++- - lib/zstd/decompress/zstd_decompress_block.c | 708 +++--- + lib/zstd/decompress/zstd_decompress.c | 377 +- + lib/zstd/decompress/zstd_decompress_block.c | 724 ++-- lib/zstd/decompress/zstd_decompress_block.h | 10 +- - .../decompress/zstd_decompress_internal.h | 9 +- + .../decompress/zstd_decompress_internal.h | 19 +- lib/zstd/decompress_sources.h | 2 +- lib/zstd/zstd_common_module.c | 5 +- - lib/zstd/zstd_compress_module.c | 2 +- + lib/zstd/zstd_compress_module.c | 75 +- lib/zstd/zstd_decompress_module.c | 4 +- - 58 files changed, 6577 insertions(+), 3531 deletions(-) + 60 files changed, 8747 insertions(+), 4380 deletions(-) create mode 100644 lib/zstd/common/allocations.h create mode 100644 lib/zstd/common/bits.h + create mode 100644 lib/zstd/compress/zstd_preSplit.c + create mode 100644 lib/zstd/compress/zstd_preSplit.h diff --git a/include/linux/zstd.h b/include/linux/zstd.h -index b2c7cf310c8f..ac59ae9a18d7 100644 +index b2c7cf310c8f..d7be07c887e7 100644 --- a/include/linux/zstd.h +++ b/include/linux/zstd.h @@ -1,6 +1,6 @@ @@ -30136,8 +31499,139 @@ index b2c7cf310c8f..ac59ae9a18d7 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the +@@ -160,6 +160,19 @@ typedef ZSTD_parameters zstd_parameters; + zstd_parameters zstd_get_params(int level, + unsigned long long estimated_src_size); + ++typedef ZSTD_CCtx zstd_cctx; ++typedef ZSTD_cParameter zstd_cparameter; ++ ++/** ++ * zstd_cctx_set_param() - sets a compression parameter ++ * @cctx: The context. Must have been initialized with zstd_init_cctx(). ++ * @param: The parameter to set. ++ * @value: The value to set the parameter to. ++ * ++ * Return: Zero or an error, which can be checked using zstd_is_error(). ++ */ ++size_t zstd_cctx_set_param(zstd_cctx *cctx, zstd_cparameter param, int value); ++ + + /** + * zstd_get_cparams() - returns zstd_compression_parameters for selected level +@@ -175,8 +188,6 @@ zstd_compression_parameters zstd_get_cparams(int level, + + /* ====== Single-pass Compression ====== */ + +-typedef ZSTD_CCtx zstd_cctx; +- + /** + * zstd_cctx_workspace_bound() - max memory needed to initialize a zstd_cctx + * @parameters: The compression parameters to be used. +@@ -190,6 +201,20 @@ typedef ZSTD_CCtx zstd_cctx; + */ + size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *parameters); + ++/** ++ * zstd_cctx_workspace_bound_with_ext_seq_prod() - max memory needed to ++ * initialize a zstd_cctx when using the block-level external sequence ++ * producer API. ++ * @parameters: The compression parameters to be used. ++ * ++ * If multiple compression parameters might be used, the caller must call ++ * this function for each set of parameters and use the maximum size. ++ * ++ * Return: A lower bound on the size of the workspace that is passed to ++ * zstd_init_cctx(). ++ */ ++size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *parameters); ++ + /** + * zstd_init_cctx() - initialize a zstd compression context + * @workspace: The workspace to emplace the context into. It must outlive +@@ -424,6 +449,16 @@ typedef ZSTD_CStream zstd_cstream; + */ + size_t zstd_cstream_workspace_bound(const zstd_compression_parameters *cparams); + ++/** ++ * zstd_cstream_workspace_bound_with_ext_seq_prod() - memory needed to initialize ++ * a zstd_cstream when using the block-level external sequence producer API. ++ * @cparams: The compression parameters to be used for compression. ++ * ++ * Return: A lower bound on the size of the workspace that is passed to ++ * zstd_init_cstream(). ++ */ ++size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *cparams); ++ + /** + * zstd_init_cstream() - initialize a zstd streaming compression context + * @parameters The zstd parameters to use for compression. +@@ -583,6 +618,18 @@ size_t zstd_decompress_stream(zstd_dstream *dstream, zstd_out_buffer *output, + */ + size_t zstd_find_frame_compressed_size(const void *src, size_t src_size); + ++/** ++ * zstd_register_sequence_producer() - exposes the zstd library function ++ * ZSTD_registerSequenceProducer(). This is used for the block-level external ++ * sequence producer API. See upstream zstd.h for detailed documentation. 
++ */ ++typedef ZSTD_sequenceProducer_F zstd_sequence_producer_f; ++void zstd_register_sequence_producer( ++ zstd_cctx *cctx, ++ void* sequence_producer_state, ++ zstd_sequence_producer_f sequence_producer ++); ++ + /** + * struct zstd_frame_params - zstd frame parameters stored in the frame header + * @frameContentSize: The frame content size, or ZSTD_CONTENTSIZE_UNKNOWN if not +@@ -596,7 +643,7 @@ size_t zstd_find_frame_compressed_size(const void *src, size_t src_size); + * + * See zstd_lib.h. + */ +-typedef ZSTD_frameHeader zstd_frame_header; ++typedef ZSTD_FrameHeader zstd_frame_header; + + /** + * zstd_get_frame_header() - extracts parameters from a zstd or skippable frame +@@ -611,4 +658,35 @@ typedef ZSTD_frameHeader zstd_frame_header; + size_t zstd_get_frame_header(zstd_frame_header *params, const void *src, + size_t src_size); + ++/** ++ * struct zstd_sequence - a sequence of literals or a match ++ * ++ * @offset: The offset of the match ++ * @litLength: The literal length of the sequence ++ * @matchLength: The match length of the sequence ++ * @rep: Represents which repeat offset is used ++ */ ++typedef ZSTD_Sequence zstd_sequence; ++ ++/** ++ * zstd_compress_sequences_and_literals() - compress an array of zstd_sequence and literals ++ * ++ * @cctx: The zstd compression context. ++ * @dst: The buffer to compress the data into. ++ * @dst_capacity: The size of the destination buffer. ++ * @in_seqs: The array of zstd_sequence to compress. ++ * @in_seqs_size: The number of sequences in in_seqs. ++ * @literals: The literals associated to the sequences to be compressed. ++ * @lit_size: The size of the literals in the literals buffer. ++ * @lit_capacity: The size of the literals buffer. ++ * @decompressed_size: The size of the input data ++ * ++ * Return: The compressed size or an error, which can be checked using ++ * zstd_is_error(). ++ */ ++size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity, ++ const zstd_sequence *in_seqs, size_t in_seqs_size, ++ const void* literals, size_t lit_size, size_t lit_capacity, ++ size_t decompressed_size); ++ + #endif /* LINUX_ZSTD_H */ diff --git a/include/linux/zstd_errors.h b/include/linux/zstd_errors.h -index 58b6dd45a969..6d5cf55f0bf3 100644 +index 58b6dd45a969..c307fb011132 100644 --- a/include/linux/zstd_errors.h +++ b/include/linux/zstd_errors.h @@ -1,5 +1,6 @@ @@ -30148,14 +31642,15 @@ index 58b6dd45a969..6d5cf55f0bf3 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the -@@ -17,8 +18,17 @@ +@@ -12,13 +13,18 @@ + #define ZSTD_ERRORS_H_398273423 - /* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ --#define ZSTDERRORLIB_VISIBILITY --#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +-/*===== dependency =====*/ +-#include /* size_t */ ++/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ +#define ZSTDERRORLIB_VISIBLE -+ + +#ifndef ZSTDERRORLIB_HIDDEN +# if (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden"))) @@ -30163,12 +31658,15 @@ index 58b6dd45a969..6d5cf55f0bf3 100644 +# define ZSTDERRORLIB_HIDDEN +# endif +#endif -+ + +-/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ +-#define ZSTDERRORLIB_VISIBILITY +-#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +#define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE /*-********************************************* * Error codes list -@@ -43,14 +53,17 @@ typedef enum { +@@ -43,14 +49,18 @@ typedef enum { ZSTD_error_frameParameter_windowTooLarge = 16, ZSTD_error_corruption_detected = 20, ZSTD_error_checksum_wrong = 22, @@ -30182,11 +31680,12 @@ index 58b6dd45a969..6d5cf55f0bf3 100644 ZSTD_error_tableLog_tooLarge = 44, ZSTD_error_maxSymbolValue_tooLarge = 46, ZSTD_error_maxSymbolValue_tooSmall = 48, ++ ZSTD_error_cannotProduce_uncompressedBlock = 49, + ZSTD_error_stabilityCondition_notRespected = 50, ZSTD_error_stage_wrong = 60, ZSTD_error_init_missing = 62, ZSTD_error_memory_allocation = 64, -@@ -58,11 +71,15 @@ typedef enum { +@@ -58,18 +68,18 @@ typedef enum { ZSTD_error_dstSize_tooSmall = 70, ZSTD_error_srcSize_wrong = 72, ZSTD_error_dstBuffer_null = 74, @@ -30202,8 +31701,15 @@ index 58b6dd45a969..6d5cf55f0bf3 100644 ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ } ZSTD_ErrorCode; +-/*! ZSTD_getErrorCode() : +- convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, +- which can be used to compare with enum list published above */ +-ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); + ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /*< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ + + diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h -index 79d55465d5c1..6320fedcf8a4 100644 +index 79d55465d5c1..e295d4125dde 100644 --- a/include/linux/zstd_lib.h +++ b/include/linux/zstd_lib.h @@ -1,5 +1,6 @@ @@ -30214,15 +31720,21 @@ index 79d55465d5c1..6320fedcf8a4 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the -@@ -11,23 +12,42 @@ +@@ -11,23 +12,47 @@ #ifndef ZSTD_H_235446 #define ZSTD_H_235446 -/* ====== Dependency ======*/ +-#include /* INT_MAX */ ++ +/* ====== Dependencies ======*/ - #include /* INT_MAX */ #include /* size_t */ ++#include /* list of errors */ ++#if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) ++#include /* INT_MAX */ ++#endif /* ZSTD_STATIC_LINKING_ONLY */ ++ /* ===== ZSTDLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDLIB_VISIBLE @@ -30248,7 +31760,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 +#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS +# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */ +#else -+# if (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) ++# if (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) || defined(__IAR_SYSTEMS_ICC__) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message))) +# elif (__GNUC__ >= 3) +# define ZSTD_DEPRECATED(message) __attribute__((deprecated)) @@ -30261,16 +31773,21 @@ index 79d55465d5c1..6320fedcf8a4 100644 /* ***************************************************************************** Introduction -@@ -65,7 +85,7 @@ +@@ -65,7 +90,7 @@ /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 #define ZSTD_VERSION_MINOR 5 -#define ZSTD_VERSION_RELEASE 2 -+#define ZSTD_VERSION_RELEASE 6 ++#define ZSTD_VERSION_RELEASE 7 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /*! ZSTD_versionNumber() : -@@ -107,7 +127,8 @@ ZSTDLIB_API const char* ZSTD_versionString(void); +@@ -103,11 +128,12 @@ ZSTDLIB_API const char* ZSTD_versionString(void); + + + /* ************************************* +-* Simple API ++* Simple Core API ***************************************/ /*! ZSTD_compress() : * Compresses `src` content as a single zstd compressed frame into already allocated `dst`. @@ -30280,30 +31797,122 @@ index 79d55465d5c1..6320fedcf8a4 100644 * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, -@@ -156,7 +177,9 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t +@@ -115,47 +141,55 @@ ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity, + int compressionLevel); + + /*! ZSTD_decompress() : +- * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. +- * `dstCapacity` is an upper bound of originalSize to regenerate. +- * If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. +- * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), +- * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ ++ * `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. ++ * Multiple compressed frames can be decompressed at once with this method. ++ * The result will be the concatenation of all decompressed frames, back to back. ++ * `dstCapacity` is an upper bound of originalSize to regenerate. ++ * First frame's decompressed size can be extracted using ZSTD_getFrameContentSize(). ++ * If maximum upper bound isn't known, prefer using streaming mode to decompress data. 
++ * @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), ++ * or an errorCode if it fails (which can be tested using ZSTD_isError()). */ + ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity, + const void* src, size_t compressedSize); + ++ ++/*====== Decompression helper functions ======*/ ++ + /*! ZSTD_getFrameContentSize() : requires v1.3.0+ +- * `src` should point to the start of a ZSTD encoded frame. +- * `srcSize` must be at least as large as the frame header. +- * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. +- * @return : - decompressed size of `src` frame content, if known +- * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined +- * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) +- * note 1 : a 0 return value means the frame is valid but "empty". +- * note 2 : decompressed size is an optional field, it may not be present, typically in streaming mode. +- * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. +- * In which case, it's necessary to use streaming mode to decompress data. +- * Optionally, application can rely on some implicit limit, +- * as ZSTD_decompress() only needs an upper bound of decompressed size. +- * (For example, data could be necessarily cut into blocks <= 16 KB). +- * note 3 : decompressed size is always present when compression is completed using single-pass functions, +- * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). +- * note 4 : decompressed size can be very large (64-bits value), +- * potentially larger than what local system can handle as a single memory segment. +- * In which case, it's necessary to use streaming mode to decompress data. +- * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. +- * Always ensure return value fits within application's authorized limits. +- * Each application can set its own limits. +- * note 6 : This function replaces ZSTD_getDecompressedSize() */ ++ * `src` should point to the start of a ZSTD encoded frame. ++ * `srcSize` must be at least as large as the frame header. ++ * hint : any size >= `ZSTD_frameHeaderSize_max` is large enough. ++ * @return : - decompressed size of `src` frame content, if known ++ * - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined ++ * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) ++ * note 1 : a 0 return value means the frame is valid but "empty". ++ * When invoking this method on a skippable frame, it will return 0. ++ * note 2 : decompressed size is an optional field, it may not be present (typically in streaming mode). ++ * When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. ++ * In which case, it's necessary to use streaming mode to decompress data. ++ * Optionally, application can rely on some implicit limit, ++ * as ZSTD_decompress() only needs an upper bound of decompressed size. ++ * (For example, data could be necessarily cut into blocks <= 16 KB). ++ * note 3 : decompressed size is always present when compression is completed using single-pass functions, ++ * such as ZSTD_compress(), ZSTD_compressCCtx() ZSTD_compress_usingDict() or ZSTD_compress_usingCDict(). ++ * note 4 : decompressed size can be very large (64-bits value), ++ * potentially larger than what local system can handle as a single memory segment. 
++ * In which case, it's necessary to use streaming mode to decompress data. ++ * note 5 : If source is untrusted, decompressed size could be wrong or intentionally modified. ++ * Always ensure return value fits within application's authorized limits. ++ * Each application can set its own limits. ++ * note 6 : This function replaces ZSTD_getDecompressedSize() */ + #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) + #define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) + ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); + +-/*! ZSTD_getDecompressedSize() : +- * NOTE: This function is now obsolete, in favor of ZSTD_getFrameContentSize(). ++/*! ZSTD_getDecompressedSize() (obsolete): ++ * This function is now obsolete, in favor of ZSTD_getFrameContentSize(). + * Both functions work the same way, but ZSTD_getDecompressedSize() blends * "empty", "unknown" and "error" results to the same return value (0), * while ZSTD_getFrameContentSize() gives them separate return values. * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ --ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); +ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize") -+ZSTDLIB_API -+unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); + ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+ - * `src` should point to the start of a ZSTD frame or skippable frame. -@@ -168,8 +191,30 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize) +@@ -163,18 +197,50 @@ ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t + * `srcSize` must be >= first frame size + * @return : the compressed size of the first frame starting at `src`, + * suitable to pass as `srcSize` to `ZSTD_decompress` or similar, +- * or an error code if input is invalid */ ++ * or an error code if input is invalid ++ * Note 1: this method is called _find*() because it's not enough to read the header, ++ * it may have to scan through the frame's content, to reach its end. ++ * Note 2: this method also works with Skippable Frames. In which case, ++ * it returns the size of the complete skippable frame, ++ * which is always equal to its content size + 8 bytes for headers. */ + ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize); - /*====== Helper functions ======*/ +-/*====== Helper functions ======*/ -#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? 
(((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ -ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ -+/* ZSTD_compressBound() : +-ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ +-ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ +-ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ +-ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ +-ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ ++/*====== Compression helper functions ======*/ ++ ++/*! ZSTD_compressBound() : + * maximum compressed size in worst case single-pass scenario. -+ * When invoking `ZSTD_compress()` or any other one-pass compression function, ++ * When invoking `ZSTD_compress()`, or any other one-pass compression function, + * it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize) + * as it eliminates one potential failure scenario, + * aka not enough room in dst buffer to write the compressed frame. -+ * Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE . ++ * Note : ZSTD_compressBound() itself can fail, if @srcSize >= ZSTD_MAX_INPUT_SIZE . + * In which case, ZSTD_compressBound() will return an error code + * which can be tested using ZSTD_isError(). + * @@ -30311,29 +31920,49 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * same as ZSTD_compressBound(), but as a macro. + * It can be used to produce constants, which can be useful for static allocation, + * for example to size a static array on stack. -+ * Will produce constant value 0 if srcSize too large. ++ * Will produce constant value 0 if srcSize is too large. + */ +#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00ULL : 0xFF00FF00U) +#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */ +ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ ++ ++ ++/*====== Error helper functions ======*/ +/* ZSTD_isError() : + * Most ZSTD_* functions returning a size_t value can be tested for error, + * using ZSTD_isError(). 
+ * @return 1 if error, 0 otherwise + */ - ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ - ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ - ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ -@@ -183,7 +228,7 @@ ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compres ++ZSTDLIB_API unsigned ZSTD_isError(size_t result); /*!< tells if a `size_t` function result is an error code */ ++ZSTDLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); /* convert a result into an error code, which can be compared to error enum list */ ++ZSTDLIB_API const char* ZSTD_getErrorName(size_t result); /*!< provides readable string from a function result */ ++ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ ++ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ ++ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ + + + /* ************************************* +@@ -182,25 +248,25 @@ ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compres + ***************************************/ /*= Compression context * When compressing many times, - * it is recommended to allocate a context just once, +- * it is recommended to allocate a context just once, - * and re-use it for each successive compression operation. +- * This will make workload friendlier for system's memory. ++ * it is recommended to allocate a compression context just once, + * and reuse it for each successive compression operation. - * This will make workload friendlier for system's memory. ++ * This will make the workload easier for system's memory. * Note : re-using context is just a speed / resource optimization. * It doesn't change the compression ratio, which remains identical. -@@ -196,9 +241,9 @@ ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer * +- * Note 2 : In multi-threaded environments, +- * use one different context per thread for parallel execution. ++ * Note 2: For parallel execution in multi-threaded environments, ++ * use one different context per thread . + */ + typedef struct ZSTD_CCtx_s ZSTD_CCtx; + ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void); +-ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* accept NULL pointer */ ++ZSTDLIB_API size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); /* compatible with NULL pointer */ /*! ZSTD_compressCCtx() : * Same as ZSTD_compress(), using an explicit ZSTD_CCtx. @@ -30344,9 +31973,12 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * this function compresses at the requested compression level, + * __ignoring any other advanced parameter__ . * If any advanced parameter was set using the advanced API, - * they will all be reset. Only `compressionLevel` remains. +- * they will all be reset. Only `compressionLevel` remains. ++ * they will all be reset. Only @compressionLevel remains. 
*/ -@@ -210,7 +255,7 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, + ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, +@@ -210,7 +276,7 @@ ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, /*= Decompression context * When decompressing many times, * it is recommended to allocate a context only once, @@ -30355,7 +31987,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * This will make workload friendlier for system's memory. * Use one context per thread for parallel execution. */ typedef struct ZSTD_DCtx_s ZSTD_DCtx; -@@ -220,7 +265,7 @@ ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer * +@@ -220,7 +286,7 @@ ZSTDLIB_API size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); /* accept NULL pointer * /*! ZSTD_decompressDCtx() : * Same as ZSTD_decompress(), * requires an allocated ZSTD_DCtx. @@ -30364,7 +31996,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 */ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, -@@ -236,12 +281,12 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, +@@ -236,12 +302,12 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, * using ZSTD_CCtx_set*() functions. * Pushed parameters are sticky : they are valid for next compressed frame, and any subsequent frame. * "sticky" parameters are applicable to `ZSTD_compress2()` and `ZSTD_compressStream*()` ! @@ -30379,13 +32011,13 @@ index 79d55465d5c1..6320fedcf8a4 100644 */ -@@ -324,6 +369,19 @@ typedef enum { +@@ -324,6 +390,19 @@ typedef enum { * The higher the value of selected strategy, the more complex it is, * resulting in stronger and slower compression. * Special: value 0 means "use default strategy". */ + + ZSTD_c_targetCBlockSize=130, /* v1.5.6+ -+ * Attempts to fit compressed block size into approximatively targetCBlockSize. ++ * Attempts to fit compressed block size into approximately targetCBlockSize. + * Bound by ZSTD_TARGETCBLOCKSIZE_MIN and ZSTD_TARGETCBLOCKSIZE_MAX. + * Note that it's not a guarantee, just a convergence target (default:0). + * No target when targetCBlockSize == 0. @@ -30399,7 +32031,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 /* LDM mode parameters */ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. * This parameter is designed to improve compression ratio -@@ -403,7 +461,6 @@ typedef enum { +@@ -403,15 +482,18 @@ typedef enum { * ZSTD_c_forceMaxWindow * ZSTD_c_forceAttachDict * ZSTD_c_literalCompressionMode @@ -30407,9 +32039,12 @@ index 79d55465d5c1..6320fedcf8a4 100644 * ZSTD_c_srcSizeHint * ZSTD_c_enableDedicatedDictSearch * ZSTD_c_stableInBuffer -@@ -412,6 +469,9 @@ typedef enum { + * ZSTD_c_stableOutBuffer + * ZSTD_c_blockDelimiters * ZSTD_c_validateSequences - * ZSTD_c_useBlockSplitter +- * ZSTD_c_useBlockSplitter ++ * ZSTD_c_blockSplitterLevel ++ * ZSTD_c_splitAfterSequences * ZSTD_c_useRowMatchFinder + * ZSTD_c_prefetchCDictTables + * ZSTD_c_enableSeqProducerFallback @@ -30417,7 +32052,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; * also, the enums values themselves are unstable and can still change. 
-@@ -421,7 +481,7 @@ typedef enum { +@@ -421,7 +503,7 @@ typedef enum { ZSTD_c_experimentalParam3=1000, ZSTD_c_experimentalParam4=1001, ZSTD_c_experimentalParam5=1002, @@ -30426,7 +32061,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTD_c_experimentalParam7=1004, ZSTD_c_experimentalParam8=1005, ZSTD_c_experimentalParam9=1006, -@@ -430,7 +490,11 @@ typedef enum { +@@ -430,7 +512,12 @@ typedef enum { ZSTD_c_experimentalParam12=1009, ZSTD_c_experimentalParam13=1010, ZSTD_c_experimentalParam14=1011, @@ -30435,11 +32070,12 @@ index 79d55465d5c1..6320fedcf8a4 100644 + ZSTD_c_experimentalParam16=1013, + ZSTD_c_experimentalParam17=1014, + ZSTD_c_experimentalParam18=1015, -+ ZSTD_c_experimentalParam19=1016 ++ ZSTD_c_experimentalParam19=1016, ++ ZSTD_c_experimentalParam20=1017 } ZSTD_cParameter; typedef struct { -@@ -493,7 +557,7 @@ typedef enum { +@@ -493,7 +580,7 @@ typedef enum { * They will be used to compress next frame. * Resetting session never fails. * - The parameters : changes all parameters back to "default". @@ -30448,7 +32084,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing) * otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError()) * - Both : similar to resetting the session, followed by resetting parameters. -@@ -502,11 +566,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); +@@ -502,11 +589,13 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset); /*! ZSTD_compress2() : * Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API. @@ -30463,7 +32099,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * @return : compressed size written into `dst` (<= `dstCapacity), * or an error code if it fails (which can be tested using ZSTD_isError()). */ -@@ -543,13 +609,17 @@ typedef enum { +@@ -543,13 +632,17 @@ typedef enum { * ZSTD_d_stableOutBuffer * ZSTD_d_forceIgnoreChecksum * ZSTD_d_refMultipleDDicts @@ -30482,7 +32118,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 } ZSTD_dParameter; -@@ -604,14 +674,14 @@ typedef struct ZSTD_outBuffer_s { +@@ -604,14 +697,14 @@ typedef struct ZSTD_outBuffer_s { * A ZSTD_CStream object is required to track streaming operation. * Use ZSTD_createCStream() and ZSTD_freeCStream() to create/release resources. * ZSTD_CStream objects can be reused multiple times on consecutive compression operations. @@ -30499,7 +32135,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * When in doubt, it's recommended to fully initialize the context before usage. * Use ZSTD_CCtx_reset() to reset the context and ZSTD_CCtx_setParameter(), * ZSTD_CCtx_setPledgedSrcSize(), or ZSTD_CCtx_loadDictionary() and friends to -@@ -700,6 +770,11 @@ typedef enum { +@@ -700,6 +793,11 @@ typedef enum { * only ZSTD_e_end or ZSTD_e_flush operations are allowed. * Before starting a new compression job, or changing compression parameters, * it is required to fully flush internal buffers. @@ -30511,7 +32147,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 */ ZSTDLIB_API size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, -@@ -728,8 +803,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output +@@ -728,8 +826,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output * This following is a legacy streaming API, available since v1.0+ . * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). 
* It is redundant, but remains fully supported. @@ -30520,7 +32156,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ******************************************************************************/ /*! -@@ -738,6 +811,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output +@@ -738,6 +834,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); @@ -30530,16 +32166,40 @@ index 79d55465d5c1..6320fedcf8a4 100644 */ ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel); /*! -@@ -758,7 +834,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); +@@ -758,7 +857,7 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); * * A ZSTD_DStream object is required to track streaming operations. * Use ZSTD_createDStream() and ZSTD_freeDStream() to create/release resources. -* ZSTD_DStream objects can be re-used multiple times. -+* ZSTD_DStream objects can be reused multiple times. ++* ZSTD_DStream objects can be re-employed multiple times. * * Use ZSTD_initDStream() to start a new decompression operation. * @return : recommended first input size -@@ -788,13 +864,37 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer +@@ -768,16 +867,21 @@ ZSTDLIB_API size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output); + * The function will update both `pos` fields. + * If `input.pos < input.size`, some input has not been consumed. + * It's up to the caller to present again remaining data. ++* + * The function tries to flush all data decoded immediately, respecting output buffer size. + * If `output.pos < output.size`, decoder has flushed everything it could. +-* But if `output.pos == output.size`, there might be some data left within internal buffers., ++* ++* However, when `output.pos == output.size`, it's more difficult to know. ++* If @return > 0, the frame is not complete, meaning ++* either there is still some data left to flush within internal buffers, ++* or there is more input to read to complete the frame (or both). + * In which case, call ZSTD_decompressStream() again to flush whatever remains in the buffer. + * Note : with no additional input provided, amount of data flushed is necessarily <= ZSTD_BLOCKSIZE_MAX. + * @return : 0 when a frame is completely decoded and fully flushed, + * or an error code, which can be tested using ZSTD_isError(), + * or any other value > 0, which means there is still some decoding or flushing to do to complete current frame : + * the return value is a suggested next input size (just a hint for better latency) +-* that will never request more than the remaining frame size. ++* that will never request more than the remaining content of the compressed frame. 
+ * *******************************************************************************/ + + typedef ZSTD_DCtx ZSTD_DStream; /*< DCtx and DStream are now effectively same object (>= v1.3.0) */ +@@ -788,13 +892,38 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer /*===== Streaming decompression functions =====*/ @@ -30560,9 +32220,10 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * Function will update both input and output `pos` fields exposing current state via these fields: + * - `input.pos < input.size`, some input remaining and caller should provide remaining input + * on the next call. -+ * - `output.pos < output.size`, decoder finished and flushed all remaining buffers. -+ * - `output.pos == output.size`, potentially uncflushed data present in the internal buffers, -+ * call ZSTD_decompressStream() again to flush remaining data to output. ++ * - `output.pos < output.size`, decoder flushed internal output buffer. ++ * - `output.pos == output.size`, unflushed data potentially present in the internal buffers, ++ * check ZSTD_decompressStream() @return value, ++ * if > 0, invoke it again to flush remaining data to output. + * Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX. + * + * @return : 0 when a frame is completely decoded and fully flushed, @@ -30578,7 +32239,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input); ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */ -@@ -913,7 +1013,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); +@@ -913,7 +1042,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); * If @return == 0, the dictID could not be decoded. * This could for one of the following reasons : * - The frame does not require a dictionary to be decoded (most common case). @@ -30587,7 +32248,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). * - This is not a Zstandard frame. -@@ -925,9 +1025,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); +@@ -925,9 +1054,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * Advanced dictionary and prefix API (Requires v1.4.0+) * * This API allows dictionaries to be used with ZSTD_compress2(), @@ -30602,7 +32263,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ******************************************************************************/ -@@ -937,8 +1039,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); +@@ -937,8 +1068,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary, * meaning "return to no-dictionary mode". @@ -30614,7 +32275,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Note 2 : Loading a dictionary involves building tables. * It's also a CPU consuming operation, with non-negligible impact on latency. 
* Tables are dependent on compression parameters, and for this reason, -@@ -947,11 +1050,15 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); +@@ -947,11 +1079,15 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead. * In such a case, dictionary buffer must outlive its users. * Note 4 : Use ZSTD_CCtx_loadDictionary_advanced() @@ -30632,7 +32293,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. * The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs. -@@ -970,6 +1077,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); +@@ -970,6 +1106,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); * Decompression will need same prefix to properly regenerate data. * Compressing with a prefix is similar in outcome as performing a diff and compressing it, * but performs much faster, especially during decompression (compression speed is tunable with compression level). @@ -30640,7 +32301,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary * Note 1 : Prefix buffer is referenced. It **must** outlive compression. -@@ -986,9 +1094,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, +@@ -986,9 +1123,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); /*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ @@ -30653,7 +32314,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * @result : 0, or an error code (which can be tested with ZSTD_isError()). * Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary, * meaning "return to no-dictionary mode". -@@ -1012,9 +1120,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s +@@ -1012,9 +1149,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s * The memory for the table is allocated on the first call to refDDict, and can be * freed with ZSTD_freeDCtx(). * @@ -30666,7 +32327,21 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Special: referencing a NULL DDict means "return to no-dictionary mode". * Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx. */ -@@ -1071,24 +1180,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); +@@ -1051,6 +1189,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds); + ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict); + ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); + ++ + #endif /* ZSTD_H_235446 */ + + +@@ -1066,29 +1205,12 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); + #if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) + #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY + ++ + /* This can be overridden externally to hide static symbols. 
*/ + #ifndef ZSTDLIB_STATIC_API #define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE #endif @@ -30691,7 +32366,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 /* ************************************************************************************** * experimental API (static linking only) **************************************************************************************** -@@ -1123,6 +1214,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); +@@ -1123,6 +1245,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */ #define ZSTD_STRATEGY_MIN ZSTD_fast #define ZSTD_STRATEGY_MAX ZSTD_btultra2 @@ -30699,7 +32374,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 #define ZSTD_OVERLAPLOG_MIN 0 -@@ -1146,7 +1238,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); +@@ -1146,7 +1269,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_LDM_HASHRATELOG_MAX (ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN) /* Advanced parameter bounds */ @@ -30708,8 +32383,32 @@ index 79d55465d5c1..6320fedcf8a4 100644 #define ZSTD_TARGETCBLOCKSIZE_MAX ZSTD_BLOCKSIZE_MAX #define ZSTD_SRCSIZEHINT_MIN 0 #define ZSTD_SRCSIZEHINT_MAX INT_MAX -@@ -1303,7 +1395,7 @@ typedef enum { - } ZSTD_paramSwitch_e; +@@ -1188,7 +1311,7 @@ typedef struct { + * + * Note: This field is optional. ZSTD_generateSequences() will calculate the value of + * 'rep', but repeat offsets do not necessarily need to be calculated from an external +- * sequence provider's perspective. For example, ZSTD_compressSequences() does not ++ * sequence provider perspective. For example, ZSTD_compressSequences() does not + * use this 'rep' field at all (as of now). + */ + } ZSTD_Sequence; +@@ -1293,17 +1416,18 @@ typedef enum { + } ZSTD_literalCompressionMode_e; + + typedef enum { +- /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final +- * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable +- * or ZSTD_ps_disable allow for a force enable/disable the feature. ++ /* Note: This enum controls features which are conditionally beneficial. ++ * Zstd can take a decision on whether or not to enable the feature (ZSTD_ps_auto), ++ * but setting the switch to ZSTD_ps_enable or ZSTD_ps_disable force enable/disable the feature. + */ + ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */ + ZSTD_ps_enable = 1, /* Force-enable the feature */ + ZSTD_ps_disable = 2 /* Do not use the feature */ +-} ZSTD_paramSwitch_e; ++} ZSTD_ParamSwitch_e; ++#define ZSTD_paramSwitch_e ZSTD_ParamSwitch_e /* old name */ /* ************************************* -* Frame size functions @@ -30717,33 +32416,41 @@ index 79d55465d5c1..6320fedcf8a4 100644 ***************************************/ /*! ZSTD_findDecompressedSize() : -@@ -1350,29 +1442,122 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size +@@ -1345,34 +1469,130 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, + ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); + + /*! ZSTD_frameHeaderSize() : +- * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. ++ * srcSize must be large enough, aka >= ZSTD_FRAMEHEADERSIZE_PREFIX. 
+ * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); -+typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e; ++typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_FrameType_e; ++#define ZSTD_frameType_e ZSTD_FrameType_e /* old name */ +typedef struct { + unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */ + unsigned long long windowSize; /* can be very large, up to <= frameContentSize */ + unsigned blockSizeMax; -+ ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ ++ ZSTD_FrameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */ + unsigned headerSize; -+ unsigned dictID; ++ unsigned dictID; /* for ZSTD_skippableFrame, contains the skippable magic variant [0-15] */ + unsigned checksumFlag; + unsigned _reserved1; + unsigned _reserved2; -+} ZSTD_frameHeader; ++} ZSTD_FrameHeader; ++#define ZSTD_frameHeader ZSTD_FrameHeader /* old name */ + +/*! ZSTD_getFrameHeader() : -+ * decode Frame Header, or requires larger `srcSize`. -+ * @return : 0, `zfhPtr` is correctly filled, -+ * >0, `srcSize` is too small, value is wanted `srcSize` amount, ++ * decode Frame Header into `zfhPtr`, or requires larger `srcSize`. ++ * @return : 0 => header is complete, `zfhPtr` is correctly filled, ++ * >0 => `srcSize` is too small, @return value is the wanted `srcSize` amount, `zfhPtr` is not filled, + * or an error code, which can be tested using ZSTD_isError() */ -+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */ ++ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize); +/*! ZSTD_getFrameHeader_advanced() : + * same as ZSTD_getFrameHeader(), + * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -+ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); ++ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); + +/*! ZSTD_decompressionMargin() : + * Zstd supports in-place decompression, where the input and output buffers overlap. @@ -30791,10 +32498,14 @@ index 79d55465d5c1..6320fedcf8a4 100644 + )) + typedef enum { - ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ - ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */ - } ZSTD_sequenceFormat_e; - +- ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ +- ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */ +-} ZSTD_sequenceFormat_e; ++ ZSTD_sf_noBlockDelimiters = 0, /* ZSTD_Sequence[] has no block delimiters, just sequences */ ++ ZSTD_sf_explicitBlockDelimiters = 1 /* ZSTD_Sequence[] contains explicit block delimiters */ ++} ZSTD_SequenceFormat_e; ++#define ZSTD_sequenceFormat_e ZSTD_SequenceFormat_e /* old name */ ++ +/*! 
ZSTD_sequenceBound() : + * `srcSize` : size of the input buffer + * @return : upper-bound for the number of sequences that can be generated @@ -30803,7 +32514,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence). + */ +ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize); -+ + /*! ZSTD_generateSequences() : - * Generate sequences using ZSTD_compress2, given a source buffer. + * WARNING: This function is meant for debugging and informational purposes ONLY! @@ -30818,7 +32529,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * @param zc The compression context to be used for ZSTD_compress2(). Set any + * compression parameters you need on this context. + * @param outSeqs The output sequences buffer of size @p outSeqsSize -+ * @param outSeqsSize The size of the output sequences buffer. ++ * @param outSeqsCapacity The size of the output sequences buffer. + * ZSTD_sequenceBound(srcSize) is an upper bound on the number + * of sequences that can be generated. + * @param src The source buffer to generate sequences from of size @p srcSize. @@ -30845,40 +32556,146 @@ index 79d55465d5c1..6320fedcf8a4 100644 +ZSTD_DEPRECATED("For debugging only, will be replaced by ZSTD_extractSequences()") +ZSTDLIB_STATIC_API size_t +ZSTD_generateSequences(ZSTD_CCtx* zc, -+ ZSTD_Sequence* outSeqs, size_t outSeqsSize, ++ ZSTD_Sequence* outSeqs, size_t outSeqsCapacity, + const void* src, size_t srcSize); /*! ZSTD_mergeBlockDelimiters() : * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals -@@ -1388,7 +1573,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o +@@ -1388,8 +1608,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); /*! ZSTD_compressSequences() : - * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. +- * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) + * Compress an array of ZSTD_Sequence, associated with @src buffer, into dst. + * @src contains the entire input (not just the literals). + * If @srcSize > sum(sequence.length), the remaining bytes are considered all literals - * If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.) ++ * If a dictionary is included, then the cctx should reference the dict (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.). * The entire source is compressed into a single frame. * -@@ -1413,11 +1600,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si - * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. - * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, - * and cannot emit an RLE block that disagrees with the repcode history + * The compression behavior changes based on cctx params. In particular: +@@ -1398,11 +1620,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si + * the block size derived from the cctx, and sequences may be split. This is the default setting. 
+ * + * If ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, the array of ZSTD_Sequence is expected to contain +- * block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. ++ * valid block delimiters (defined in ZSTD_Sequence). Behavior is undefined if no block delimiters are provided. ++ * ++ * When ZSTD_c_blockDelimiters == ZSTD_sf_explicitBlockDelimiters, it's possible to decide generating repcodes ++ * using the advanced parameter ZSTD_c_repcodeResolution. Repcodes will improve compression ratio, though the benefit ++ * can vary greatly depending on Sequences. On the other hand, repcode resolution is an expensive operation. ++ * By default, it's disabled at low (<10) compression levels, and enabled above the threshold (>=10). ++ * ZSTD_c_repcodeResolution makes it possible to directly manage this processing in either direction. + * +- * If ZSTD_c_validateSequences == 0, this function will blindly accept the sequences provided. Invalid sequences cause undefined +- * behavior. If ZSTD_c_validateSequences == 1, then if sequence is invalid (see doc/zstd_compression_format.md for +- * specifics regarding offset/matchlength requirements) then the function will bail out and return an error. ++ * If ZSTD_c_validateSequences == 0, this function blindly accepts the Sequences provided. Invalid Sequences cause undefined ++ * behavior. If ZSTD_c_validateSequences == 1, then the function will detect invalid Sequences (see doc/zstd_compression_format.md for ++ * specifics regarding offset/matchlength requirements) and then bail out and return an error. + * + * In addition to the two adjustable experimental params, there are other important cctx params. + * - ZSTD_c_minMatch MUST be set as less than or equal to the smallest match generated by the match finder. It has a minimum value of ZSTD_MINMATCH_MIN. +@@ -1410,14 +1638,42 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si + * - ZSTD_c_windowLog affects offset validation: this function will return an error at higher debug levels if a provided offset + * is larger than what the spec allows for a given window log and dictionary (if present). See: doc/zstd_compression_format.md + * +- * Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused. +- * Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly, +- * and cannot emit an RLE block that disagrees with the repcode history - * @return : final compressed size or a ZSTD error. -+ * @return : final compressed size, or a ZSTD error code. - */ +- */ -ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize); ++ * Note: Repcodes are, as of now, always re-calculated within this function, ZSTD_Sequence.rep is effectively unused. ++ * Dev Note: Once ability to ingest repcodes become available, the explicit block delims mode must respect those repcodes exactly, ++ * and cannot emit an RLE block that disagrees with the repcode history. ++ * @return : final compressed size, or a ZSTD error code. 
++ */ +ZSTDLIB_STATIC_API size_t -+ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize, -+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize, -+ const void* src, size_t srcSize); ++ZSTD_compressSequences(ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ++ const void* src, size_t srcSize); ++ ++ ++/*! ZSTD_compressSequencesAndLiterals() : ++ * This is a variant of ZSTD_compressSequences() which, ++ * instead of receiving (src,srcSize) as input parameter, receives (literals,litSize), ++ * aka all the literals, already extracted and laid out into a single continuous buffer. ++ * This can be useful if the process generating the sequences also happens to generate the buffer of literals, ++ * thus skipping an extraction + caching stage. ++ * It's a speed optimization, useful when the right conditions are met, ++ * but it also features the following limitations: ++ * - Only supports explicit delimiter mode ++ * - Currently does not support Sequences validation (so input Sequences are trusted) ++ * - Not compatible with frame checksum, which must be disabled ++ * - If any block is incompressible, will fail and return an error ++ * - @litSize must be == sum of all @.litLength fields in @inSeqs. Any discrepancy will generate an error. ++ * - @litBufCapacity is the size of the underlying buffer into which literals are written, starting at address @literals. ++ * @litBufCapacity must be at least 8 bytes larger than @litSize. ++ * - @decompressedSize must be correct, and correspond to the sum of all Sequences. Any discrepancy will generate an error. ++ * @return : final compressed size, or a ZSTD error code. ++ */ ++ZSTDLIB_STATIC_API size_t ++ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const ZSTD_Sequence* inSeqs, size_t nbSequences, ++ const void* literals, size_t litSize, size_t litBufCapacity, ++ size_t decompressedSize); /*! ZSTD_writeSkippableFrame() : -@@ -1464,48 +1652,59 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); +@@ -1425,8 +1681,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* ds + * + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, + * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. +- * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so +- * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. ++ * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, ++ * so the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. + * + * Returns an error if destination buffer is not large enough, if the source size is not representable + * with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid). +@@ -1434,26 +1690,28 @@ ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* ds + * @return : number of bytes written or a ZSTD error. + */ + ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, +- const void* src, size_t srcSize, unsigned magicVariant); ++ const void* src, size_t srcSize, ++ unsigned magicVariant); + + /*! ZSTD_readSkippableFrame() : +- * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. 
++ * Retrieves the content of a zstd skippable frame starting at @src, and writes it to @dst buffer. + * +- * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, +- * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested +- * in the magicVariant. ++ * The parameter @magicVariant will receive the magicVariant that was supplied when the frame was written, ++ * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. ++ * This can be NULL if the caller is not interested in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * + * @return : number of bytes written or a ZSTD error. + */ +-ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, +- const void* src, size_t srcSize); ++ZSTDLIB_STATIC_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, ++ unsigned* magicVariant, ++ const void* src, size_t srcSize); + + /*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + */ +-ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); ++ZSTDLIB_STATIC_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); + + + +@@ -1464,48 +1722,59 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); /*! ZSTD_estimate*() : * These functions make it possible to estimate memory usage * of a future {D,C}Ctx, before its creation. @@ -30953,7 +32770,23 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); /*! ZSTD_estimate?DictSize() : -@@ -1649,22 +1848,45 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); +@@ -1568,7 +1837,15 @@ typedef void (*ZSTD_freeFunction) (void* opaque, void* address); + typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem; + static + __attribute__((__unused__)) ++ ++#if defined(__clang__) && __clang_major__ >= 5 ++#pragma clang diagnostic push ++#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" ++#endif + ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */ ++#if defined(__clang__) && __clang_major__ >= 5 ++#pragma clang diagnostic pop ++#endif + + ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); + ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); +@@ -1649,22 +1926,45 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); * This function never fails (wide contract) */ ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); @@ -31003,7 +32836,16 @@ index 79d55465d5c1..6320fedcf8a4 100644 size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, -@@ -1737,11 +1959,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo +@@ -1725,7 +2025,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo + * See the comments on that enum for an explanation of the feature. */ + #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 + +-/* Controlled with ZSTD_paramSwitch_e enum. 
++/* Controlled with ZSTD_ParamSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never compress literals. + * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals +@@ -1737,11 +2037,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo */ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 @@ -31015,7 +32857,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 /* User's best guess of source size. * Hint is not valid when srcSizeHint == 0. * There is no guarantee that hint is close to actual source size, -@@ -1808,13 +2025,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo +@@ -1808,13 +2103,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * Experimental parameter. * Default is 0 == disabled. Set to 1 to enable. * @@ -31039,7 +32881,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * * When this flag is enabled zstd won't allocate an input window buffer, * because the user guarantees it can reference the ZSTD_inBuffer until -@@ -1822,18 +2042,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo +@@ -1822,18 +2120,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo * large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also * avoid the memcpy() from the input buffer to the input window buffer. * @@ -31063,21 +32905,80 @@ index 79d55465d5c1..6320fedcf8a4 100644 */ #define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9 -@@ -1878,7 +2095,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo - * Without validation, providing a sequence that does not conform to the zstd spec will cause - * undefined behavior, and may produce a corrupted block. +@@ -1871,22 +2166,46 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo + /* ZSTD_c_validateSequences + * Default is 0 == disabled. Set to 1 to enable sequence validation. + * +- * For use with sequence compression API: ZSTD_compressSequences(). +- * Designates whether or not we validate sequences provided to ZSTD_compressSequences() ++ * For use with sequence compression API: ZSTD_compressSequences*(). ++ * Designates whether or not provided sequences are validated within ZSTD_compressSequences*() + * during function execution. + * +- * Without validation, providing a sequence that does not conform to the zstd spec will cause +- * undefined behavior, and may produce a corrupted block. ++ * When Sequence validation is disabled (default), Sequences are compressed as-is, ++ * so they must correct, otherwise it would result in a corruption error. * - * With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for -+ * With validation enabled, if sequence is invalid (see doc/zstd_compression_format.md for ++ * Sequence validation adds some protection, by ensuring that all values respect boundary conditions. ++ * If a Sequence is detected invalid (see doc/zstd_compression_format.md for * specifics regarding offset/matchlength requirements) then the function will bail out and * return an error. - * -@@ -1928,6 +2145,79 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo +- * + */ + #define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 + +-/* ZSTD_c_useBlockSplitter +- * Controlled with ZSTD_paramSwitch_e enum. 
++/* ZSTD_c_blockSplitterLevel ++ * note: this parameter only influences the first splitter stage, ++ * which is active before producing the sequences. ++ * ZSTD_c_splitAfterSequences controls the next splitter stage, ++ * which is active after sequence production. ++ * Note that both can be combined. ++ * Allowed values are between 0 and ZSTD_BLOCKSPLITTER_LEVEL_MAX included. ++ * 0 means "auto", which will select a value depending on current ZSTD_c_strategy. ++ * 1 means no splitting. ++ * Then, values from 2 to 6 are sorted in increasing cpu load order. ++ * ++ * Note that currently the first block is never split, ++ * to ensure expansion guarantees in presence of incompressible data. ++ */ ++#define ZSTD_BLOCKSPLITTER_LEVEL_MAX 6 ++#define ZSTD_c_blockSplitterLevel ZSTD_c_experimentalParam20 ++ ++/* ZSTD_c_splitAfterSequences ++ * This is a stronger splitter algorithm, ++ * based on actual sequences previously produced by the selected parser. ++ * It's also slower, and as a consequence, mostly used for high compression levels. ++ * While the post-splitter does overlap with the pre-splitter, ++ * both can nonetheless be combined, ++ * notably with ZSTD_c_blockSplitterLevel at ZSTD_BLOCKSPLITTER_LEVEL_MAX, ++ * resulting in higher compression ratio than just one of them. ++ * + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use block splitter. + * Set to ZSTD_ps_enable to always use block splitter. +@@ -1894,10 +2213,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * block splitting based on the compression parameters. + */ +-#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13 ++#define ZSTD_c_splitAfterSequences ZSTD_c_experimentalParam13 + + /* ZSTD_c_useRowMatchFinder +- * Controlled with ZSTD_paramSwitch_e enum. ++ * Controlled with ZSTD_ParamSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use row-based matchfinder. + * Set to ZSTD_ps_enable to force usage of row-based matchfinder. +@@ -1928,6 +2247,80 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo */ #define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 +/* ZSTD_c_prefetchCDictTables -+ * Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto. ++ * Controlled with ZSTD_ParamSwitch_e enum. Default is ZSTD_ps_auto. + * + * In some situations, zstd uses CDict tables in-place rather than copying them + * into the working context. (See docs on ZSTD_dictAttachPref_e above for details). @@ -31121,19 +33022,21 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper + * bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make + * compressBound() inaccurate). Only currently meant to be used for testing. -+ * + */ +#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18 + -+/* ZSTD_c_searchForExternalRepcodes -+ * This parameter affects how zstd parses external sequences, such as sequences -+ * provided through the compressSequences() API or from an external block-level -+ * sequence producer. ++/* ZSTD_c_repcodeResolution ++ * This parameter only has an effect if ZSTD_c_blockDelimiters is ++ * set to ZSTD_sf_explicitBlockDelimiters (may change in the future). 
+ * -+ * If set to ZSTD_ps_enable, the library will check for repeated offsets in ++ * This parameter affects how zstd parses external sequences, ++ * provided via the ZSTD_compressSequences*() API ++ * or from an external block-level sequence producer. ++ * ++ * If set to ZSTD_ps_enable, the library will check for repeated offsets within + * external sequences, even if those repcodes are not explicitly indicated in + * the "rep" field. Note that this is the only way to exploit repcode matches -+ * while using compressSequences() or an external sequence producer, since zstd ++ * while using compressSequences*() or an external sequence producer, since zstd + * currently ignores the "rep" field of external sequences. + * + * If set to ZSTD_ps_disable, the library will not exploit repeated offsets in @@ -31142,17 +33045,16 @@ index 79d55465d5c1..6320fedcf8a4 100644 + * compression ratio. + * + * The default value is ZSTD_ps_auto, for which the library will enable/disable -+ * based on compression level. -+ * -+ * Note: for now, this param only has an effect if ZSTD_c_blockDelimiters is -+ * set to ZSTD_sf_explicitBlockDelimiters. That may change in the future. ++ * based on compression level (currently: level<10 disables, level>=10 enables). + */ -+#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 ++#define ZSTD_c_repcodeResolution ZSTD_c_experimentalParam19 ++#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19 /* older name */ ++ + /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. -@@ -2084,7 +2374,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete +@@ -2084,7 +2477,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete * in the range [dst, dst + pos) MUST not be modified during decompression * or you will get data corruption. * @@ -31161,7 +33063,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * it can write directly to the ZSTD_outBuffer, but it will still allocate * an input buffer large enough to fit any compressed block. This will also * avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer. -@@ -2137,6 +2427,33 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete +@@ -2137,6 +2530,33 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete */ #define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4 @@ -31195,7 +33097,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 /*! ZSTD_DCtx_setFormat() : * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). -@@ -2145,6 +2462,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete +@@ -2145,6 +2565,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete * such ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead") @@ -31203,7 +33105,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : -@@ -2181,6 +2499,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( +@@ -2181,6 +2602,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( * This prototype will generate compilation warnings. 
*/ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") @@ -31211,7 +33113,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); -@@ -2198,17 +2517,15 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, +@@ -2198,17 +2620,15 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") @@ -31232,7 +33134,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); * -@@ -2218,6 +2535,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, +@@ -2218,6 +2638,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") @@ -31240,7 +33142,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, -@@ -2232,15 +2550,13 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, +@@ -2232,15 +2653,13 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") @@ -31259,7 +33161,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); * ZSTD_CCtx_refCDict(zcs, cdict); * -@@ -2250,6 +2566,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); +@@ -2250,6 +2669,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); * This prototype will generate compilation warnings. */ ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") @@ -31267,7 +33169,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, -@@ -2264,7 +2581,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, +@@ -2264,7 +2684,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, * explicitly specified. * * start a new frame, using same parameters from previous frame. @@ -31276,7 +33178,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 * Note that zcs must be init at least once before using ZSTD_resetCStream(). * If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN. * If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. -@@ -2274,6 +2591,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, +@@ -2274,6 +2694,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, * This prototype will generate compilation warnings. 
*/ ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") @@ -31284,7 +33186,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); -@@ -2319,8 +2637,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); +@@ -2319,8 +2740,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); * ZSTD_DCtx_loadDictionary(zds, dict, dictSize); * * note: no dictionary will be used if dict == NULL or dictSize < 8 @@ -31294,7 +33196,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! -@@ -2330,8 +2648,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo +@@ -2330,8 +2751,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo * ZSTD_DCtx_refDDict(zds, ddict); * * note : ddict is referenced, it must outlive decompression session @@ -31304,7 +33206,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! -@@ -2339,18 +2657,202 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z +@@ -2339,18 +2760,202 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z * * ZSTD_DCtx_reset(zds, ZSTD_reset_session_only); * @@ -31513,7 +33415,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ********************************************************************* */ /* -@@ -2358,11 +2860,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); +@@ -2358,11 +2963,10 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); A ZSTD_CCtx object is required to track streaming operations. Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource. @@ -31526,7 +33428,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 Then, consume your input using ZSTD_compressContinue(). There are some important considerations to keep in mind when using this advanced function : -@@ -2380,36 +2881,46 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); +@@ -2380,39 +2984,49 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame. Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders. @@ -31577,8 +33479,12 @@ index 79d55465d5c1..6320fedcf8a4 100644 + >0 : `srcSize` is too small, please provide at least result bytes on next attempt. errorCode, which can be tested using ZSTD_isError(). - It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, -@@ -2428,7 +2939,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ +- It fills a ZSTD_frameHeader structure with important information to correctly decode the frame, ++ It fills a ZSTD_FrameHeader structure with important information to correctly decode the frame, + such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`). + Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information. + As a consequence, check that values remain within valid application range. +@@ -2428,7 +3042,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ The most memory efficient way is to use a round buffer of sufficient size. 
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(), @@ -31587,7 +33493,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one, up to the moment there is not enough room left in the buffer to guarantee decoding another full block, which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`. -@@ -2448,7 +2959,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ +@@ -2448,7 +3062,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue(). ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail. @@ -31596,7 +33502,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item. It can also be an error code, which can be tested with ZSTD_isError(). -@@ -2471,27 +2982,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ +@@ -2471,27 +3085,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_ */ /*===== Buffer-less streaming decompression functions =====*/ @@ -31624,7 +33530,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); -@@ -2502,6 +2993,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); +@@ -2502,6 +3096,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ @@ -31632,7 +33538,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); -@@ -2509,11 +3001,23 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +@@ -2509,11 +3104,23 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); @@ -31659,7 +33565,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 Block functions produce and decode raw zstd blocks, without frame metadata. Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes). But users will have to take in charge needed metadata to regenerate data, such as compressed and content sizes. 
-@@ -2524,7 +3028,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +@@ -2524,7 +3131,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); - It is necessary to init context before starting + compression : any ZSTD_compressBegin*() variant, including with dictionary + decompression : any ZSTD_decompressBegin*() variant, including with dictionary @@ -31667,7 +33573,7 @@ index 79d55465d5c1..6320fedcf8a4 100644 - Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB + If input is larger than a block size, it's necessary to split input data into multiple blocks + For inputs larger than a single block, consider using regular ZSTD_compress() instead. -@@ -2541,11 +3044,14 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +@@ -2541,11 +3147,14 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); */ /*===== Raw zstd block functions =====*/ @@ -31680,11 +33586,11 @@ index 79d55465d5c1..6320fedcf8a4 100644 +ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.") ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ -- - #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ + #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ +- diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile -index 20f08c644b71..464c410b2768 100644 +index 20f08c644b71..be218b5e0ed5 100644 --- a/lib/zstd/Makefile +++ b/lib/zstd/Makefile @@ -1,6 +1,6 @@ @@ -31695,6 +33601,14 @@ index 20f08c644b71..464c410b2768 100644 # All rights reserved. # # This source code is licensed under both the BSD-style license (found in the +@@ -26,6 +26,7 @@ zstd_compress-y := \ + compress/zstd_lazy.o \ + compress/zstd_ldm.o \ + compress/zstd_opt.o \ ++ compress/zstd_preSplit.o \ + + zstd_decompress-y := \ + zstd_decompress_module.o \ diff --git a/lib/zstd/common/allocations.h b/lib/zstd/common/allocations.h new file mode 100644 index 000000000000..16c3d08e8d1a @@ -31759,10 +33673,10 @@ index 000000000000..16c3d08e8d1a +#endif /* ZSTD_ALLOCATIONS_H */ diff --git a/lib/zstd/common/bits.h b/lib/zstd/common/bits.h new file mode 100644 -index 000000000000..aa3487ec4b6a +index 000000000000..c5faaa3d7b08 --- /dev/null +++ b/lib/zstd/common/bits.h -@@ -0,0 +1,149 @@ +@@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
@@ -31794,14 +33708,15 @@ index 000000000000..aa3487ec4b6a +MEM_STATIC unsigned ZSTD_countTrailingZeros32(U32 val) +{ + assert(val != 0); -+# if (__GNUC__ >= 4) -+ return (unsigned)__builtin_ctz(val); -+# else -+ return ZSTD_countTrailingZeros32_fallback(val); -+# endif ++#if (__GNUC__ >= 4) ++ return (unsigned)__builtin_ctz(val); ++#else ++ return ZSTD_countTrailingZeros32_fallback(val); ++#endif +} + -+MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) { ++MEM_STATIC unsigned ZSTD_countLeadingZeros32_fallback(U32 val) ++{ + assert(val != 0); + { + static const U32 DeBruijnClz[32] = {0, 9, 1, 10, 13, 21, 2, 29, @@ -31820,47 +33735,47 @@ index 000000000000..aa3487ec4b6a +MEM_STATIC unsigned ZSTD_countLeadingZeros32(U32 val) +{ + assert(val != 0); -+# if (__GNUC__ >= 4) -+ return (unsigned)__builtin_clz(val); -+# else -+ return ZSTD_countLeadingZeros32_fallback(val); -+# endif ++#if (__GNUC__ >= 4) ++ return (unsigned)__builtin_clz(val); ++#else ++ return ZSTD_countLeadingZeros32_fallback(val); ++#endif +} + +MEM_STATIC unsigned ZSTD_countTrailingZeros64(U64 val) +{ + assert(val != 0); -+# if (__GNUC__ >= 4) && defined(__LP64__) -+ return (unsigned)__builtin_ctzll(val); -+# else -+ { -+ U32 mostSignificantWord = (U32)(val >> 32); -+ U32 leastSignificantWord = (U32)val; -+ if (leastSignificantWord == 0) { -+ return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); -+ } else { -+ return ZSTD_countTrailingZeros32(leastSignificantWord); -+ } ++#if (__GNUC__ >= 4) && defined(__LP64__) ++ return (unsigned)__builtin_ctzll(val); ++#else ++ { ++ U32 mostSignificantWord = (U32)(val >> 32); ++ U32 leastSignificantWord = (U32)val; ++ if (leastSignificantWord == 0) { ++ return 32 + ZSTD_countTrailingZeros32(mostSignificantWord); ++ } else { ++ return ZSTD_countTrailingZeros32(leastSignificantWord); + } -+# endif ++ } ++#endif +} + +MEM_STATIC unsigned ZSTD_countLeadingZeros64(U64 val) +{ + assert(val != 0); -+# if (__GNUC__ >= 4) -+ return (unsigned)(__builtin_clzll(val)); -+# else -+ { -+ U32 mostSignificantWord = (U32)(val >> 32); -+ U32 leastSignificantWord = (U32)val; -+ if (mostSignificantWord == 0) { -+ return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); -+ } else { -+ return ZSTD_countLeadingZeros32(mostSignificantWord); -+ } ++#if (__GNUC__ >= 4) ++ return (unsigned)(__builtin_clzll(val)); ++#else ++ { ++ U32 mostSignificantWord = (U32)(val >> 32); ++ U32 leastSignificantWord = (U32)val; ++ if (mostSignificantWord == 0) { ++ return 32 + ZSTD_countLeadingZeros32(leastSignificantWord); ++ } else { ++ return ZSTD_countLeadingZeros32(mostSignificantWord); + } -+# endif ++ } ++#endif +} + +MEM_STATIC unsigned ZSTD_NbCommonBytes(size_t val) @@ -31913,7 +33828,7 @@ index 000000000000..aa3487ec4b6a + +#endif /* ZSTD_BITS_H */ diff --git a/lib/zstd/common/bitstream.h b/lib/zstd/common/bitstream.h -index feef3a1b1d60..6a13f1f0f1e8 100644 +index feef3a1b1d60..86439da0eea7 100644 --- a/lib/zstd/common/bitstream.h +++ b/lib/zstd/common/bitstream.h @@ -1,7 +1,8 @@ @@ -31926,19 +33841,51 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -@@ -27,6 +28,7 @@ +@@ -27,7 +28,7 @@ #include "compiler.h" /* UNLIKELY() */ #include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ #include "error_private.h" /* error codes and messages */ +- +#include "bits.h" /* ZSTD_highbit32 */ - /*========================================= -@@ -79,19 +81,20 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); - 
/*-******************************************** + * Target specific +@@ -41,12 +42,13 @@ + /*-****************************************** + * bitStream encoding API (write forward) + ********************************************/ ++typedef size_t BitContainerType; + /* bitStream can mix input from multiple sources. + * A critical property of these streams is that they encode and decode in **reverse** direction. + * So the first bit sequence you add will be the last to be read, like a LIFO stack. + */ + typedef struct { +- size_t bitContainer; ++ BitContainerType bitContainer; + unsigned bitPos; + char* startPtr; + char* ptr; +@@ -54,7 +56,7 @@ typedef struct { + } BIT_CStream_t; + + MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); +-MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); ++MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits); + MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); + MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); + +@@ -63,7 +65,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); + * `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. + * + * bits are first added to a local register. +-* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. ++* Local register is BitContainerType, 64-bits on 64-bits systems, or 32-bits on 32-bits systems. + * Writing data into memory is an explicit operation, performed by the flushBits function. + * Hence keep track how many bits are potentially stored into local register to avoid register overflow. + * After a flushBits, a maximum of 7 bits might still be stored into local register. +@@ -80,28 +82,28 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); * bitStream decoding API (read backward) **********************************************/ -+typedef size_t BitContainerType; typedef struct { - size_t bitContainer; + BitContainerType bitContainer; @@ -31960,8 +33907,11 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 + } BIT_DStream_status; /* result of BIT_reloadDStream() */ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); - MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); -@@ -101,7 +104,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); +-MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); ++MEM_STATIC BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); + MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); + MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + /* Start by invoking BIT_initDStream(). * A chunk of the bitStream is then stored into a local register. @@ -31970,7 +33920,16 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 * You can then retrieve bitFields stored into the local register, **in reverse order**. * Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. * A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. 
-@@ -122,33 +125,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); +@@ -113,7 +115,7 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + /*-**************************************** + * unsafe API + ******************************************/ +-MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); ++MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, BitContainerType value, unsigned nbBits); + /* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ + + MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); +@@ -122,33 +124,6 @@ MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); /* faster, but works only if nbBits >= 1 */ @@ -32004,11 +33963,11 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 /*===== Local Constants =====*/ static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, -@@ -178,6 +154,12 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, +@@ -178,16 +153,22 @@ MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, return 0; } -+FORCE_INLINE_TEMPLATE size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) ++FORCE_INLINE_TEMPLATE BitContainerType BIT_getLowerBits(BitContainerType bitContainer, U32 const nbBits) +{ + assert(nbBits < BIT_MASK_SIZE); + return bitContainer & BIT_mask[nbBits]; @@ -32017,7 +33976,10 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 /*! BIT_addBits() : * can add up to 31 bits into `bitC`. * Note : does not check for register overflow ! */ -@@ -187,7 +169,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, + MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, +- size_t value, unsigned nbBits) ++ BitContainerType value, unsigned nbBits) + { DEBUG_STATIC_ASSERT(BIT_MASK_SIZE == 32); assert(nbBits < BIT_MASK_SIZE); assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); @@ -32026,7 +33988,25 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 bitC->bitPos += nbBits; } -@@ -266,35 +248,35 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si +@@ -195,7 +176,7 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, + * works only if `value` is _clean_, + * meaning all high bits above nbBits are 0 */ + MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, +- size_t value, unsigned nbBits) ++ BitContainerType value, unsigned nbBits) + { + assert((value>>nbBits) == 0); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); +@@ -242,7 +223,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) + BIT_addBitsFast(bitC, 1, 1); /* endMark */ + BIT_flushBits(bitC); + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ +- return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); ++ return (size_t)(bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); + } + + +@@ -266,35 +247,35 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); bitD->bitContainer = MEM_readLEST(bitD->ptr); { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; @@ -32070,22 +34050,30 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ } bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; -@@ -303,12 +285,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si +@@ -303,12 +284,12 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* 
srcBuffer, si return srcSize; } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getUpperBits(size_t bitContainer, U32 const start) -+FORCE_INLINE_TEMPLATE size_t BIT_getUpperBits(BitContainerType bitContainer, U32 const start) ++FORCE_INLINE_TEMPLATE BitContainerType BIT_getUpperBits(BitContainerType bitContainer, U32 const start) { return bitContainer >> start; } -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) -+FORCE_INLINE_TEMPLATE size_t BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits) ++FORCE_INLINE_TEMPLATE BitContainerType BIT_getMiddleBits(BitContainerType bitContainer, U32 const start, U32 const nbBits) { U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ -@@ -325,19 +307,13 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c +@@ -318,26 +299,20 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c + * such cpus old (pre-Haswell, 2013) and their performance is not of that + * importance. + */ +-#if defined(__x86_64__) || defined(_M_X86) ++#if defined(__x86_64__) || defined(_M_X64) + return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1); + #else + return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; #endif } @@ -32102,11 +34090,19 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 * On 64-bits, maxNbBits==56. * @return : value extracted */ -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) -+FORCE_INLINE_TEMPLATE size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) ++FORCE_INLINE_TEMPLATE BitContainerType BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) { /* arbitrate between double-shift and shift+mask */ #if 1 -@@ -360,7 +336,7 @@ MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +@@ -353,14 +328,14 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_lookBits(const BIT_DStream_t* bitD, U3 + + /*! BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ +-MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) ++MEM_STATIC BitContainerType BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) + { + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + assert(nbBits >= 1); return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); } @@ -32115,25 +34111,29 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 { bitD->bitsConsumed += nbBits; } -@@ -369,7 +345,7 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +@@ -369,23 +344,38 @@ MEM_STATIC FORCE_INLINE_ATTR void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) * Read (consume) next n bits from local register and update. * Pay attention to not read more than nbBits contained into local register. * @return : extracted value. */ -MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) -+FORCE_INLINE_TEMPLATE size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) ++FORCE_INLINE_TEMPLATE BitContainerType BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) { - size_t const value = BIT_lookBits(bitD, nbBits); +- size_t const value = BIT_lookBits(bitD, nbBits); ++ BitContainerType const value = BIT_lookBits(bitD, nbBits); BIT_skipBits(bitD, nbBits); -@@ -377,7 +353,7 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_readBits(BIT_DStream_t* bitD, unsigned n + return value; } /*! 
BIT_readBitsFast() : - * unsafe version; only works only if nbBits >= 1 */ +-MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) + * unsafe version; only works if nbBits >= 1 */ - MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) ++MEM_STATIC BitContainerType BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) { - size_t const value = BIT_lookBitsFast(bitD, nbBits); -@@ -386,6 +362,21 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) +- size_t const value = BIT_lookBitsFast(bitD, nbBits); ++ BitContainerType const value = BIT_lookBitsFast(bitD, nbBits); + assert(nbBits >= 1); + BIT_skipBits(bitD, nbBits); return value; } @@ -32155,7 +34155,7 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 /*! BIT_reloadDStreamFast() : * Similar to BIT_reloadDStream(), but with two differences: * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! -@@ -396,31 +387,35 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) +@@ -396,31 +386,35 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) { if (UNLIKELY(bitD->ptr < bitD->limitPtr)) return BIT_DStream_overflow; @@ -32201,8 +34201,14 @@ index feef3a1b1d60..6a13f1f0f1e8 100644 { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { +@@ -442,5 +436,4 @@ MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); + } + +- + #endif /* BITSTREAM_H_MODULE */ diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h -index c42d39faf9bd..508ee25537bb 100644 +index c42d39faf9bd..dc9bd15e174e 100644 --- a/lib/zstd/common/compiler.h +++ b/lib/zstd/common/compiler.h @@ -1,5 +1,6 @@ @@ -32301,7 +34307,7 @@ index c42d39faf9bd..508ee25537bb 100644 /* vectorization * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax, -@@ -126,9 +143,9 @@ +@@ -126,16 +143,13 @@ #define UNLIKELY(x) (__builtin_expect((x), 0)) #if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) @@ -32313,7 +34319,41 @@ index c42d39faf9bd..508ee25537bb 100644 #endif /* disable warnings */ -@@ -179,6 +196,85 @@ + +-/*Like DYNAMIC_BMI2 but for compile time determination of BMI2 support*/ +- +- + /* compile time determination of SIMD support */ + + /* C-language Attributes are added in C23. */ +@@ -158,9 +172,15 @@ + #define ZSTD_FALLTHROUGH fallthrough + + /*-************************************************************** +-* Alignment check ++* Alignment + *****************************************************************/ + ++/* @return 1 if @u is a 2^n value, 0 otherwise ++ * useful to check a value is valid for alignment restrictions */ ++MEM_STATIC int ZSTD_isPower2(size_t u) { ++ return (u & (u-1)) == 0; ++} ++ + /* this test was initially positioned in mem.h, + * but this file is removed (or replaced) for linux kernel + * so it's now hosted in compiler.h, +@@ -175,10 +195,95 @@ + + #endif /* ZSTD_ALIGNOF */ + ++#ifndef ZSTD_ALIGNED ++/* C90-compatible alignment macro (GCC/Clang). Adjust for other compilers if needed. 
*/ ++#define ZSTD_ALIGNED(a) __attribute__((aligned(a))) ++#endif /* ZSTD_ALIGNED */ ++ ++ + /*-************************************************************** * Sanitizer *****************************************************************/ @@ -32336,7 +34376,7 @@ index c42d39faf9bd..508ee25537bb 100644 +#endif + +/* -+ * Helper function to perform a wrapped pointer difference without trigging ++ * Helper function to perform a wrapped pointer difference without triggering + * UBSAN. + * + * @returns lhs - rhs with wrapping @@ -32437,7 +34477,7 @@ index bb863c9ea616..8eb6aa9a3b20 100644 int g_debuglevel = DEBUGLEVEL; +#endif diff --git a/lib/zstd/common/debug.h b/lib/zstd/common/debug.h -index 6dd88d1fbd02..226ba3c57ec3 100644 +index 6dd88d1fbd02..c8a10281f112 100644 --- a/lib/zstd/common/debug.h +++ b/lib/zstd/common/debug.h @@ -1,7 +1,8 @@ @@ -32450,7 +34490,15 @@ index 6dd88d1fbd02..226ba3c57ec3 100644 * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -@@ -82,18 +83,27 @@ extern int g_debuglevel; /* the variable is only declared, +@@ -33,7 +34,6 @@ + #define DEBUG_H_12987983217 + + +- + /* static assert is triggered at compile time, leaving no runtime artefact. + * static assert only works with compile-time constants. + * Also, this variant can only be used inside a function. */ +@@ -82,20 +82,27 @@ extern int g_debuglevel; /* the variable is only declared, It's useful when enabling very verbose levels on selective conditions (such as position in src) */ @@ -32488,7 +34536,9 @@ index 6dd88d1fbd02..226ba3c57ec3 100644 +# define DEBUGLOG(l, ...) do { } while (0) /* disabled */ #endif - +- +- + #endif /* DEBUG_H_12987983217 */ diff --git a/lib/zstd/common/entropy_common.c b/lib/zstd/common/entropy_common.c index fef67056f052..6cdd82233fb5 100644 --- a/lib/zstd/common/entropy_common.c @@ -32608,7 +34658,7 @@ index fef67056f052..6cdd82233fb5 100644 return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize); } diff --git a/lib/zstd/common/error_private.c b/lib/zstd/common/error_private.c -index 6d1135f8c373..a4062d30d170 100644 +index 6d1135f8c373..6c3dbad838b6 100644 --- a/lib/zstd/common/error_private.c +++ b/lib/zstd/common/error_private.c @@ -1,5 +1,6 @@ @@ -32632,10 +34682,11 @@ index 6d1135f8c373..a4062d30d170 100644 case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; case PREFIX(init_missing): return "Context should be init first"; case PREFIX(memory_allocation): return "Allocation error : not enough memory"; -@@ -38,17 +41,22 @@ const char* ERR_getErrorString(ERR_enum code) +@@ -38,17 +41,23 @@ const char* ERR_getErrorString(ERR_enum code) case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; ++ case PREFIX(cannotProduce_uncompressedBlock): return "This mode cannot generate an uncompressed block"; + case PREFIX(stabilityCondition_notRespected): return "pledged buffer stability condition is not respected"; case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; case PREFIX(dictionary_wrong): return "Dictionary mismatch"; @@ -32656,7 +34707,7 @@ index 6d1135f8c373..a4062d30d170 100644 default: return notErrorCode; } diff --git a/lib/zstd/common/error_private.h b/lib/zstd/common/error_private.h -index ca5101e542fa..0410ca415b54 
100644 +index ca5101e542fa..08ee87b68cca 100644 --- a/lib/zstd/common/error_private.h +++ b/lib/zstd/common/error_private.h @@ -1,5 +1,6 @@ @@ -32667,7 +34718,24 @@ index ca5101e542fa..0410ca415b54 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the -@@ -49,8 +50,13 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } +@@ -13,8 +14,6 @@ + #ifndef ERROR_H_MODULE + #define ERROR_H_MODULE + +- +- + /* **************************************** + * Dependencies + ******************************************/ +@@ -23,7 +22,6 @@ + #include "debug.h" + #include "zstd_deps.h" /* size_t */ + +- + /* **************************************** + * Compiler-specific + ******************************************/ +@@ -49,8 +47,13 @@ ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); } /* check and forward error code */ @@ -32683,7 +34751,7 @@ index ca5101e542fa..0410ca415b54 100644 /*-**************************************** -@@ -84,10 +90,12 @@ void _force_has_format_string(const char *format, ...) { +@@ -84,10 +87,12 @@ void _force_has_format_string(const char *format, ...) { * We want to force this function invocation to be syntactically correct, but * we don't want to force runtime evaluation of its arguments. */ @@ -32700,7 +34768,7 @@ index ca5101e542fa..0410ca415b54 100644 #define ERR_QUOTE(str) #str -@@ -98,48 +106,50 @@ void _force_has_format_string(const char *format, ...) { +@@ -98,48 +103,49 @@ void _force_has_format_string(const char *format, ...) { * In order to do that (particularly, printing the conditional that failed), * this can't just wrap RETURN_ERROR(). */ @@ -32766,6 +34834,7 @@ index ca5101e542fa..0410ca415b54 100644 - return err_code; \ - } \ - } while(0); +- +#define FORWARD_IF_ERROR(err, ...) \ + do { \ + size_t const err_code = (err); \ @@ -32779,10 +34848,9 @@ index ca5101e542fa..0410ca415b54 100644 + } \ + } while(0) - #endif /* ERROR_H_MODULE */ diff --git a/lib/zstd/common/fse.h b/lib/zstd/common/fse.h -index 4507043b2287..2185a578617d 100644 +index 4507043b2287..b36ce7a2a8c3 100644 --- a/lib/zstd/common/fse.h +++ b/lib/zstd/common/fse.h @@ -1,7 +1,8 @@ @@ -32795,7 +34863,24 @@ index 4507043b2287..2185a578617d 100644 * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -@@ -50,34 +51,6 @@ +@@ -11,8 +12,6 @@ + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ ****************************************************************** */ +- +- + #ifndef FSE_H + #define FSE_H + +@@ -22,7 +21,6 @@ + ******************************************/ + #include "zstd_deps.h" /* size_t, ptrdiff_t */ + +- + /*-***************************************** + * FSE_PUBLIC_API : control library symbols visibility + ******************************************/ +@@ -50,34 +48,6 @@ FSE_PUBLIC_API unsigned FSE_versionNumber(void); /*< library version number; to be used when checking dll version */ @@ -32830,7 +34915,7 @@ index 4507043b2287..2185a578617d 100644 /*-***************************************** * Tool functions ******************************************/ -@@ -88,20 +61,6 @@ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return +@@ -88,20 +58,6 @@ FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ @@ -32851,7 +34936,7 @@ index 4507043b2287..2185a578617d 100644 /*-***************************************** * FSE detailed API ******************************************/ -@@ -161,8 +120,6 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, +@@ -161,8 +117,6 @@ FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, /*! Constructor and Destructor of FSE_CTable. Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ @@ -32860,7 +34945,7 @@ index 4507043b2287..2185a578617d 100644 /*! FSE_buildCTable(): Builds `ct`, which must be already allocated, using FSE_createCTable(). -@@ -238,23 +195,7 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, +@@ -238,23 +192,7 @@ FSE_PUBLIC_API size_t FSE_readNCount_bmi2(short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize, int bmi2); @@ -32884,15 +34969,22 @@ index 4507043b2287..2185a578617d 100644 /*! 
Tutorial : -@@ -286,6 +227,7 @@ If there is an error, the function will return an error code, which can be teste +@@ -286,13 +224,11 @@ If there is an error, the function will return an error code, which can be teste #endif /* FSE_H */ + #if !defined(FSE_H_FSE_STATIC_LINKING_ONLY) #define FSE_H_FSE_STATIC_LINKING_ONLY +- +-/* *** Dependency *** */ + #include "bitstream.h" -@@ -317,16 +259,6 @@ If there is an error, the function will return an error code, which can be teste +- + /* ***************************************** + * Static allocation + *******************************************/ +@@ -317,16 +253,6 @@ If there is an error, the function will return an error code, which can be teste unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); /*< same as FSE_optimalTableLog(), which used `minus==2` */ @@ -32909,7 +35001,7 @@ index 4507043b2287..2185a578617d 100644 size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /*< build a fake FSE_CTable, designed to compress always the same symbolValue */ -@@ -344,19 +276,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi +@@ -344,19 +270,11 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi FSE_PUBLIC_API size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /*< Same as FSE_buildDTable(), using an externally allocated `workspace` produced with `FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxSymbolValue)` */ @@ -32932,19 +35024,19 @@ index 4507043b2287..2185a578617d 100644 typedef enum { FSE_repeat_none, /*< Cannot use the previous table */ -@@ -539,20 +463,20 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un +@@ -539,20 +457,20 @@ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, un FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* const stateTable = (const U16*)(statePtr->stateTable); U32 const nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); - BIT_addBits(bitC, statePtr->value, nbBitsOut); -+ BIT_addBits(bitC, (size_t)statePtr->value, nbBitsOut); ++ BIT_addBits(bitC, (BitContainerType)statePtr->value, nbBitsOut); statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; } MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) { - BIT_addBits(bitC, statePtr->value, statePtr->stateLog); -+ BIT_addBits(bitC, (size_t)statePtr->value, statePtr->stateLog); ++ BIT_addBits(bitC, (BitContainerType)statePtr->value, statePtr->stateLog); BIT_flushBits(bitC); } @@ -32956,8 +35048,16 @@ index 4507043b2287..2185a578617d 100644 * note 1 : assume symbolValue is valid (<= maxSymbolValue) * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) +@@ -705,7 +623,4 @@ MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) + + #define FSE_TABLESTEP(tableSize) (((tableSize)>>1) + ((tableSize)>>3) + 3) + +- + #endif /* FSE_STATIC_LINKING_ONLY */ +- +- diff --git a/lib/zstd/common/fse_decompress.c b/lib/zstd/common/fse_decompress.c -index 8dcb8ca39767..3a17e84f27bf 100644 +index 8dcb8ca39767..15081d8dc607 100644 --- a/lib/zstd/common/fse_decompress.c +++ b/lib/zstd/common/fse_decompress.c @@ -1,6 +1,7 @@ @@ -33097,7 
+35197,16 @@ index 8dcb8ca39767..3a17e84f27bf 100644 FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( void* dst, size_t maxDstSize, -@@ -287,32 +230,12 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( +@@ -248,6 +191,8 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( + FSE_initDState(&state1, &bitD, dt); + FSE_initDState(&state2, &bitD, dt); + ++ RETURN_ERROR_IF(BIT_reloadDStream(&bitD)==BIT_DStream_overflow, corruption_detected, ""); ++ + #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) + + /* 4 symbols per loop */ +@@ -287,32 +232,12 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic( break; } } @@ -33132,7 +35241,7 @@ index 8dcb8ca39767..3a17e84f27bf 100644 } FSE_DecompressWksp; -@@ -327,13 +250,18 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( +@@ -327,13 +252,18 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( unsigned tableLog; unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace; @@ -33154,7 +35263,7 @@ index 8dcb8ca39767..3a17e84f27bf 100644 if (FSE_isError(NCountLength)) return NCountLength; if (tableLog > maxLog) return ERROR(tableLog_tooLarge); assert(NCountLength <= cSrcSize); -@@ -342,19 +270,20 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( +@@ -342,19 +272,20 @@ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body( } if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge); @@ -33180,7 +35289,7 @@ index 8dcb8ca39767..3a17e84f27bf 100644 } } -@@ -382,9 +311,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, +@@ -382,9 +313,4 @@ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize); } @@ -33191,7 +35300,7 @@ index 8dcb8ca39767..3a17e84f27bf 100644 - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/zstd/common/huf.h b/lib/zstd/common/huf.h -index 5042ff870308..57462466e188 100644 +index 5042ff870308..49736dcd8f49 100644 --- a/lib/zstd/common/huf.h +++ b/lib/zstd/common/huf.h @@ -1,7 +1,8 @@ @@ -33204,7 +35313,13 @@ index 5042ff870308..57462466e188 100644 * * You can contact the author at : * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy -@@ -18,99 +19,22 @@ +@@ -12,105 +13,26 @@ + * You may select, at your option, one of the above-listed licenses. 
+ ****************************************************************** */ + +- + #ifndef HUF_H_298734234 + #define HUF_H_298734234 /* *** Dependencies *** */ #include "zstd_deps.h" /* size_t */ @@ -33253,11 +35368,11 @@ index 5042ff870308..57462466e188 100644 - */ -HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, - const void* cSrc, size_t cSrcSize); +- +#include "mem.h" /* U32 */ +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" - /* *** Tool functions *** */ -#define HUF_BLOCKSIZE_MAX (128 * 1024) /*< maximum input size for a single block compressed with HUF_compress */ -HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /*< maximum compressed size (worst case) */ @@ -33267,12 +35382,12 @@ index 5042ff870308..57462466e188 100644 /* Error Management */ -HUF_PUBLIC_API unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */ -HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */ -- +unsigned HUF_isError(size_t code); /*< tells if a return value is an error code */ +const char* HUF_getErrorName(size_t code); /*< provides error code string (useful for debugging) */ --/* *** Advanced function *** */ +-/* *** Advanced function *** */ +- -/* HUF_compress2() : - * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. - * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . @@ -33311,7 +35426,7 @@ index 5042ff870308..57462466e188 100644 /* *** Constants *** */ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ -@@ -151,25 +75,49 @@ typedef U32 HUF_DTable; +@@ -151,25 +73,49 @@ typedef U32 HUF_DTable; /* **************************************** * Advanced decompression functions ******************************************/ @@ -33374,7 +35489,7 @@ index 5042ff870308..57462466e188 100644 /*! HUF_compress() does the following: * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") -@@ -182,12 +130,12 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, +@@ -182,12 +128,12 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, * For example, it's possible to compress several blocks using the same 'CTable', * or to save and regenerate 'CTable' using external methods. */ @@ -33392,7 +35507,7 @@ index 5042ff870308..57462466e188 100644 size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); -@@ -196,6 +144,7 @@ typedef enum { +@@ -196,6 +142,7 @@ typedef enum { HUF_repeat_check, /*< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ HUF_repeat_valid /*< Can use the previous table and it is assumed to be valid */ } HUF_repeat; @@ -33400,7 +35515,7 @@ index 5042ff870308..57462466e188 100644 /* HUF_compress4X_repeat() : * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. 
-@@ -206,13 +155,13 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, +@@ -206,13 +153,13 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ @@ -33416,7 +35531,7 @@ index 5042ff870308..57462466e188 100644 #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, -@@ -238,7 +187,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, +@@ -238,7 +185,7 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workspace, size_t wkspSize, @@ -33425,7 +35540,7 @@ index 5042ff870308..57462466e188 100644 /* HUF_readCTable() : * Loading a CTable saved with HUF_writeCTable() */ -@@ -246,9 +195,22 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void +@@ -246,9 +193,22 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void /* HUF_getNbBitsFromCTable() : * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX @@ -33449,7 +35564,7 @@ index 5042ff870308..57462466e188 100644 /* * HUF_decompress() does the following: * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics -@@ -276,32 +238,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); +@@ -276,32 +236,12 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9)) #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) @@ -33483,13 +35598,12 @@ index 5042ff870308..57462466e188 100644 /* HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. 
-@@ -312,47 +254,28 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, +@@ -312,47 +252,27 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /*< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); -+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags); - +- -size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ -#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ @@ -33501,17 +35615,18 @@ index 5042ff870308..57462466e188 100644 -size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< single-symbol decoder */ -size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< single-symbol decoder */ -#endif -+size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); - #ifndef HUF_FORCE_DECOMPRESS_X1 +-#ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /*< double-symbols decoder */ -size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /*< double-symbols decoder */ -#endif -- ++ HUF_CElt* hufTable, HUF_repeat* repeat, int flags); + -size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /*< automatic selection of sing or double symbol decoder, based on DTable */ -#ifndef HUF_FORCE_DECOMPRESS_X2 -size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); -#endif --#ifndef HUF_FORCE_DECOMPRESS_X1 ++size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); + #ifndef HUF_FORCE_DECOMPRESS_X1 -size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /*< double-symbols decoder */ #endif @@ -33539,8 +35654,8 @@ index 5042ff870308..57462466e188 100644 #endif -#endif /* HUF_STATIC_LINKING_ONLY */ +- +#endif /* HUF_H_298734234 */ - diff --git a/lib/zstd/common/mem.h b/lib/zstd/common/mem.h index c22a2e69bf46..d9bd752fe17b 100644 --- a/lib/zstd/common/mem.h @@ -33562,7 +35677,7 @@ index c22a2e69bf46..d9bd752fe17b 100644 /*-************************************************************** diff --git a/lib/zstd/common/portability_macros.h b/lib/zstd/common/portability_macros.h -index 0e3b2c0a527d..f08638cced6c 100644 +index 0e3b2c0a527d..05286af72683 100644 --- a/lib/zstd/common/portability_macros.h +++ b/lib/zstd/common/portability_macros.h @@ -1,5 +1,6 @@ @@ -33582,7 +35697,7 @@ index 0e3b2c0a527d..f08638cced6c 100644 * This header is shared between C and ASM code, so it MUST only 
* contain macro definitions. It MUST not contain any C code. * -@@ -45,6 +46,8 @@ +@@ -45,30 +46,35 @@ /* Mark the internal assembly functions as hidden */ #ifdef __ELF__ # define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func @@ -33591,16 +35706,42 @@ index 0e3b2c0a527d..f08638cced6c 100644 #else # define ZSTD_HIDE_ASM_FUNCTION(func) #endif -@@ -65,7 +68,7 @@ + ++/* Compile time determination of BMI2 support */ ++ ++ + /* Enable runtime BMI2 dispatch based on the CPU. + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. + */ + #ifndef DYNAMIC_BMI2 +- #if ((defined(__clang__) && __has_attribute(__target__)) \ ++# if ((defined(__clang__) && __has_attribute(__target__)) \ + || (defined(__GNUC__) \ + && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ +- && (defined(__x86_64__) || defined(_M_X64)) \ ++ && (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)) \ + && !defined(__BMI2__) +- # define DYNAMIC_BMI2 1 +- #else +- # define DYNAMIC_BMI2 0 +- #endif ++# define DYNAMIC_BMI2 1 ++# else ++# define DYNAMIC_BMI2 0 ++# endif #endif /* - * Only enable assembly for GNUC comptabile compilers, -+ * Only enable assembly for GNUC compatible compilers, ++ * Only enable assembly for GNU C compatible compilers, * because other platforms may not support GAS assembly syntax. * - * Only enable assembly for Linux / MacOS, other platforms may -@@ -90,4 +93,23 @@ +- * Only enable assembly for Linux / MacOS, other platforms may ++ * Only enable assembly for Linux / MacOS / Win32, other platforms may + * work, but they haven't been tested. This could likely be + * extended to BSD systems. + * +@@ -90,4 +96,23 @@ */ #define ZSTD_ENABLE_ASM_X86_64_BMI2 0 @@ -33713,7 +35854,7 @@ index 2c34e8a33a1c..f931f7d0e294 100644 +#endif /* ZSTD_DEPS_STDINT */ +#endif /* ZSTD_DEPS_NEED_STDINT */ diff --git a/lib/zstd/common/zstd_internal.h b/lib/zstd/common/zstd_internal.h -index 93305d9b41bb..11da1233e890 100644 +index 93305d9b41bb..52a79435caf6 100644 --- a/lib/zstd/common/zstd_internal.h +++ b/lib/zstd/common/zstd_internal.h @@ -1,5 +1,6 @@ @@ -33724,7 +35865,7 @@ index 93305d9b41bb..11da1233e890 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the -@@ -28,7 +29,6 @@ +@@ -28,12 +29,10 @@ #include #define FSE_STATIC_LINKING_ONLY #include "fse.h" @@ -33732,7 +35873,12 @@ index 93305d9b41bb..11da1233e890 100644 #include "huf.h" #include /* XXH_reset, update, digest */ #define ZSTD_TRACE 0 -@@ -83,9 +83,9 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; + +- + /* ---- static assert (debug) --- */ + #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) + #define ZSTD_isError ERR_isError /* for inlining */ +@@ -83,16 +82,17 @@ typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; #define ZSTD_FRAMECHECKSUMSIZE 4 #define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ @@ -33741,10 +35887,11 @@ index 93305d9b41bb..11da1233e890 100644 +#define MIN_LITERALS_FOR_4_STREAMS 6 -#define HufLog 12 - typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; +-typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; ++typedef enum { set_basic, set_rle, set_compressed, set_repeat } SymbolEncodingType_e; #define LONGNBSEQ 0x7F00 -@@ -93,6 +93,7 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy + #define MINMATCH 3 #define Litbits 8 @@ -33752,7 +35899,7 @@ index 93305d9b41bb..11da1233e890 100644 #define MaxLit ((1<= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); -@@ -225,12 +228,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e +@@ -225,12 +227,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e * one COPY16() in the first call. Then, do two calls per loop since * at that point it is more likely to have a high trip count. */ @@ -33801,7 +35948,7 @@ index 93305d9b41bb..11da1233e890 100644 ZSTD_copy16(op, ip); if (16 >= length) return; op += 16; -@@ -240,7 +237,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e +@@ -240,7 +236,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e COPY16(op, ip); } while (op < oend); @@ -33809,48 +35956,70 @@ index 93305d9b41bb..11da1233e890 100644 } } -@@ -289,11 +285,11 @@ typedef enum { - typedef struct { - seqDef* sequencesStart; - seqDef* sequences; /* ptr to end of sequences */ +@@ -273,62 +268,6 @@ typedef enum { + /*-******************************************* + * Private declarations + *********************************************/ +-typedef struct seqDef_s { +- U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ +- U16 litLength; +- U16 mlBase; /* mlBase == matchLength - MINMATCH */ +-} seqDef; +- +-/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */ +-typedef enum { +- ZSTD_llt_none = 0, /* no longLengthType */ +- ZSTD_llt_literalLength = 1, /* represents a long literal */ +- ZSTD_llt_matchLength = 2 /* represents a long match */ +-} ZSTD_longLengthType_e; +- +-typedef struct { +- seqDef* sequencesStart; +- seqDef* sequences; /* ptr to end of sequences */ - BYTE* litStart; - BYTE* lit; /* ptr to end of literals */ - BYTE* llCode; - BYTE* mlCode; - BYTE* ofCode; -+ BYTE* litStart; -+ BYTE* lit; /* ptr to end of literals */ -+ BYTE* llCode; -+ BYTE* mlCode; -+ BYTE* ofCode; - size_t maxNbSeq; - size_t maxNbLit; - -@@ -301,8 +297,8 @@ typedef struct { - * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment - * the existing value of the litLength or matchLength by 0x10000. 
- */ +- size_t maxNbSeq; +- size_t maxNbLit; +- +- /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength +- * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment +- * the existing value of the litLength or matchLength by 0x10000. +- */ - ZSTD_longLengthType_e longLengthType; - U32 longLengthPos; /* Index of the sequence to apply long length modification to */ -+ ZSTD_longLengthType_e longLengthType; -+ U32 longLengthPos; /* Index of the sequence to apply long length modification to */ - } seqStore_t; - - typedef struct { -@@ -321,10 +317,10 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore - seqLen.matchLength = seq->mlBase + MINMATCH; - if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { - if (seqStore->longLengthType == ZSTD_llt_literalLength) { +-} seqStore_t; +- +-typedef struct { +- U32 litLength; +- U32 matchLength; +-} ZSTD_sequenceLength; +- +-/* +- * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences +- * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. +- */ +-MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) +-{ +- ZSTD_sequenceLength seqLen; +- seqLen.litLength = seq->litLength; +- seqLen.matchLength = seq->mlBase + MINMATCH; +- if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { +- if (seqStore->longLengthType == ZSTD_llt_literalLength) { - seqLen.litLength += 0xFFFF; -+ seqLen.litLength += 0x10000; - } - if (seqStore->longLengthType == ZSTD_llt_matchLength) { +- } +- if (seqStore->longLengthType == ZSTD_llt_matchLength) { - seqLen.matchLength += 0xFFFF; -+ seqLen.matchLength += 0x10000; - } - } - return seqLen; -@@ -337,72 +333,13 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore +- } +- } +- return seqLen; +-} + + /* + * Contains the compressed frame size and an upper-bound for the decompressed frame size. +@@ -337,74 +276,11 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` */ typedef struct { @@ -33859,7 +36028,7 @@ index 93305d9b41bb..11da1233e890 100644 unsigned long long decompressedBound; } ZSTD_frameSizeInfo; /* decompress & legacy */ - const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ +-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ - -/* custom memory allocation functions */ @@ -33921,11 +36090,12 @@ index 93305d9b41bb..11da1233e890 100644 -# endif - } -} -+int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ - - +- +- /* ZSTD_invalidateRepCodes() : -@@ -420,13 +357,13 @@ typedef struct { + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; +@@ -420,13 +296,13 @@ typedef struct { /*! 
ZSTD_getcBlockSize() : * Provides the size of compressed block from block header `src` */ @@ -33941,6 +36111,12 @@ index 93305d9b41bb..11da1233e890 100644 size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); +@@ -439,5 +315,4 @@ MEM_STATIC int ZSTD_cpuSupportsBmi2(void) + return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); + } + +- + #endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/lib/zstd/compress/clevels.h b/lib/zstd/compress/clevels.h index d9a76112ec3a..6ab8be6532ef 100644 --- a/lib/zstd/compress/clevels.h @@ -34123,7 +36299,7 @@ index ec5b1ca6d71a..44a3c10becf2 100644 - #endif /* FSE_COMMONDEFS_ONLY */ diff --git a/lib/zstd/compress/hist.c b/lib/zstd/compress/hist.c -index 3ddc6dfb6894..0b12587cc14b 100644 +index 3ddc6dfb6894..87145a2d9160 100644 --- a/lib/zstd/compress/hist.c +++ b/lib/zstd/compress/hist.c @@ -1,7 +1,8 @@ @@ -34136,8 +36312,25 @@ index 3ddc6dfb6894..0b12587cc14b 100644 * * You can contact the author at : * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy +@@ -26,6 +27,16 @@ unsigned HIST_isError(size_t code) { return ERR_isError(code); } + /*-************************************************************** + * Histogram functions + ****************************************************************/ ++void HIST_add(unsigned* count, const void* src, size_t srcSize) ++{ ++ const BYTE* ip = (const BYTE*)src; ++ const BYTE* const end = ip + srcSize; ++ ++ while (ip 1 << 17 == 128Ki positions. + * This structure is only used in zstd_opt. + * Since allocation is centralized for all strategies, it has to be known here. +- * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, ++ * The actual (selected) size of the hash table is then stored in ZSTD_MatchState_t.hashLog3, + * so that zstd_opt.c doesn't need to know about this constant. + */ + #ifndef ZSTD_HASHLOG3_MAX +@@ -55,14 +58,17 @@ * Helper functions ***************************************/ /* ZSTD_compressBound() @@ -35079,7 +37293,38 @@ index 16bb995bc6c4..885167f7e47b 100644 } -@@ -168,15 +173,13 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) +@@ -75,12 +81,12 @@ struct ZSTD_CDict_s { + ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */ + U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ + ZSTD_cwksp workspace; +- ZSTD_matchState_t matchState; ++ ZSTD_MatchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ +- ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use ++ ZSTD_ParamSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use + * row-based matchfinder. Unless the cdict is reloaded, we will use + * the same greedy/lazy matchfinder at compression time. + */ +@@ -130,11 +136,12 @@ ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) + ZSTD_cwksp_move(&cctx->workspace, &ws); + cctx->staticSize = workspaceSize; + +- /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ +- if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; ++ /* statically sized space. 
tmpWorkspace never moves (but prev/next block swap places) */ ++ if (!ZSTD_cwksp_check_available(&cctx->workspace, TMP_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); + cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); +- cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE); ++ cctx->tmpWorkspace = ZSTD_cwksp_reserve_object(&cctx->workspace, TMP_WORKSPACE_SIZE); ++ cctx->tmpWkspSize = TMP_WORKSPACE_SIZE; + cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + return cctx; + } +@@ -168,15 +175,13 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) { @@ -35098,7 +37343,62 @@ index 16bb995bc6c4..885167f7e47b 100644 } return 0; } -@@ -257,9 +260,9 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, +@@ -205,7 +210,7 @@ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) + } + + /* private API call, for dictBuilder only */ +-const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } ++const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } + + /* Returns true if the strategy supports using a row based matchfinder */ + static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { +@@ -215,32 +220,23 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { + /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder + * for this compression. + */ +-static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) { ++static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_ParamSwitch_e mode) { + assert(mode != ZSTD_ps_auto); + return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable); + } + + /* Returns row matchfinder usage given an initial mode and cParams */ +-static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, ++static ZSTD_ParamSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_ParamSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { +-#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON) +- int const kHasSIMD128 = 1; +-#else +- int const kHasSIMD128 = 0; +-#endif + if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ + mode = ZSTD_ps_disable; + if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; +- if (kHasSIMD128) { +- if (cParams->windowLog > 14) mode = ZSTD_ps_enable; +- } else { +- if (cParams->windowLog > 17) mode = ZSTD_ps_enable; +- } ++ if (cParams->windowLog > 14) mode = ZSTD_ps_enable; + return mode; + } + + /* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ +-static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, ++static ZSTD_ParamSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_ParamSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? 
ZSTD_ps_enable : ZSTD_ps_disable; +@@ -248,7 +244,7 @@ static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, + + /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ + static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, +- const ZSTD_paramSwitch_e useRowMatchFinder, ++ const ZSTD_ParamSwitch_e useRowMatchFinder, + const U32 forDDSDict) { + assert(useRowMatchFinder != ZSTD_ps_auto); + /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. +@@ -257,16 +253,44 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder)); } @@ -35108,9 +37408,10 @@ index 16bb995bc6c4..885167f7e47b 100644 - * Returns 0 otherwise. + * Returns ZSTD_ps_disable otherwise. */ - static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, +-static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, ++static ZSTD_ParamSwitch_e ZSTD_resolveEnableLdm(ZSTD_ParamSwitch_e mode, const ZSTD_compressionParameters* const cParams) { -@@ -267,6 +270,34 @@ static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, + if (mode != ZSTD_ps_auto) return mode; return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; } @@ -35127,7 +37428,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + } +} + -+static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) { ++static ZSTD_ParamSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_ParamSwitch_e value, int cLevel) { + if (value != ZSTD_ps_auto) return value; + if (cLevel < 10) { + return ZSTD_ps_disable; @@ -35145,9 +37446,12 @@ index 16bb995bc6c4..885167f7e47b 100644 static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( ZSTD_compressionParameters cParams) { -@@ -284,6 +315,10 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( +@@ -282,8 +306,12 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( + assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); + assert(cctxParams.ldmParams.hashRateLog < 32); } - cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); +- cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); ++ cctxParams.postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.postBlockSplitter, &cParams); cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences); + cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize); @@ -35156,7 +37460,7 @@ index 16bb995bc6c4..885167f7e47b 100644 assert(!ZSTD_checkCParams(cParams)); return cctxParams; } -@@ -329,10 +364,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) +@@ -329,10 +357,13 @@ size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) #define ZSTD_NO_CLEVEL 0 /* @@ -35172,17 +37476,23 @@ index 16bb995bc6c4..885167f7e47b 100644 { assert(!ZSTD_checkCParams(params->cParams)); ZSTD_memset(cctxParams, 0, sizeof(*cctxParams)); -@@ -345,6 +383,9 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par +@@ -343,10 +374,13 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par + 
*/ + cctxParams->compressionLevel = compressionLevel; cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams); - cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); +- cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); ++ cctxParams->postBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->postBlockSplitter, ¶ms->cParams); cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams); + cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences); + cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize); + cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel); DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", - cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); +- cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); ++ cctxParams->useRowMatchFinder, cctxParams->postBlockSplitter, cctxParams->ldmParams.enableLdm); } -@@ -359,7 +400,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete + + size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) +@@ -359,7 +393,7 @@ size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_paramete /* * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone. @@ -35191,7 +37501,7 @@ index 16bb995bc6c4..885167f7e47b 100644 */ static void ZSTD_CCtxParams_setZstdParams( ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) -@@ -455,8 +496,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) +@@ -455,8 +489,8 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_enableLongDistanceMatching: @@ -35202,7 +37512,25 @@ index 16bb995bc6c4..885167f7e47b 100644 return bounds; case ZSTD_c_ldmHashLog: -@@ -549,6 +590,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) +@@ -534,11 +568,16 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) + bounds.upperBound = 1; + return bounds; + +- case ZSTD_c_useBlockSplitter: ++ case ZSTD_c_splitAfterSequences: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; + ++ case ZSTD_c_blockSplitterLevel: ++ bounds.lowerBound = 0; ++ bounds.upperBound = ZSTD_BLOCKSPLITTER_LEVEL_MAX; ++ return bounds; ++ + case ZSTD_c_useRowMatchFinder: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; +@@ -549,6 +588,26 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = 1; return bounds; @@ -35221,7 +37549,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + bounds.upperBound = ZSTD_BLOCKSIZE_MAX; + return bounds; + -+ case ZSTD_c_searchForExternalRepcodes: ++ case ZSTD_c_repcodeResolution: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; + return bounds; @@ -35229,7 +37557,7 @@ index 16bb995bc6c4..885167f7e47b 100644 default: bounds.error = ERROR(parameter_unsupported); return bounds; -@@ -567,10 +628,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) +@@ -567,10 +626,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) 
return 0; } @@ -35245,18 +37573,30 @@ index 16bb995bc6c4..885167f7e47b 100644 static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) -@@ -613,6 +675,10 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) - case ZSTD_c_useBlockSplitter: +@@ -584,6 +644,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) + case ZSTD_c_minMatch: + case ZSTD_c_targetLength: + case ZSTD_c_strategy: ++ case ZSTD_c_blockSplitterLevel: + return 1; + + case ZSTD_c_format: +@@ -610,9 +671,13 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) + case ZSTD_c_stableOutBuffer: + case ZSTD_c_blockDelimiters: + case ZSTD_c_validateSequences: +- case ZSTD_c_useBlockSplitter: ++ case ZSTD_c_splitAfterSequences: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: -+ case ZSTD_c_searchForExternalRepcodes: ++ case ZSTD_c_repcodeResolution: default: return 0; } -@@ -625,7 +691,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) +@@ -625,7 +690,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) if (ZSTD_isUpdateAuthorized(param)) { cctx->cParamsChanged = 1; } else { @@ -35265,14 +37605,19 @@ index 16bb995bc6c4..885167f7e47b 100644 } } switch(param) -@@ -668,6 +734,10 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) - case ZSTD_c_useBlockSplitter: +@@ -665,9 +730,14 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) + case ZSTD_c_stableOutBuffer: + case ZSTD_c_blockDelimiters: + case ZSTD_c_validateSequences: +- case ZSTD_c_useBlockSplitter: ++ case ZSTD_c_splitAfterSequences: ++ case ZSTD_c_blockSplitterLevel: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: + case ZSTD_c_prefetchCDictTables: + case ZSTD_c_enableSeqProducerFallback: + case ZSTD_c_maxBlockSize: -+ case ZSTD_c_searchForExternalRepcodes: ++ case ZSTD_c_repcodeResolution: break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); @@ -35322,8 +37667,9 @@ index 16bb995bc6c4..885167f7e47b 100644 } case ZSTD_c_literalCompressionMode : { - const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; +- const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; - BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); ++ const ZSTD_ParamSwitch_e lcm = (ZSTD_ParamSwitch_e)value; + BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; @@ -35336,8 +37682,9 @@ index 16bb995bc6c4..885167f7e47b 100644 + return (size_t)CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : +- CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; + BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value); - CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; ++ CCtxParams->ldmParams.enableLdm = (ZSTD_ParamSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : @@ -35387,16 +37734,40 @@ index 16bb995bc6c4..885167f7e47b 100644 case ZSTD_c_stableInBuffer: BOUNDCHECK(ZSTD_c_stableInBuffer, value); -@@ -849,7 +922,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, +@@ -843,28 +916,55 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, + + case ZSTD_c_blockDelimiters: + BOUNDCHECK(ZSTD_c_blockDelimiters, value); +- CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value; ++ CCtxParams->blockDelimiters = 
(ZSTD_SequenceFormat_e)value; + return CCtxParams->blockDelimiters; + case ZSTD_c_validateSequences: BOUNDCHECK(ZSTD_c_validateSequences, value); CCtxParams->validateSequences = value; - return CCtxParams->validateSequences; + return (size_t)CCtxParams->validateSequences; - case ZSTD_c_useBlockSplitter: - BOUNDCHECK(ZSTD_c_useBlockSplitter, value); -@@ -864,7 +937,28 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, +- case ZSTD_c_useBlockSplitter: +- BOUNDCHECK(ZSTD_c_useBlockSplitter, value); +- CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value; +- return CCtxParams->useBlockSplitter; ++ case ZSTD_c_splitAfterSequences: ++ BOUNDCHECK(ZSTD_c_splitAfterSequences, value); ++ CCtxParams->postBlockSplitter = (ZSTD_ParamSwitch_e)value; ++ return CCtxParams->postBlockSplitter; ++ ++ case ZSTD_c_blockSplitterLevel: ++ BOUNDCHECK(ZSTD_c_blockSplitterLevel, value); ++ CCtxParams->preBlockSplitter_level = value; ++ return (size_t)CCtxParams->preBlockSplitter_level; + + case ZSTD_c_useRowMatchFinder: + BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); +- CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; ++ CCtxParams->useRowMatchFinder = (ZSTD_ParamSwitch_e)value; + return CCtxParams->useRowMatchFinder; + case ZSTD_c_deterministicRefPrefix: BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value); CCtxParams->deterministicRefPrefix = !!value; @@ -35405,7 +37776,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + + case ZSTD_c_prefetchCDictTables: + BOUNDCHECK(ZSTD_c_prefetchCDictTables, value); -+ CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value; ++ CCtxParams->prefetchCDictTables = (ZSTD_ParamSwitch_e)value; + return CCtxParams->prefetchCDictTables; + + case ZSTD_c_enableSeqProducerFallback: @@ -35416,17 +37787,100 @@ index 16bb995bc6c4..885167f7e47b 100644 + case ZSTD_c_maxBlockSize: + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_maxBlockSize, value); -+ CCtxParams->maxBlockSize = value; ++ assert(value>=0); ++ CCtxParams->maxBlockSize = (size_t)value; + return CCtxParams->maxBlockSize; + -+ case ZSTD_c_searchForExternalRepcodes: -+ BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value); -+ CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value; ++ case ZSTD_c_repcodeResolution: ++ BOUNDCHECK(ZSTD_c_repcodeResolution, value); ++ CCtxParams->searchForExternalRepcodes = (ZSTD_ParamSwitch_e)value; + return CCtxParams->searchForExternalRepcodes; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } -@@ -980,6 +1074,18 @@ size_t ZSTD_CCtxParams_getParameter( +@@ -881,7 +981,7 @@ size_t ZSTD_CCtxParams_getParameter( + switch(param) + { + case ZSTD_c_format : +- *value = CCtxParams->format; ++ *value = (int)CCtxParams->format; + break; + case ZSTD_c_compressionLevel : + *value = CCtxParams->compressionLevel; +@@ -896,16 +996,16 @@ size_t ZSTD_CCtxParams_getParameter( + *value = (int)CCtxParams->cParams.chainLog; + break; + case ZSTD_c_searchLog : +- *value = CCtxParams->cParams.searchLog; ++ *value = (int)CCtxParams->cParams.searchLog; + break; + case ZSTD_c_minMatch : +- *value = CCtxParams->cParams.minMatch; ++ *value = (int)CCtxParams->cParams.minMatch; + break; + case ZSTD_c_targetLength : +- *value = CCtxParams->cParams.targetLength; ++ *value = (int)CCtxParams->cParams.targetLength; + break; + case ZSTD_c_strategy : +- *value = (unsigned)CCtxParams->cParams.strategy; ++ *value = (int)CCtxParams->cParams.strategy; + break; + case ZSTD_c_contentSizeFlag : + *value = CCtxParams->fParams.contentSizeFlag; +@@ -920,10 +1020,10 @@ size_t 
ZSTD_CCtxParams_getParameter( + *value = CCtxParams->forceWindow; + break; + case ZSTD_c_forceAttachDict : +- *value = CCtxParams->attachDictPref; ++ *value = (int)CCtxParams->attachDictPref; + break; + case ZSTD_c_literalCompressionMode : +- *value = CCtxParams->literalCompressionMode; ++ *value = (int)CCtxParams->literalCompressionMode; + break; + case ZSTD_c_nbWorkers : + assert(CCtxParams->nbWorkers == 0); +@@ -939,19 +1039,19 @@ size_t ZSTD_CCtxParams_getParameter( + *value = CCtxParams->enableDedicatedDictSearch; + break; + case ZSTD_c_enableLongDistanceMatching : +- *value = CCtxParams->ldmParams.enableLdm; ++ *value = (int)CCtxParams->ldmParams.enableLdm; + break; + case ZSTD_c_ldmHashLog : +- *value = CCtxParams->ldmParams.hashLog; ++ *value = (int)CCtxParams->ldmParams.hashLog; + break; + case ZSTD_c_ldmMinMatch : +- *value = CCtxParams->ldmParams.minMatchLength; ++ *value = (int)CCtxParams->ldmParams.minMatchLength; + break; + case ZSTD_c_ldmBucketSizeLog : +- *value = CCtxParams->ldmParams.bucketSizeLog; ++ *value = (int)CCtxParams->ldmParams.bucketSizeLog; + break; + case ZSTD_c_ldmHashRateLog : +- *value = CCtxParams->ldmParams.hashRateLog; ++ *value = (int)CCtxParams->ldmParams.hashRateLog; + break; + case ZSTD_c_targetCBlockSize : + *value = (int)CCtxParams->targetCBlockSize; +@@ -971,8 +1071,11 @@ size_t ZSTD_CCtxParams_getParameter( + case ZSTD_c_validateSequences : + *value = (int)CCtxParams->validateSequences; + break; +- case ZSTD_c_useBlockSplitter : +- *value = (int)CCtxParams->useBlockSplitter; ++ case ZSTD_c_splitAfterSequences : ++ *value = (int)CCtxParams->postBlockSplitter; ++ break; ++ case ZSTD_c_blockSplitterLevel : ++ *value = CCtxParams->preBlockSplitter_level; + break; + case ZSTD_c_useRowMatchFinder : + *value = (int)CCtxParams->useRowMatchFinder; +@@ -980,6 +1083,18 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_deterministicRefPrefix: *value = (int)CCtxParams->deterministicRefPrefix; break; @@ -35439,13 +37893,13 @@ index 16bb995bc6c4..885167f7e47b 100644 + case ZSTD_c_maxBlockSize: + *value = (int)CCtxParams->maxBlockSize; + break; -+ case ZSTD_c_searchForExternalRepcodes: ++ case ZSTD_c_repcodeResolution: + *value = (int)CCtxParams->searchForExternalRepcodes; + break; default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); } return 0; -@@ -1006,9 +1112,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams( +@@ -1006,9 +1121,47 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams( return 0; } @@ -35455,13 +37909,13 @@ index 16bb995bc6c4..885167f7e47b 100644 + DEBUGLOG(4, "ZSTD_CCtx_setCParams"); + /* only update if all parameters are valid */ + FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, cparams.windowLog), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, cparams.chainLog), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, cparams.hashLog), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, cparams.searchLog), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, cparams.minMatch), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, cparams.targetLength), ""); -+ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, cparams.strategy), ""); ++ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, (int)cparams.windowLog), ""); ++ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, (int)cparams.chainLog), ""); ++ 
FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, (int)cparams.hashLog), ""); ++ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, (int)cparams.searchLog), ""); ++ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, (int)cparams.minMatch), ""); ++ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, (int)cparams.targetLength), ""); ++ FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, (int)cparams.strategy), ""); + return 0; +} + @@ -35494,7 +37948,7 @@ index 16bb995bc6c4..885167f7e47b 100644 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, "Can't set pledgedSrcSize when not in init stage."); cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; -@@ -1024,9 +1168,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams( +@@ -1024,9 +1177,9 @@ static void ZSTD_dedicatedDictSearch_revertCParams( ZSTD_compressionParameters* cParams); /* @@ -35507,7 +37961,7 @@ index 16bb995bc6c4..885167f7e47b 100644 */ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) { -@@ -1039,8 +1183,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) +@@ -1039,8 +1192,8 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) return 0; } if (dl->cdict != NULL) { @@ -35517,7 +37971,7 @@ index 16bb995bc6c4..885167f7e47b 100644 return 0; } assert(dl->dictSize > 0); -@@ -1060,26 +1204,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) +@@ -1060,26 +1213,30 @@ static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) } size_t ZSTD_CCtx_loadDictionary_advanced( @@ -35558,7 +38012,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } cctx->localDict.dictSize = dictSize; cctx->localDict.dictContentType = dictContentType; -@@ -1149,7 +1297,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) +@@ -1149,7 +1306,7 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, @@ -35567,7 +38021,16 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_clearAllDicts(cctx); return ZSTD_CCtxParams_reset(&cctx->requestedParams); } -@@ -1178,11 +1326,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) +@@ -1168,7 +1325,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) + BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); + BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); + BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); +- BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); ++ BOUNDCHECK(ZSTD_c_strategy, (int)cParams.strategy); + return 0; + } + +@@ -1178,11 +1335,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) static ZSTD_compressionParameters ZSTD_clampCParams(ZSTD_compressionParameters cParams) { @@ -35585,13 +38048,21 @@ index 16bb995bc6c4..885167f7e47b 100644 # define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) CLAMP(ZSTD_c_windowLog, cParams.windowLog); CLAMP(ZSTD_c_chainLog, cParams.chainLog); -@@ -1247,12 +1396,55 @@ static ZSTD_compressionParameters +@@ -1240,19 +1398,62 @@ static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize) + * optimize `cPar` for a specified input (`srcSize` and `dictSize`). + * mostly downsize to reduce memory consumption and initialization latency. + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. +- * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`. ++ * `mode` is the mode for parameter adjustment. See docs for `ZSTD_CParamMode_e`. 
+ * note : `srcSize==0` means 0! + * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */ + static ZSTD_compressionParameters ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize, - ZSTD_cParamMode_e mode) -+ ZSTD_cParamMode_e mode, -+ ZSTD_paramSwitch_e useRowMatchFinder) ++ ZSTD_CParamMode_e mode, ++ ZSTD_ParamSwitch_e useRowMatchFinder) { const U64 minSrcSize = 513; /* (1<<9) + 1 */ const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); @@ -35642,7 +38113,7 @@ index 16bb995bc6c4..885167f7e47b 100644 switch (mode) { case ZSTD_cpm_unknown: case ZSTD_cpm_noAttachDict: -@@ -1281,8 +1473,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, +@@ -1281,8 +1482,8 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, } /* resize windowLog if input is small enough, to use less memory */ @@ -35653,7 +38124,7 @@ index 16bb995bc6c4..885167f7e47b 100644 U32 const tSize = (U32)(srcSize + dictSize); static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN : -@@ -1300,6 +1492,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, +@@ -1300,6 +1501,42 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ @@ -35696,7 +38167,7 @@ index 16bb995bc6c4..885167f7e47b 100644 return cPar; } -@@ -1310,7 +1538,7 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar, +@@ -1310,11 +1547,11 @@ ZSTD_adjustCParams(ZSTD_compressionParameters cPar, { cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; @@ -35704,8 +38175,28 @@ index 16bb995bc6c4..885167f7e47b 100644 + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto); } - static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); -@@ -1341,7 +1569,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( +-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); +-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); ++static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); ++static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); + + static void ZSTD_overrideCParams( + ZSTD_compressionParameters* cParams, +@@ -1330,24 +1567,25 @@ static void ZSTD_overrideCParams( + } + + ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( +- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) ++ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) + { + ZSTD_compressionParameters cParams; + if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { +- srcSizeHint = CCtxParams->srcSizeHint; ++ assert(CCtxParams->srcSizeHint>=0); ++ srcSizeHint = (U64)CCtxParams->srcSizeHint; + } + cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, 
srcSizeHint, dictSize, mode); + if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ @@ -35714,21 +38205,37 @@ index 16bb995bc6c4..885167f7e47b 100644 } static size_t -@@ -1367,10 +1595,10 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, - + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32)) - + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32)) - + ZSTD_cwksp_aligned_alloc_size((1<strategy, useRowMatchFinder) - ? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16)) -+ ? ZSTD_cwksp_aligned_alloc_size(hSize) ++ ? ZSTD_cwksp_aligned64_alloc_size(hSize) : 0; size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt)) ? optPotentialSpace -@@ -1386,6 +1614,13 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, +@@ -1386,30 +1624,38 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, return tableSpace + optSpace + slackSpace + lazyAdditionalSpace; } @@ -35742,8 +38249,9 @@ index 16bb995bc6c4..885167f7e47b 100644 static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, -@@ -1393,12 +1628,13 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( - const ZSTD_paramSwitch_e useRowMatchFinder, + const int isStatic, +- const ZSTD_paramSwitch_e useRowMatchFinder, ++ const ZSTD_ParamSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, - const U64 pledgedSrcSize) @@ -35758,21 +38266,37 @@ index 16bb995bc6c4..885167f7e47b 100644 + size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize); + size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer); size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) - + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) +- + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef)) ++ + ZSTD_cwksp_aligned64_alloc_size(maxNbSeq * sizeof(SeqDef)) + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); -@@ -1417,6 +1653,11 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( +- size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE); ++ size_t const tmpWorkSpace = ZSTD_cwksp_alloc_size(TMP_WORKSPACE_SIZE); + size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); + size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1); + + size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); + size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); + size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ? +- ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; ++ ZSTD_cwksp_aligned64_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; + + + size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) +@@ -1417,15 +1663,21 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + size_t const externalSeqSpace = useSequenceProducer -+ ? ZSTD_cwksp_aligned_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) ++ ? 
ZSTD_cwksp_aligned64_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence)) + : 0; + size_t const neededSpace = cctxSpace + - entropySpace + -@@ -1425,7 +1666,8 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( +- entropySpace + ++ tmpWorkSpace + + blockStateSpace + + ldmSpace + ldmSeqSpace + matchStateSize + tokenSpace + @@ -35782,7 +38306,16 @@ index 16bb995bc6c4..885167f7e47b 100644 DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); return neededSpace; -@@ -1443,7 +1685,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) +@@ -1435,7 +1687,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) + { + ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); +- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ++ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, + &cParams); + + RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); +@@ -1443,7 +1695,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) * be needed. However, we still allocate two 0-sized buffers, which can * take space under ASAN. */ return ZSTD_estimateCCtxSize_usingCCtxParams_internal( @@ -35791,7 +38324,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) -@@ -1493,7 +1735,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) +@@ -1493,18 +1745,18 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); @@ -35800,7 +38333,11 @@ index 16bb995bc6c4..885167f7e47b 100644 size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered) ? ((size_t)1 << cParams.windowLog) + blockSize : 0; -@@ -1504,7 +1746,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) + size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered) + ? ZSTD_compressBound(blockSize) + 1 + : 0; +- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); ++ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, @@ -35809,7 +38346,16 @@ index 16bb995bc6c4..885167f7e47b 100644 } } -@@ -1637,6 +1879,19 @@ typedef enum { +@@ -1600,7 +1852,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) + * Invalidate all the matches in the match finder tables. + * Requires nextSrc and base to be set (can be NULL). 
+ */ +-static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) ++static void ZSTD_invalidateMatchState(ZSTD_MatchState_t* ms) + { + ZSTD_window_clear(&ms->window); + +@@ -1637,12 +1889,25 @@ typedef enum { ZSTD_resetTarget_CCtx } ZSTD_resetTarget_e; @@ -35823,13 +38369,21 @@ index 16bb995bc6c4..885167f7e47b 100644 +} + +/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */ -+static void ZSTD_advanceHashSalt(ZSTD_matchState_t* ms) { ++static void ZSTD_advanceHashSalt(ZSTD_MatchState_t* ms) { + ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4); +} static size_t - ZSTD_reset_matchState(ZSTD_matchState_t* ms, -@@ -1664,6 +1919,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, +-ZSTD_reset_matchState(ZSTD_matchState_t* ms, ++ZSTD_reset_matchState(ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + const ZSTD_compressionParameters* cParams, +- const ZSTD_paramSwitch_e useRowMatchFinder, ++ const ZSTD_ParamSwitch_e useRowMatchFinder, + const ZSTD_compResetPolicy_e crp, + const ZSTD_indexResetPolicy_e forceResetIndex, + const ZSTD_resetTarget_e forWho) +@@ -1664,6 +1929,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, } ms->hashLog3 = hashLog3; @@ -35837,7 +38391,7 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_invalidateMatchState(ms); -@@ -1685,22 +1941,19 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, +@@ -1685,22 +1951,19 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_cwksp_clean_tables(ws); } @@ -35866,39 +38420,51 @@ index 16bb995bc6c4..885167f7e47b 100644 + ZSTD_advanceHashSalt(ms); + } else { + /* When we are not salting we want to always memset the memory */ -+ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned(ws, tagTableSize); ++ ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned64(ws, tagTableSize); + ZSTD_memset(ms->tagTable, 0, tagTableSize); + ms->hashSalt = 0; } { /* Switch to 32-entry rows if searchLog is 5 (or more) */ U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); -@@ -1709,6 +1962,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, +@@ -1709,6 +1972,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, } } + /* opt parser space */ + if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { + DEBUGLOG(4, "reserving optimal parser space"); -+ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); -+ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); -+ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); -+ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)); -+ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t)); ++ ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (1<opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxLL+1) * sizeof(unsigned)); ++ ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxML+1) * sizeof(unsigned)); ++ ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned64(ws, (MaxOff+1) * sizeof(unsigned)); ++ ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t)); ++ ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned64(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t)); + } + ms->cParams = *cParams; RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, -@@ 
-1768,6 +2032,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1754,7 +2028,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, + { + ZSTD_cwksp* const ws = &zc->workspace; + DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", +- (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter); ++ (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->postBlockSplitter); + assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); + + zc->isFirstBlock = 1; +@@ -1766,8 +2040,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, + params = &zc->appliedParams; + assert(params->useRowMatchFinder != ZSTD_ps_auto); - assert(params->useBlockSplitter != ZSTD_ps_auto); +- assert(params->useBlockSplitter != ZSTD_ps_auto); ++ assert(params->postBlockSplitter != ZSTD_ps_auto); assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + assert(params->maxBlockSize != 0); if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams); -@@ -1776,9 +2041,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1776,9 +2051,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, } { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize)); @@ -35910,7 +38476,7 @@ index 16bb995bc6c4..885167f7e47b 100644 size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; -@@ -1795,8 +2059,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1795,8 +2069,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, size_t const neededSpace = ZSTD_estimateCCtxSize_usingCCtxParams_internal( ¶ms->cParams, ¶ms->ldmParams, zc->staticSize != 0, params->useRowMatchFinder, @@ -35920,7 +38486,7 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!"); -@@ -1805,7 +2068,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1805,7 +2078,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, { /* Check if workspace is large enough, alloc a new one if needed */ int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); @@ -35929,7 +38495,26 @@ index 16bb995bc6c4..885167f7e47b 100644 DEBUGLOG(4, "Need %zu B workspace", neededSpace); DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); -@@ -1838,6 +2101,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1823,21 +2096,23 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, + + DEBUGLOG(5, "reserving object space"); + /* Statically sized space. 
+- * entropyWorkspace never moves, ++ * tmpWorkspace never moves, + * though prev/next block swap places */ + assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); + zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); + RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); + zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); + RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); +- zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE); +- RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); ++ zc->tmpWorkspace = ZSTD_cwksp_reserve_object(ws, TMP_WORKSPACE_SIZE); ++ RETURN_ERROR_IF(zc->tmpWorkspace == NULL, memory_allocation, "couldn't allocate tmpWorkspace"); ++ zc->tmpWkspSize = TMP_WORKSPACE_SIZE; + } } + + ZSTD_cwksp_clear(ws); /* init params */ zc->blockState.matchState.cParams = params->cParams; @@ -35937,7 +38522,16 @@ index 16bb995bc6c4..885167f7e47b 100644 zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; zc->consumedSrcSize = 0; zc->producedCSize = 0; -@@ -1854,13 +2118,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1845,7 +2120,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, + zc->appliedParams.fParams.contentSizeFlag = 0; + DEBUGLOG(4, "pledged content size : %u ; flag : %u", + (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); +- zc->blockSize = blockSize; ++ zc->blockSizeMax = blockSize; + + xxh64_reset(&zc->xxhState, 0); + zc->stage = ZSTDcs_init; +@@ -1854,13 +2129,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); @@ -35950,15 +38544,15 @@ index 16bb995bc6c4..885167f7e47b 100644 + needsIndexReset, + ZSTD_resetTarget_CCtx), ""); + -+ zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); ++ zc->seqStore.sequencesStart = (SeqDef*)ZSTD_cwksp_reserve_aligned64(ws, maxNbSeq * sizeof(SeqDef)); + + /* ldm hash table */ + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { + /* TODO: avoid memset? 
*/ + size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; -+ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); ++ zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned64(ws, ldmHSize * sizeof(ldmEntry_t)); + ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); -+ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); ++ zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned64(ws, maxNbLdmSeq * sizeof(rawSeq)); + zc->maxNbLdmSequences = maxNbLdmSeq; + + ZSTD_window_init(&zc->ldmState.window); @@ -35970,7 +38564,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize); + zc->extSeqBufCapacity = maxNbExternalSeq; + zc->extSeqBuf = -+ (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); ++ (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned64(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence)); + } + + /* buffers */ @@ -35985,7 +38579,7 @@ index 16bb995bc6c4..885167f7e47b 100644 zc->bufferedPolicy = zbuff; zc->inBuffSize = buffInSize; zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); -@@ -1883,32 +2180,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, +@@ -1883,32 +2191,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); @@ -36019,7 +38613,7 @@ index 16bb995bc6c4..885167f7e47b 100644 zc->initialized = 1; -@@ -1980,7 +2254,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, +@@ -1980,7 +2265,8 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, } params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize, @@ -36029,7 +38623,7 @@ index 16bb995bc6c4..885167f7e47b 100644 params.cParams.windowLog = windowLog; params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */ FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, ¶ms, pledgedSrcSize, -@@ -2019,6 +2294,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, +@@ -2019,6 +2305,22 @@ ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx, return 0; } @@ -36052,7 +38646,7 @@ index 16bb995bc6c4..885167f7e47b 100644 static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, -@@ -2054,21 +2345,23 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, +@@ -2054,26 +2356,29 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, : 0; size_t const hSize = (size_t)1 << cdict_cParams->hashLog; @@ -36085,24 +38679,89 @@ index 16bb995bc6c4..885167f7e47b 100644 } } -@@ -2147,6 +2440,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, - params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; + /* Zero the hashTable3, since the cdict never fills it */ +- { int const h3log = cctx->blockState.matchState.hashLog3; ++ assert(cctx->blockState.matchState.hashLog3 <= 31); ++ { U32 const h3log = cctx->blockState.matchState.hashLog3; + size_t const h3Size = h3log ? 
((size_t)1 << h3log) : 0; + assert(cdict->matchState.hashLog3 == 0); + ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32)); +@@ -2082,8 +2387,8 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, + ZSTD_cwksp_mark_tables_clean(&cctx->workspace); + + /* copy dictionary offsets */ +- { ZSTD_matchState_t const* srcMatchState = &cdict->matchState; +- ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState; ++ { ZSTD_MatchState_t const* srcMatchState = &cdict->matchState; ++ ZSTD_MatchState_t* dstMatchState = &cctx->blockState.matchState; + dstMatchState->window = srcMatchState->window; + dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; + dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; +@@ -2141,12 +2446,13 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, + /* Copy only compression parameters related to tables. */ + params.cParams = srcCCtx->appliedParams.cParams; + assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); +- assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto); ++ assert(srcCCtx->appliedParams.postBlockSplitter != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto); + params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; +- params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; ++ params.postBlockSplitter = srcCCtx->appliedParams.postBlockSplitter; params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; + params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize; ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize, /* loadedDictSize */ 0, ZSTDcrp_leaveDirty, zbuff); -@@ -2294,7 +2588,7 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par +@@ -2166,7 +2472,7 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, + ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog) + : 0; + size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog; +- int const h3log = srcCCtx->blockState.matchState.hashLog3; ++ U32 const h3log = srcCCtx->blockState.matchState.hashLog3; + size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0; + + ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable, +@@ -2184,8 +2490,8 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, + + /* copy dictionary offsets */ + { +- const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState; +- ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState; ++ const ZSTD_MatchState_t* srcMatchState = &srcCCtx->blockState.matchState; ++ ZSTD_MatchState_t* dstMatchState = &dstCCtx->blockState.matchState; + dstMatchState->window = srcMatchState->window; + dstMatchState->nextToUpdate = srcMatchState->nextToUpdate; + dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd; +@@ -2234,7 +2540,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa + /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ + U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX; + assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ +- assert(size < (1U<<31)); /* can be casted to int */ ++ assert(size < (1U<<31)); /* can be cast to int */ + + + for (rowNb=0 ; rowNb < nbRows ; rowNb++) { +@@ -2267,7 +2573,7 @@ static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const + + /*! 
ZSTD_reduceIndex() : + * rescale all indexes to avoid future overflow (indexes are U32) */ +-static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) ++static void ZSTD_reduceIndex (ZSTD_MatchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue) + { + { U32 const hSize = (U32)1 << params->cParams.hashLog; + ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); +@@ -2294,26 +2600,32 @@ static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* par /* See doc/zstd_compression_format.md for detailed format description */ -void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) -+int ZSTD_seqToCodes(const seqStore_t* seqStorePtr) ++int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr) { - const seqDef* const sequences = seqStorePtr->sequencesStart; +- const seqDef* const sequences = seqStorePtr->sequencesStart; ++ const SeqDef* const sequences = seqStorePtr->sequencesStart; BYTE* const llCodeTable = seqStorePtr->llCode; -@@ -2302,18 +2596,24 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) + BYTE* const ofCodeTable = seqStorePtr->ofCode; BYTE* const mlCodeTable = seqStorePtr->mlCode; U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); U32 u; @@ -36128,7 +38787,20 @@ index 16bb995bc6c4..885167f7e47b 100644 } /* ZSTD_useTargetCBlockSize(): -@@ -2347,6 +2647,7 @@ typedef struct { +@@ -2333,9 +2645,9 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams) + * Returns 1 if true, 0 otherwise. */ + static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams) + { +- DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter); +- assert(cctxParams->useBlockSplitter != ZSTD_ps_auto); +- return (cctxParams->useBlockSplitter == ZSTD_ps_enable); ++ DEBUGLOG(5, "ZSTD_blockSplitterEnabled (postBlockSplitter=%d)", cctxParams->postBlockSplitter); ++ assert(cctxParams->postBlockSplitter != ZSTD_ps_auto); ++ return (cctxParams->postBlockSplitter == ZSTD_ps_enable); + } + + /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types +@@ -2347,6 +2659,7 @@ typedef struct { U32 MLtype; size_t size; size_t lastCountSize; /* Accounts for bug in 1.3.4. 
More detail in ZSTD_entropyCompressSeqStore_internal() */ @@ -36136,7 +38808,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } ZSTD_symbolEncodingTypeStats_t; /* ZSTD_buildSequencesStatistics(): -@@ -2357,11 +2658,13 @@ typedef struct { +@@ -2357,11 +2670,13 @@ typedef struct { * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32) */ static ZSTD_symbolEncodingTypeStats_t @@ -36146,7 +38818,7 @@ index 16bb995bc6c4..885167f7e47b 100644 - ZSTD_strategy strategy, unsigned* countWorkspace, - void* entropyWorkspace, size_t entropyWkspSize) { +ZSTD_buildSequencesStatistics( -+ const seqStore_t* seqStorePtr, size_t nbSeq, ++ const SeqStore_t* seqStorePtr, size_t nbSeq, + const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy, + BYTE* dst, const BYTE* const dstEnd, + ZSTD_strategy strategy, unsigned* countWorkspace, @@ -36155,7 +38827,7 @@ index 16bb995bc6c4..885167f7e47b 100644 BYTE* const ostart = dst; const BYTE* const oend = dstEnd; BYTE* op = ostart; -@@ -2375,7 +2678,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, +@@ -2375,7 +2690,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, stats.lastCountSize = 0; /* convert length/distances into codes */ @@ -36164,7 +38836,43 @@ index 16bb995bc6c4..885167f7e47b 100644 assert(op <= oend); assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */ /* build CTable for Literal Lengths */ -@@ -2480,22 +2783,22 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, +@@ -2392,7 +2707,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, + assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), +- CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype, ++ CTable_LitLength, LLFSELog, (SymbolEncodingType_e)stats.LLtype, + countWorkspace, max, llCodeTable, nbSeq, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + prevEntropy->litlengthCTable, +@@ -2413,7 +2728,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, + size_t const mostFrequent = HIST_countFast_wksp( + countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ +- ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; ++ ZSTD_DefaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + DEBUGLOG(5, "Building OF table"); + nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; + stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, +@@ -2424,7 +2739,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, + assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), +- CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype, ++ CTable_OffsetBits, OffFSELog, (SymbolEncodingType_e)stats.Offtype, + countWorkspace, max, ofCodeTable, nbSeq, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + prevEntropy->offcodeCTable, +@@ -2454,7 +2769,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, + assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), +- CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype, ++ CTable_MatchLength, MLFSELog, (SymbolEncodingType_e)stats.MLtype, + countWorkspace, max, mlCodeTable, nbSeq, + ML_defaultNorm, ML_defaultNormLog, MaxML, + prevEntropy->matchlengthCTable, +@@ -2480,22 +2795,23 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, */ #define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 MEM_STATIC size_t @@ -36176,11 +38884,12 @@ index 16bb995bc6c4..885167f7e47b 100644 - void* entropyWorkspace, size_t entropyWkspSize, - const int bmi2) +ZSTD_entropyCompressSeqStore_internal( -+ const seqStore_t* seqStorePtr, ++ void* dst, size_t dstCapacity, ++ const void* literals, size_t litSize, ++ const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, -+ void* dst, size_t dstCapacity, + void* entropyWorkspace, size_t entropyWkspSize, + const int bmi2) { @@ -36190,13 +38899,14 @@ index 16bb995bc6c4..885167f7e47b 100644 FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; - const seqDef* const sequences = seqStorePtr->sequencesStart; +- const seqDef* const sequences = seqStorePtr->sequencesStart; - const size_t nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; ++ const SeqDef* const sequences = seqStorePtr->sequencesStart; + const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); const BYTE* const ofCodeTable = seqStorePtr->ofCode; const BYTE* const llCodeTable = seqStorePtr->llCode; const BYTE* const mlCodeTable = seqStorePtr->mlCode; -@@ -2503,29 +2806,31 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, +@@ -2503,29 +2819,28 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, BYTE* const oend = ostart + dstCapacity; BYTE* op = ostart; size_t lastCountSize; @@ -36211,14 +38921,14 @@ index 16bb995bc6c4..885167f7e47b 100644 assert(entropyWkspSize >= HUF_WORKSPACE_SIZE); /* Compress literals */ - { const BYTE* const literals = seqStorePtr->litStart; +- { const BYTE* const literals = seqStorePtr->litStart; - size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart; - size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart; -+ size_t const numSequences = (size_t)(seqStorePtr->sequences - 
seqStorePtr->sequencesStart); -+ size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart); ++ { size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); /* Base suspicion of uncompressibility on ratio of literals to sequences */ - unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); - size_t const litSize = (size_t)(seqStorePtr->lit - literals); +- unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); +- size_t const litSize = (size_t)(seqStorePtr->lit - literals); ++ int const suspectUncompressible = (numSequences == 0) || (litSize / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); + size_t const cSize = ZSTD_compressLiterals( - &prevEntropy->huf, &nextEntropy->huf, @@ -36235,7 +38945,7 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); assert(cSize <= dstCapacity); op += cSize; -@@ -2551,11 +2856,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, +@@ -2551,11 +2866,10 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); return (size_t)(op - ostart); } @@ -36250,7 +38960,7 @@ index 16bb995bc6c4..885167f7e47b 100644 &prevEntropy->fse, &nextEntropy->fse, op, oend, strategy, count, -@@ -2564,6 +2868,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, +@@ -2564,6 +2878,7 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2)); lastCountSize = stats.lastCountSize; op += stats.size; @@ -36258,10 +38968,11 @@ index 16bb995bc6c4..885167f7e47b 100644 } { size_t const bitstreamSize = ZSTD_encodeSequences( -@@ -2598,14 +2903,15 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, +@@ -2597,104 +2912,146 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, + return (size_t)(op - ostart); } - MEM_STATIC size_t +-MEM_STATIC size_t -ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, - const ZSTD_entropyCTables_t* prevEntropy, - ZSTD_entropyCTables_t* nextEntropy, @@ -36270,31 +38981,38 @@ index 16bb995bc6c4..885167f7e47b 100644 - size_t srcSize, - void* entropyWorkspace, size_t entropyWkspSize, - int bmi2) -+ZSTD_entropyCompressSeqStore( -+ const seqStore_t* seqStorePtr, ++static size_t ++ZSTD_entropyCompressSeqStore_wExtLitBuffer( ++ void* dst, size_t dstCapacity, ++ const void* literals, size_t litSize, ++ size_t blockSize, ++ const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, -+ void* dst, size_t dstCapacity, -+ size_t srcSize, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) { size_t const cSize = ZSTD_entropyCompressSeqStore_internal( - seqStorePtr, prevEntropy, nextEntropy, cctxParams, -@@ -2615,15 +2921,21 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, +- seqStorePtr, prevEntropy, nextEntropy, cctxParams, + dst, dstCapacity, ++ literals, litSize, ++ seqStorePtr, prevEntropy, nextEntropy, cctxParams, + entropyWorkspace, entropyWkspSize, bmi2); + if (cSize == 0) return 0; /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. 
*/ - if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) -+ if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) { ++ if ((cSize == ERROR(dstSize_tooSmall)) & (blockSize <= dstCapacity)) { + DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity); return 0; /* block not compressed */ + } FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed"); /* Check compressibility */ - { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); +- { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy); ++ { size_t const maxCSize = blockSize - ZSTD_minGain(blockSize, cctxParams->cParams.strategy); if (cSize >= maxCSize) return 0; /* block not compressed */ } - DEBUGLOG(4, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize); @@ -36306,8 +39024,36 @@ index 16bb995bc6c4..885167f7e47b 100644 return cSize; } -@@ -2635,40 +2947,43 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS - static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { ++static size_t ++ZSTD_entropyCompressSeqStore( ++ const SeqStore_t* seqStorePtr, ++ const ZSTD_entropyCTables_t* prevEntropy, ++ ZSTD_entropyCTables_t* nextEntropy, ++ const ZSTD_CCtx_params* cctxParams, ++ void* dst, size_t dstCapacity, ++ size_t srcSize, ++ void* entropyWorkspace, size_t entropyWkspSize, ++ int bmi2) ++{ ++ return ZSTD_entropyCompressSeqStore_wExtLitBuffer( ++ dst, dstCapacity, ++ seqStorePtr->litStart, (size_t)(seqStorePtr->lit - seqStorePtr->litStart), ++ srcSize, ++ seqStorePtr, ++ prevEntropy, nextEntropy, ++ cctxParams, ++ entropyWorkspace, entropyWkspSize, ++ bmi2); ++} ++ + /* ZSTD_selectBlockCompressor() : + * Not static, but internal use only (used by long distance matcher) + * assumption : strat is a valid strategy */ +-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) ++ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) + { +- static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { ++ static const ZSTD_BlockCompressor_f blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, ZSTD_compressBlock_fast, - ZSTD_compressBlock_doubleFast, @@ -36377,10 +39123,18 @@ index 16bb995bc6c4..885167f7e47b 100644 NULL, NULL, NULL, -@@ -2681,18 +2996,26 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS - DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); + NULL } + }; +- ZSTD_blockCompressor selectedCompressor; ++ ZSTD_BlockCompressor_f selectedCompressor; + ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1); + +- assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); +- DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); ++ assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat)); ++ DEBUGLOG(5, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder); if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) { - static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = { +- static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = { - { 
ZSTD_compressBlock_greedy_row, - ZSTD_compressBlock_lazy_row, - ZSTD_compressBlock_lazy2_row }, @@ -36393,6 +39147,7 @@ index 16bb995bc6c4..885167f7e47b 100644 - { ZSTD_compressBlock_greedy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy_dedicatedDictSearch_row, - ZSTD_compressBlock_lazy2_dedicatedDictSearch_row } ++ static const ZSTD_BlockCompressor_f rowBasedBlockCompressors[4][3] = { + { + ZSTD_COMPRESSBLOCK_GREEDY_ROW, + ZSTD_COMPRESSBLOCK_LAZY_ROW, @@ -36414,12 +39169,32 @@ index 16bb995bc6c4..885167f7e47b 100644 + ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW + } }; - DEBUGLOG(4, "Selecting a row-based matchfinder"); +- DEBUGLOG(4, "Selecting a row-based matchfinder"); ++ DEBUGLOG(5, "Selecting a row-based matchfinder"); assert(useRowMatchFinder != ZSTD_ps_auto); -@@ -2718,6 +3041,72 @@ void ZSTD_resetSeqStore(seqStore_t* ssPtr) + selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; + } else { +@@ -2704,30 +3061,126 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS + return selectedCompressor; + } + +-static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr, ++static void ZSTD_storeLastLiterals(SeqStore_t* seqStorePtr, + const BYTE* anchor, size_t lastLLSize) + { + ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } + +-void ZSTD_resetSeqStore(seqStore_t* ssPtr) ++void ZSTD_resetSeqStore(SeqStore_t* ssPtr) + { + ssPtr->lit = ssPtr->litStart; + ssPtr->sequences = ssPtr->sequencesStart; ssPtr->longLengthType = ZSTD_llt_none; } +-typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; +/* ZSTD_postProcessSequenceProducerResult() : + * Validates and post-processes sequences obtained through the external matchfinder API: + * - Checks whether nbExternalSeqs represents an error condition. @@ -36486,10 +39261,41 @@ index 16bb995bc6c4..885167f7e47b 100644 + return litLenSum + matchLenSum; +} + - typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e; ++/* ++ * Function to validate sequences produced by a block compressor. ++ */ ++static void ZSTD_validateSeqStore(const SeqStore_t* seqStore, const ZSTD_compressionParameters* cParams) ++{ ++#if DEBUGLEVEL >= 1 ++ const SeqDef* seq = seqStore->sequencesStart; ++ const SeqDef* const seqEnd = seqStore->sequences; ++ size_t const matchLenLowerBound = cParams->minMatch == 3 ? 
3 : 4; ++ for (; seq < seqEnd; ++seq) { ++ const ZSTD_SequenceLength seqLength = ZSTD_getSequenceLength(seqStore, seq); ++ assert(seqLength.matchLength >= matchLenLowerBound); ++ (void)seqLength; ++ (void)matchLenLowerBound; ++ } ++#else ++ (void)seqStore; ++ (void)cParams; ++#endif ++} ++ ++static size_t ++ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, ++ ZSTD_SequencePosition* seqPos, ++ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, ++ const void* src, size_t blockSize, ++ ZSTD_ParamSwitch_e externalRepSearch); ++ ++typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_BuildSeqStore_e; static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) -@@ -2727,7 +3116,9 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) + { +- ZSTD_matchState_t* const ms = &zc->blockState.matchState; ++ ZSTD_MatchState_t* const ms = &zc->blockState.matchState; + DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize); assert(srcSize <= ZSTD_BLOCKSIZE_MAX); /* Assert that we have correctly flushed the ctx params into the ms's copy */ ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams); @@ -36500,7 +39306,7 @@ index 16bb995bc6c4..885167f7e47b 100644 if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) { ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize); } else { -@@ -2763,6 +3154,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) +@@ -2763,6 +3216,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) } if (zc->externSeqStore.pos < zc->externSeqStore.size) { assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); @@ -36516,10 +39322,13 @@ index 16bb995bc6c4..885167f7e47b 100644 /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, -@@ -2774,6 +3174,14 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) +@@ -2772,7 +3234,15 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) + src, srcSize); + assert(zc->externSeqStore.pos <= zc->externSeqStore.size); } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { - rawSeqStore_t ldmSeqStore = kNullRawSeqStore; - +- rawSeqStore_t ldmSeqStore = kNullRawSeqStore; ++ RawSeqStore_t ldmSeqStore = kNullRawSeqStore; ++ + /* External matchfinder + LDM is technically possible, just not implemented yet. + * We need to revisit soon and implement it. */ + RETURN_ERROR_IF( @@ -36527,11 +39336,10 @@ index 16bb995bc6c4..885167f7e47b 100644 + parameter_combination_unsupported, + "Long-distance matching with external sequence producer enabled is not currently supported." 
+ ); -+ + ldmSeqStore.seq = zc->ldmSequences; ldmSeqStore.capacity = zc->maxNbLdmSequences; - /* Updates ldmSeqStore.size */ -@@ -2788,10 +3196,74 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) +@@ -2788,42 +3258,116 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->appliedParams.useRowMatchFinder, src, srcSize); assert(ldmSeqStore.pos == ldmSeqStore.size); @@ -36566,11 +39374,11 @@ index 16bb995bc6c4..885167f7e47b 100644 + + /* Return early if there is no error, since we don't need to worry about last literals */ + if (!ZSTD_isError(nbPostProcessedSeqs)) { -+ ZSTD_sequencePosition seqPos = {0,0,0}; ++ ZSTD_SequencePosition seqPos = {0,0,0}; + size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs); + RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!"); + FORWARD_IF_ERROR( -+ ZSTD_copySequencesToSeqStoreExplicitBlockDelim( ++ ZSTD_transferSequences_wBlockDelim( + zc, &seqPos, + zc->extSeqBuf, nbPostProcessedSeqs, + src, srcSize, @@ -36589,7 +39397,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + } + + /* Fallback to software matchfinder */ -+ { ZSTD_blockCompressor const blockCompressor = ++ { ZSTD_BlockCompressor_f const blockCompressor = + ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, @@ -36603,19 +39411,22 @@ index 16bb995bc6c4..885167f7e47b 100644 + lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); + } } + } else { /* not long range mode and no external matchfinder */ -+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor( ++ ZSTD_BlockCompressor_f const blockCompressor = ZSTD_selectBlockCompressor( + zc->appliedParams.cParams.strategy, + zc->appliedParams.useRowMatchFinder, + dictMode); ms->ldmSeqStore = NULL; lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize); } -@@ -2801,29 +3273,38 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) + { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize; + ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize); + } } ++ ZSTD_validateSeqStore(&zc->seqStore, &zc->appliedParams.cParams); return ZSTDbss_compress; } -static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) -+static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const seqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM]) ++static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const SeqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM]) { - const seqStore_t* seqStore = ZSTD_getSeqStore(zc); - const seqDef* seqStoreSeqs = seqStore->sequencesStart; @@ -36623,18 +39434,18 @@ index 16bb995bc6c4..885167f7e47b 100644 - size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart); - size_t literalsRead = 0; - size_t lastLLSize; -+ const seqDef* inSeqs = seqStore->sequencesStart; -+ const size_t nbInSequences = seqStore->sequences - inSeqs; ++ const SeqDef* inSeqs = seqStore->sequencesStart; ++ const size_t nbInSequences = (size_t)(seqStore->sequences - inSeqs); + const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart); - ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex]; + ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? 
seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex; + const size_t nbOutSequences = nbInSequences + 1; + size_t nbOutLiterals = 0; -+ repcodes_t repcodes; ++ Repcodes_t repcodes; size_t i; - repcodes_t updatedRepcodes; - +- - assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences); - /* Ensure we have enough space for last literals "sequence" */ - assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1); @@ -36643,6 +39454,7 @@ index 16bb995bc6c4..885167f7e47b 100644 - U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM; - outSeqs[i].litLength = seqStoreSeqs[i].litLength; - outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH; ++ + /* Bounds check that we have enough space for every input sequence + * and the block delimiter + */ @@ -36666,7 +39478,7 @@ index 16bb995bc6c4..885167f7e47b 100644 if (i == seqStore->longLengthPos) { if (seqStore->longLengthType == ZSTD_llt_literalLength) { outSeqs[i].litLength += 0x10000; -@@ -2832,37 +3313,55 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) +@@ -2832,46 +3376,75 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) } } @@ -36741,9 +39553,11 @@ index 16bb995bc6c4..885167f7e47b 100644 } size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, -@@ -2871,6 +3370,16 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, + size_t outSeqsSize, const void* src, size_t srcSize) + { const size_t dstCapacity = ZSTD_compressBound(srcSize); - void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); +- void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); ++ void* dst; /* Make C90 happy. */ SeqCollector seqCollector; + { + int targetCBlockSize; @@ -36756,9 +39570,11 @@ index 16bb995bc6c4..885167f7e47b 100644 + RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0"); + } ++ dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem); RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!"); -@@ -2880,8 +3389,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, + seqCollector.collectSequences = 1; +@@ -2880,8 +3453,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, seqCollector.maxSequences = outSeqsSize; zc->seqCollector = seqCollector; @@ -36773,7 +39589,7 @@ index 16bb995bc6c4..885167f7e47b 100644 return zc->seqCollector.seqIndex; } -@@ -2910,19 +3423,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) { +@@ -2910,19 +3487,17 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) { const size_t unrollMask = unrollSize - 1; const size_t prefixLength = length & unrollMask; size_t i; @@ -36795,7 +39611,16 @@ index 16bb995bc6c4..885167f7e47b 100644 return 1; } -@@ -2938,7 +3449,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore) +@@ -2930,7 +3505,7 @@ static int ZSTD_isRLE(const BYTE* src, size_t length) { + * This is just a heuristic based on the compressibility. + * It may return both false positives and false negatives. 
+ */ +-static int ZSTD_maybeRLE(seqStore_t const* seqStore) ++static int ZSTD_maybeRLE(SeqStore_t const* seqStore) + { + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); + size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart); +@@ -2938,7 +3513,8 @@ static int ZSTD_maybeRLE(seqStore_t const* seqStore) return nbSeqs < 4 && nbLits < 10; } @@ -36805,7 +39630,7 @@ index 16bb995bc6c4..885167f7e47b 100644 { ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock; bs->prevCBlock = bs->nextCBlock; -@@ -2946,7 +3458,9 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c +@@ -2946,12 +3522,14 @@ static void ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* c } /* Writes the block header */ @@ -36816,7 +39641,13 @@ index 16bb995bc6c4..885167f7e47b 100644 U32 const cBlockHeader = cSize == 1 ? lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); -@@ -2959,13 +3473,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB + MEM_writeLE24(op, cBlockHeader); +- DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); ++ DEBUGLOG(5, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock); + } + + /* ZSTD_buildBlockEntropyStats_literals() : +@@ -2959,13 +3537,16 @@ static void writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastB * Stores literals block type (raw, rle, compressed, repeat) and * huffman description table to hufMetadata. * Requires ENTROPY_WORKSPACE_SIZE workspace @@ -36840,7 +39671,7 @@ index 16bb995bc6c4..885167f7e47b 100644 { BYTE* const wkspStart = (BYTE*)workspace; BYTE* const wkspEnd = wkspStart + wkspSize; -@@ -2973,9 +3490,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi +@@ -2973,9 +3554,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi unsigned* const countWksp = (unsigned*)workspace; const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); BYTE* const nodeWksp = countWkspStart + countWkspSize; @@ -36852,7 +39683,7 @@ index 16bb995bc6c4..885167f7e47b 100644 HUF_repeat repeat = prevHuf->repeatMode; DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize); -@@ -2990,73 +3507,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi +@@ -2990,73 +3571,77 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi /* small ? don't even attempt compression (speed opt) */ #ifndef COMPRESS_LITERALS_SIZE_MIN @@ -36950,13 +39781,14 @@ index 16bb995bc6c4..885167f7e47b 100644 - hufMetadata->hType = set_compressed; - nextHuf->repeatMode = HUF_repeat_check; - return hSize; +- } + } } + if (newCSize + hSize >= srcSize) { + DEBUGLOG(5, "set_basic - no gains"); + ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_basic; + return 0; - } ++ } + DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); + hufMetadata->hType = set_compressed; + nextHuf->repeatMode = HUF_repeat_check; @@ -36964,7 +39796,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } } -@@ -3066,8 +3587,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi +@@ -3066,8 +3651,9 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi * and updates nextEntropy to the appropriate repeatMode. 
*/ static ZSTD_symbolEncodingTypeStats_t @@ -36976,7 +39808,7 @@ index 16bb995bc6c4..885167f7e47b 100644 nextEntropy->litlength_repeatMode = FSE_repeat_none; nextEntropy->offcode_repeatMode = FSE_repeat_none; nextEntropy->matchlength_repeatMode = FSE_repeat_none; -@@ -3078,16 +3600,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { +@@ -3078,16 +3664,18 @@ ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy) { * Builds entropy for the sequences. * Stores symbol compression modes and fse table to fseMetadata. * Requires ENTROPY_WORKSPACE_SIZE wksp. @@ -36990,7 +39822,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + * @return : size of fse tables or error code */ +static size_t +ZSTD_buildBlockEntropyStats_sequences( -+ const seqStore_t* seqStorePtr, ++ const SeqStore_t* seqStorePtr, + const ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, @@ -37003,7 +39835,20 @@ index 16bb995bc6c4..885167f7e47b 100644 BYTE* const ostart = fseMetadata->fseTablesBuffer; BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); BYTE* op = ostart; -@@ -3114,23 +3638,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, +@@ -3103,9 +3691,9 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, + entropyWorkspace, entropyWorkspaceSize) + : ZSTD_buildDummySequencesStatistics(nextEntropy); + FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!"); +- fseMetadata->llType = (symbolEncodingType_e) stats.LLtype; +- fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype; +- fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype; ++ fseMetadata->llType = (SymbolEncodingType_e) stats.LLtype; ++ fseMetadata->ofType = (SymbolEncodingType_e) stats.Offtype; ++ fseMetadata->mlType = (SymbolEncodingType_e) stats.MLtype; + fseMetadata->lastCountSize = stats.lastCountSize; + return stats.size; + } +@@ -3114,23 +3702,28 @@ static size_t ZSTD_buildBlockEntropyStats_sequences(seqStore_t* seqStorePtr, /* ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. 
* Requires workspace size ENTROPY_WORKSPACE_SIZE @@ -37021,7 +39866,7 @@ index 16bb995bc6c4..885167f7e47b 100644 -{ - size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; +size_t ZSTD_buildBlockEntropyStats( -+ const seqStore_t* seqStorePtr, ++ const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, @@ -37043,7 +39888,7 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); entropyMetadata->fseMetadata.fseTablesSize = ZSTD_buildBlockEntropyStats_sequences(seqStorePtr, -@@ -3143,11 +3672,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, +@@ -3143,11 +3736,12 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, } /* Returns the size estimate for the literals section (header + content) of a block */ @@ -37061,7 +39906,7 @@ index 16bb995bc6c4..885167f7e47b 100644 { unsigned* const countWksp = (unsigned*)workspace; unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX; -@@ -3169,12 +3699,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz +@@ -3169,12 +3763,13 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz } /* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */ @@ -37072,7 +39917,7 @@ index 16bb995bc6c4..885167f7e47b 100644 - short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, - void* workspace, size_t wkspSize) +static size_t -+ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, ++ZSTD_estimateBlockSize_symbolType(SymbolEncodingType_e type, + const BYTE* codeTable, size_t nbSeq, unsigned maxCode, + const FSE_CTable* fseCTable, + const U8* additionalBits, @@ -37081,7 +39926,7 @@ index 16bb995bc6c4..885167f7e47b 100644 { unsigned* const countWksp = (unsigned*)workspace; const BYTE* ctp = codeTable; -@@ -3206,99 +3737,107 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, +@@ -3206,116 +3801,121 @@ static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, } /* Returns the size estimate for the sequences section (header + content) of a block */ @@ -37172,7 +40017,7 @@ index 16bb995bc6c4..885167f7e47b 100644 -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { - ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; +static size_t -+ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) ++ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(SeqStore_t* seqStore, ZSTD_CCtx* zc) +{ + ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata; DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); @@ -37183,7 +40028,7 @@ index 16bb995bc6c4..885167f7e47b 100644 entropyMetadata, - zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); - return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), -+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), ""); ++ zc->tmpWorkspace, zc->tmpWkspSize), ""); + return ZSTD_estimateBlockSize( + seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, @@ -37191,13 +40036,13 @@ index 16bb995bc6c4..885167f7e47b 100644 - &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, 
ENTROPY_WORKSPACE_SIZE, + &zc->blockState.nextCBlock->entropy, + entropyMetadata, -+ zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, ++ zc->tmpWorkspace, zc->tmpWkspSize, (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); } /* Returns literals bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) { -+static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore) ++static size_t ZSTD_countSeqStoreLiteralsBytes(const SeqStore_t* const seqStore) +{ size_t literalsBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; @@ -37205,7 +40050,7 @@ index 16bb995bc6c4..885167f7e47b 100644 size_t i; for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; -+ seqDef const seq = seqStore->sequencesStart[i]; ++ SeqDef const seq = seqStore->sequencesStart[i]; literalsBytes += seq.litLength; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) { literalsBytes += 0x10000; @@ -37217,14 +40062,15 @@ index 16bb995bc6c4..885167f7e47b 100644 /* Returns match bytes represented in a seqStore */ -static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { -+static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) ++static size_t ZSTD_countSeqStoreMatchBytes(const SeqStore_t* const seqStore) +{ size_t matchBytes = 0; - size_t const nbSeqs = seqStore->sequences - seqStore->sequencesStart; + size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart); size_t i; for (i = 0; i < nbSeqs; ++i) { - seqDef seq = seqStore->sequencesStart[i]; +- seqDef seq = seqStore->sequencesStart[i]; ++ SeqDef seq = seqStore->sequencesStart[i]; matchBytes += seq.mlBase + MINMATCH; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { matchBytes += 0x10000; @@ -37234,15 +40080,18 @@ index 16bb995bc6c4..885167f7e47b 100644 return matchBytes; } -@@ -3307,15 +3846,12 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { + /* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx). + * Stores the result in resultSeqStore. 
*/ - static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, - const seqStore_t* originalSeqStore, +-static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, +- const seqStore_t* originalSeqStore, - size_t startIdx, size_t endIdx) { - BYTE* const litEnd = originalSeqStore->lit; - size_t literalsBytes; - size_t literalsBytesPreceding = 0; - ++static void ZSTD_deriveSeqStoreChunk(SeqStore_t* resultSeqStore, ++ const SeqStore_t* originalSeqStore, + size_t startIdx, size_t endIdx) +{ *resultSeqStore = *originalSeqStore; @@ -37253,7 +40102,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } /* Move longLengthPos into the correct position if necessary */ -@@ -3328,13 +3864,12 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, +@@ -3328,13 +3928,12 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, } resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx; resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx; @@ -37270,7 +40119,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } resultSeqStore->llCode += startIdx; resultSeqStore->mlCode += startIdx; -@@ -3342,20 +3877,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, +@@ -3342,20 +3941,26 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, } /* @@ -37307,26 +40156,27 @@ index 16bb995bc6c4..885167f7e47b 100644 } /* -@@ -3371,30 +3912,33 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c +@@ -3371,30 +3976,33 @@ ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, c * 1-3 : repcode 1-3 * 4+ : real_offset+3 */ -static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, - seqStore_t* const seqStore, U32 const nbSeq) { +static void -+ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, -+ const seqStore_t* const seqStore, U32 const nbSeq) ++ZSTD_seqStore_resolveOffCodes(Repcodes_t* const dRepcodes, Repcodes_t* const cRepcodes, ++ const SeqStore_t* const seqStore, U32 const nbSeq) +{ U32 idx = 0; + U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq; for (; idx < nbSeq; ++idx) { - seqDef* const seq = seqStore->sequencesStart + idx; +- seqDef* const seq = seqStore->sequencesStart + idx; - U32 const ll0 = (seq->litLength == 0); - U32 const offCode = OFFBASE_TO_STORED(seq->offBase); - assert(seq->offBase > 0); - if (STORED_IS_REPCODE(offCode)) { - U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0); - U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0); ++ SeqDef* const seq = seqStore->sequencesStart + idx; + U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx); + U32 const offBase = seq->offBase; + assert(offBase > 0); @@ -37352,21 +40202,40 @@ index 16bb995bc6c4..885167f7e47b 100644 } } -@@ -3404,10 +3948,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ +@@ -3404,10 +4012,11 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ * Returns the total size of that block (including header) or a ZSTD error code. 
*/ static size_t -ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, +- repcodes_t* const dRep, repcodes_t* const cRep, +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, -+ const seqStore_t* const seqStore, - repcodes_t* const dRep, repcodes_t* const cRep, ++ const SeqStore_t* const seqStore, ++ Repcodes_t* const dRep, Repcodes_t* const cRep, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, + const void* src, size_t srcSize, U32 lastBlock, U32 isPartition) { const U32 rleMaxLength = 25; -@@ -3442,8 +3987,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, +@@ -3417,7 +4026,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, + size_t cSeqsSize; + + /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ +- repcodes_t const dRepOriginal = *dRep; ++ Repcodes_t const dRepOriginal = *dRep; + DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock"); + if (isPartition) + ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); +@@ -3428,7 +4037,7 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, + &zc->appliedParams, + op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, + srcSize, +- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, ++ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, + zc->bmi2); + FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!"); + +@@ -3442,8 +4051,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, cSeqsSize = 1; } @@ -37377,7 +40246,29 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); return 0; } -@@ -3481,45 +4027,49 @@ typedef struct { +@@ -3451,18 +4061,18 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, + if (cSeqsSize == 0) { + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); + FORWARD_IF_ERROR(cSize, "Nocompress block failed"); +- DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize); ++ DEBUGLOG(5, "Writing out nocompress block, size: %zu", cSize); + *dRep = dRepOriginal; /* reset simulated decompression repcode history */ + } else if (cSeqsSize == 1) { + cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock); + FORWARD_IF_ERROR(cSize, "RLE compress block failed"); +- DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize); ++ DEBUGLOG(5, "Writing out RLE block, size: %zu", cSize); + *dRep = dRepOriginal; /* reset simulated decompression repcode history */ + } else { + ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); + writeBlockHeader(op, cSeqsSize, srcSize, lastBlock); + cSize = ZSTD_blockHeaderSize + cSeqsSize; +- DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize); ++ DEBUGLOG(5, "Writing out compressed block, size: %zu", cSize); + } + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) +@@ -3481,45 +4091,49 @@ typedef struct { /* Helper function to perform the recursive search for block splits. * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. 
@@ -37399,14 +40290,15 @@ index 16bb995bc6c4..885167f7e47b 100644 */ static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, - ZSTD_CCtx* zc, const seqStore_t* origSeqStore) +- ZSTD_CCtx* zc, const seqStore_t* origSeqStore) ++ ZSTD_CCtx* zc, const SeqStore_t* origSeqStore) { - seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; - seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; - seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; -+ seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; -+ seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; -+ seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; ++ SeqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; ++ SeqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; ++ SeqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; size_t estimatedOriginalSize; size_t estimatedFirstHalfSize; size_t estimatedSecondHalfSize; @@ -37437,7 +40329,7 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore); splits->splitLocations[splits->idx] = (U32)midIdx; splits->idx++; -@@ -3527,14 +4077,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end +@@ -3527,14 +4141,18 @@ ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t end } } @@ -37461,7 +40353,7 @@ index 16bb995bc6c4..885167f7e47b 100644 /* Refuse to try and split anything with less than 4 sequences */ return 0; } -@@ -3550,18 +4104,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) +@@ -3550,18 +4168,20 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) * Returns combined size of all blocks (which includes headers), or a ZSTD error code. */ static size_t @@ -37482,15 +40374,26 @@ index 16bb995bc6c4..885167f7e47b 100644 - seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; - size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); + U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ -+ seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; -+ seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; ++ SeqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore; ++ SeqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore; + size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two -@@ -3583,30 +4139,31 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac - ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); - ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); +@@ -3577,36 +4197,37 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac + * + * See ZSTD_seqStore_resolveOffCodes() for more details. 
+ */ +- repcodes_t dRep; +- repcodes_t cRep; +- ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); +- ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); +- ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); ++ Repcodes_t dRep; ++ Repcodes_t cRep; ++ ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); ++ ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(Repcodes_t)); ++ ZSTD_memset(nextSeqStore, 0, sizeof(SeqStore_t)); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", @@ -37512,8 +40415,8 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!"); DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits"); - assert(cSizeSingleBlock <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); -+ assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX); -+ assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize); ++ assert(zc->blockSizeMax <= ZSTD_BLOCKSIZE_MAX); ++ assert(cSizeSingleBlock <= zc->blockSizeMax + ZSTD_blockHeaderSize); return cSizeSingleBlock; } @@ -37529,7 +40432,7 @@ index 16bb995bc6c4..885167f7e47b 100644 srcBytesTotal += srcBytes; if (lastPartition) { /* This is the final partition, need to account for possible last literals */ -@@ -3621,7 +4178,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac +@@ -3621,7 +4242,8 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1 /* isPartition */); @@ -37539,21 +40442,24 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); ip += srcBytes; -@@ -3629,10 +4187,10 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac +@@ -3629,12 +4251,12 @@ ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapac dstCapacity -= cSizeChunk; cSize += cSizeChunk; *currSeqStore = *nextSeqStore; - assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); -+ assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize); ++ assert(cSizeChunk <= zc->blockSizeMax + ZSTD_blockHeaderSize); } - /* cRep and dRep may have diverged during the compression. If so, we use the dRep repcodes - * for the next block. + /* cRep and dRep may have diverged during the compression. + * If so, we use the dRep repcodes for the next block. 
*/ - ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); +- ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t)); ++ ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(Repcodes_t)); return cSize; -@@ -3643,8 +4201,6 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, + } + +@@ -3643,21 +4265,20 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) { @@ -37561,8 +40467,13 @@ index 16bb995bc6c4..885167f7e47b 100644 - BYTE* op = (BYTE*)dst; U32 nbSeq; size_t cSize; - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); -@@ -3655,7 +4211,8 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, +- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); +- assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable); ++ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock"); ++ assert(zc->appliedParams.postBlockSplitter == ZSTD_ps_enable); + + { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); + FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); if (bss == ZSTDbss_noCompress) { if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; @@ -37570,9 +40481,12 @@ index 16bb995bc6c4..885167f7e47b 100644 + RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block"); + cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); - DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); +- DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block"); ++ DEBUGLOG(5, "ZSTD_compressBlock_splitBlock: Nocompress block"); return cSize; -@@ -3673,9 +4230,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, + } + nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); +@@ -3673,9 +4294,9 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 frame) { @@ -37585,7 +40499,7 @@ index 16bb995bc6c4..885167f7e47b 100644 */ const U32 rleMaxLength = 25; size_t cSize; -@@ -3687,11 +4244,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, +@@ -3687,11 +4308,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); @@ -37603,7 +40517,16 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); return 0; } -@@ -3767,10 +4328,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, +@@ -3702,7 +4327,7 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc, + &zc->appliedParams, + dst, dstCapacity, + srcSize, +- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, ++ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */, + zc->bmi2); + + if (frame && +@@ -3767,10 +4392,11 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, * * cSize >= blockBound(srcSize): We have expanded the block too much so * emit an uncompressed block. 
*/ @@ -37618,7 +40541,7 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState); -@@ -3778,7 +4340,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, +@@ -3778,7 +4404,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, } } } @@ -37627,7 +40550,55 @@ index 16bb995bc6c4..885167f7e47b 100644 DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); /* Superblock compression failed, attempt to emit a single no compress block. -@@ -3836,7 +4398,7 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, +@@ -3807,7 +4433,7 @@ static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, + return cSize; + } + +-static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, ++static void ZSTD_overflowCorrectIfNeeded(ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + void const* ip, +@@ -3831,39 +4457,82 @@ static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, + } + } + ++#include "zstd_preSplit.h" ++ ++static size_t ZSTD_optimalBlockSize(ZSTD_CCtx* cctx, const void* src, size_t srcSize, size_t blockSizeMax, int splitLevel, ZSTD_strategy strat, S64 savings) ++{ ++ /* split level based on compression strategy, from `fast` to `btultra2` */ ++ static const int splitLevels[] = { 0, 0, 1, 2, 2, 3, 3, 4, 4, 4 }; ++ /* note: conservatively only split full blocks (128 KB) currently. ++ * While it's possible to go lower, let's keep it simple for a first implementation. ++ * Besides, benefits of splitting are reduced when blocks are already small. ++ */ ++ if (srcSize < 128 KB || blockSizeMax < 128 KB) ++ return MIN(srcSize, blockSizeMax); ++ /* do not split incompressible data though: ++ * require verified savings to allow pre-splitting. ++ * Note: as a consequence, the first full block is not split. ++ */ ++ if (savings < 3) { ++ DEBUGLOG(6, "don't attempt splitting: savings (%i) too low", (int)savings); ++ return 128 KB; ++ } ++ /* apply @splitLevel, or use default value (which depends on @strat). ++ * note that splitting heuristic is still conditioned by @savings >= 3, ++ * so the first block will not reach this code path */ ++ if (splitLevel == 1) return 128 KB; ++ if (splitLevel == 0) { ++ assert(ZSTD_fast <= strat && strat <= ZSTD_btultra2); ++ splitLevel = splitLevels[strat]; ++ } else { ++ assert(2 <= splitLevel && splitLevel <= 6); ++ splitLevel -= 2; ++ } ++ return ZSTD_splitBlock(src, blockSizeMax, splitLevel, cctx->tmpWorkspace, cctx->tmpWkspSize); ++} ++ + /*! ZSTD_compress_frameChunk() : + * Compress a chunk of data into one or multiple blocks. * All blocks will be terminated, all input will be consumed. * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. 
* Frame is supposed already started (header already produced) @@ -37636,27 +40607,89 @@ index 16bb995bc6c4..885167f7e47b 100644 */ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, -@@ -3860,7 +4422,9 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, - ZSTD_matchState_t* const ms = &cctx->blockState.matchState; - U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + const void* src, size_t srcSize, + U32 lastFrameChunk) + { +- size_t blockSize = cctx->blockSize; ++ size_t blockSizeMax = cctx->blockSizeMax; + size_t remaining = srcSize; + const BYTE* ip = (const BYTE*)src; + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; ++ S64 savings = (S64)cctx->consumedSrcSize - (S64)cctx->producedCSize; + assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); + +- DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); ++ DEBUGLOG(5, "ZSTD_compress_frameChunk (srcSize=%u, blockSizeMax=%u)", (unsigned)srcSize, (unsigned)blockSizeMax); + if (cctx->appliedParams.fParams.checksumFlag && srcSize) + xxh64_update(&cctx->xxhState, src, srcSize); + + while (remaining) { +- ZSTD_matchState_t* const ms = &cctx->blockState.matchState; +- U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); +- - RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, ++ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; ++ size_t const blockSize = ZSTD_optimalBlockSize(cctx, ++ ip, remaining, ++ blockSizeMax, ++ cctx->appliedParams.preBlockSplitter_level, ++ cctx->appliedParams.cParams.strategy, ++ savings); ++ U32 const lastBlock = lastFrameChunk & (blockSize == remaining); ++ assert(blockSize <= remaining); ++ + /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding + * additional 1. We need to revisit and change this logic to be more consistent */ + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1, dstSize_tooSmall, "not enough space to store compressed block"); - if (remaining < blockSize) blockSize = remaining; -@@ -3899,7 +4463,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, +- if (remaining < blockSize) blockSize = remaining; + + ZSTD_overflowCorrectIfNeeded( + ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); +@@ -3899,8 +4568,23 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, MEM_writeLE24(op, cBlockHeader); cSize += ZSTD_blockHeaderSize; } - } +- + } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/ - ++ ++ /* @savings is employed to ensure that splitting doesn't worsen expansion of incompressible data. ++ * Without splitting, the maximum expansion is 3 bytes per full block. ++ * An adversarial input could attempt to fudge the split detector, ++ * and make it split incompressible data, resulting in more block headers. ++ * Note that, since ZSTD_COMPRESSBOUND() assumes a worst case scenario of 1KB per block, ++ * and the splitter never creates blocks that small (current lower limit is 8 KB), ++ * there is already no risk to expand beyond ZSTD_COMPRESSBOUND() limit. ++ * But if the goal is to not expand by more than 3-bytes per 128 KB full block, ++ * then yes, it becomes possible to make the block splitter oversplit incompressible data. 
++ * Using @savings, we enforce an even more conservative condition, ++ * requiring the presence of enough savings (at least 3 bytes) to authorize splitting, ++ * otherwise only full blocks are used. ++ * But being conservative is fine, ++ * since splitting barely compressible blocks is not fruitful anyway */ ++ savings += (S64)blockSize - (S64)cSize; ip += blockSize; -@@ -4001,19 +4565,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity) + assert(remaining >= blockSize); +@@ -3919,8 +4603,10 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, + + + static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, +- const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID) +-{ BYTE* const op = (BYTE*)dst; ++ const ZSTD_CCtx_params* params, ++ U64 pledgedSrcSize, U32 dictID) ++{ ++ BYTE* const op = (BYTE*)dst; + U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ + U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */ + U32 const checksumFlag = params->fParams.checksumFlag>0; +@@ -4001,19 +4687,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity) } } @@ -37679,7 +40712,25 @@ index 16bb995bc6c4..885167f7e47b 100644 } -@@ -4078,31 +4638,51 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, +@@ -4022,7 +4704,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + U32 frame, U32 lastFrameChunk) + { +- ZSTD_matchState_t* const ms = &cctx->blockState.matchState; ++ ZSTD_MatchState_t* const ms = &cctx->blockState.matchState; + size_t fhSize = 0; + + DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", +@@ -4057,7 +4739,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, + src, (BYTE const*)src + srcSize); + } + +- DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize); ++ DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSizeMax); + { size_t const cSize = frame ? + ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : + ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */); +@@ -4078,58 +4760,90 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, } } @@ -37738,13 +40789,20 @@ index 16bb995bc6c4..885167f7e47b 100644 /*! ZSTD_loadDictionaryContent() : * @return : 0, or an error code */ -@@ -4111,25 +4691,36 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, - ZSTD_cwksp* ws, - ZSTD_CCtx_params const* params, - const void* src, size_t srcSize, +-static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, +- ldmState_t* ls, +- ZSTD_cwksp* ws, +- ZSTD_CCtx_params const* params, +- const void* src, size_t srcSize, - ZSTD_dictTableLoadMethod_e dtlm) -+ ZSTD_dictTableLoadMethod_e dtlm, -+ ZSTD_tableFillPurpose_e tfp) ++static size_t ++ZSTD_loadDictionaryContent(ZSTD_MatchState_t* ms, ++ ldmState_t* ls, ++ ZSTD_cwksp* ws, ++ ZSTD_CCtx_params const* params, ++ const void* src, size_t srcSize, ++ ZSTD_dictTableLoadMethod_e dtlm, ++ ZSTD_tableFillPurpose_e tfp) { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; @@ -37783,7 +40841,7 @@ index 16bb995bc6c4..885167f7e47b 100644 /* If the dictionary is too large, only load the suffix of the dictionary. 
*/ if (srcSize > maxDictSize) { ip = iend - maxDictSize; -@@ -4138,35 +4729,58 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, +@@ -4138,35 +4852,59 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, } } @@ -37798,17 +40856,18 @@ index 16bb995bc6c4..885167f7e47b 100644 - ms->forceNonContiguous = params->deterministicRefPrefix; - if (loadLdmDict) { -+ DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder); ++ DEBUGLOG(4, "ZSTD_loadDictionaryContent: useRowMatchFinder=%d", (int)params->useRowMatchFinder); + + if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */ ++ DEBUGLOG(4, "ZSTD_loadDictionaryContent: Trigger loadLdmDict"); ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0); ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); + ZSTD_ldm_fillHashTable(ls, ip, iend, ¶ms->ldmParams); ++ DEBUGLOG(4, "ZSTD_loadDictionaryContent: ZSTD_ldm_fillHashTable completes"); } + /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */ -+ if (params->cParams.strategy < ZSTD_btultra) { -+ U32 maxDictSize = 8U << MIN(MAX(params->cParams.hashLog, params->cParams.chainLog), 28); ++ { U32 maxDictSize = 1U << MIN(MAX(params->cParams.hashLog + 3, params->cParams.chainLog + 1), 31); + if (srcSize > maxDictSize) { + ip = iend - maxDictSize; + src = ip; @@ -37851,7 +40910,7 @@ index 16bb995bc6c4..885167f7e47b 100644 assert(srcSize >= HASH_READ_SIZE); if (ms->dedicatedDictSearch) { assert(ms->chainTable != NULL); -@@ -4174,7 +4788,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, +@@ -4174,7 +4912,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, } else { assert(params->useRowMatchFinder != ZSTD_ps_auto); if (params->useRowMatchFinder == ZSTD_ps_enable) { @@ -37860,7 +40919,7 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_memset(ms->tagTable, 0, tagTableSize); ZSTD_row_update(ms, iend-HASH_READ_SIZE); DEBUGLOG(4, "Using row-based hash table for lazy dict"); -@@ -4183,14 +4797,23 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, +@@ -4183,14 +4921,24 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, DEBUGLOG(4, "Using chain-based hash table for lazy dict"); } } @@ -37877,6 +40936,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) assert(srcSize >= HASH_READ_SIZE); ++ DEBUGLOG(4, "Fill %u bytes into the Binary Tree", (unsigned)srcSize); ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend); +#else + assert(0); /* shouldn't be called: cparams should've been adjusted. */ @@ -37884,7 +40944,12 @@ index 16bb995bc6c4..885167f7e47b 100644 break; default: -@@ -4237,11 +4860,10 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, +@@ -4233,20 +4981,19 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + { unsigned maxSymbolValue = 255; + unsigned hasZeroWeights = 1; + size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, +- dictEnd-dictPtr, &hasZeroWeights); ++ (size_t)(dictEnd-dictPtr), &hasZeroWeights); /* We only set the loaded table as valid if it contains all non-zero * weights. 
Otherwise, we set it to check */ @@ -37897,7 +40962,46 @@ index 16bb995bc6c4..885167f7e47b 100644 dictPtr += hufHeaderSize; } -@@ -4327,6 +4949,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, + { unsigned offcodeLog; +- size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); ++ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); + /* fill all offset symbols to avoid garbage at end of table */ +@@ -4261,7 +5008,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; +- size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); ++ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( +@@ -4275,7 +5022,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; +- size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); ++ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); + RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( +@@ -4309,7 +5056,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); + } } } + +- return dictPtr - (const BYTE*)dict; ++ return (size_t)(dictPtr - (const BYTE*)dict); + } + + /* Dictionary format : +@@ -4322,11 +5069,12 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + * dictSize supposed >= 8 + */ + static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, +- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, + ZSTD_cwksp* ws, ZSTD_CCtx_params const* params, const void* dict, size_t dictSize, ZSTD_dictTableLoadMethod_e dtlm, @@ -37905,7 +41009,7 @@ index 16bb995bc6c4..885167f7e47b 100644 void* workspace) { const BYTE* dictPtr = (const BYTE*)dict; -@@ -4345,7 +4968,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, +@@ -4345,7 +5093,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( @@ -37914,7 +41018,15 @@ index 16bb995bc6c4..885167f7e47b 100644 } return dictID; } -@@ -4361,6 +4984,7 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, +@@ -4354,13 +5102,14 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs, + * @return : dictID, or an error code */ + static size_t + ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, 
+- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + const ZSTD_CCtx_params* params, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, ZSTD_dictTableLoadMethod_e dtlm, @@ -37922,7 +41034,7 @@ index 16bb995bc6c4..885167f7e47b 100644 void* workspace) { DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); -@@ -4373,13 +4997,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, +@@ -4373,13 +5122,13 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, /* dict restricted modes */ if (dictContentType == ZSTD_dct_rawContent) @@ -37938,7 +41050,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); assert(0); /* impossible */ -@@ -4387,13 +5011,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, +@@ -4387,13 +5136,14 @@ ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, /* dict as full zstd dictionary */ return ZSTD_loadZstdDictionary( @@ -37954,21 +41066,21 @@ index 16bb995bc6c4..885167f7e47b 100644 * @return : 0, or an error code */ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, -@@ -4426,11 +5051,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, +@@ -4426,11 +5176,11 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, cdict->dictContentSize, cdict->dictContentType, dtlm, - cctx->entropyWorkspace) -+ ZSTD_tfp_forCCtx, cctx->entropyWorkspace) ++ ZSTD_tfp_forCCtx, cctx->tmpWorkspace) : ZSTD_compress_insertDictionary( cctx->blockState.prevCBlock, &cctx->blockState.matchState, &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, - dictContentType, dtlm, cctx->entropyWorkspace); -+ dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace); ++ dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->tmpWorkspace); FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= UINT_MAX); cctx->dictID = (U32)dictID; -@@ -4471,11 +5096,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, +@@ -4471,11 +5221,11 @@ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, &cctxParams, pledgedSrcSize); } @@ -37983,7 +41095,7 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_CCtxParams_init_internal(&cctxParams, ¶ms, (compressionLevel == 0) ? 
ZSTD_CLEVEL_DEFAULT : compressionLevel); } DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); -@@ -4483,9 +5108,15 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di +@@ -4483,9 +5233,15 @@ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t di &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); } @@ -38000,7 +41112,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } -@@ -4496,14 +5127,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +@@ -4496,14 +5252,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) { BYTE* const ostart = (BYTE*)dst; BYTE* op = ostart; @@ -38016,7 +41128,7 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); dstCapacity -= fhSize; op += fhSize; -@@ -4513,8 +5143,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +@@ -4513,8 +5268,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) if (cctx->stage != ZSTDcs_ending) { /* write one last empty block, make it the "last" block */ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; @@ -38028,7 +41140,16 @@ index 16bb995bc6c4..885167f7e47b 100644 op += ZSTD_blockHeaderSize; dstCapacity -= ZSTD_blockHeaderSize; } -@@ -4537,9 +5168,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) +@@ -4528,7 +5284,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) + } + + cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ +- return op-ostart; ++ return (size_t)(op-ostart); + } + + void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) +@@ -4537,9 +5293,9 @@ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize) (void)extraCSize; } @@ -38041,7 +41162,7 @@ index 16bb995bc6c4..885167f7e47b 100644 { size_t endResult; size_t const cSize = ZSTD_compressContinue_internal(cctx, -@@ -4563,6 +5194,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, +@@ -4563,6 +5319,14 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, return cSize + endResult; } @@ -38056,7 +41177,7 @@ index 16bb995bc6c4..885167f7e47b 100644 size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, -@@ -4591,7 +5230,7 @@ size_t ZSTD_compress_advanced_internal( +@@ -4591,7 +5355,7 @@ size_t ZSTD_compress_advanced_internal( FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, params, srcSize, ZSTDb_not_buffered) , ""); @@ -38065,7 +41186,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, -@@ -4709,7 +5348,7 @@ static size_t ZSTD_initCDict_internal( +@@ -4709,7 +5473,7 @@ static size_t ZSTD_initCDict_internal( { size_t const dictID = ZSTD_compress_insertDictionary( &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, ¶ms, cdict->dictContent, cdict->dictContentSize, @@ -38074,7 +41195,56 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); assert(dictID <= (size_t)(U32)-1); cdict->dictID = (U32)dictID; -@@ -4813,7 +5452,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( +@@ -4719,14 +5483,16 @@ static size_t ZSTD_initCDict_internal( + return 0; + } + +-static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize, +- ZSTD_dictLoadMethod_e dictLoadMethod, +- ZSTD_compressionParameters cParams, +- ZSTD_paramSwitch_e 
useRowMatchFinder, +- U32 enableDedicatedDictSearch, +- ZSTD_customMem customMem) ++static ZSTD_CDict* ++ZSTD_createCDict_advanced_internal(size_t dictSize, ++ ZSTD_dictLoadMethod_e dictLoadMethod, ++ ZSTD_compressionParameters cParams, ++ ZSTD_ParamSwitch_e useRowMatchFinder, ++ int enableDedicatedDictSearch, ++ ZSTD_customMem customMem) + { + if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; ++ DEBUGLOG(3, "ZSTD_createCDict_advanced_internal (dictSize=%u)", (unsigned)dictSize); + + { size_t const workspaceSize = + ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + +@@ -4763,6 +5529,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, + { + ZSTD_CCtx_params cctxParams; + ZSTD_memset(&cctxParams, 0, sizeof(cctxParams)); ++ DEBUGLOG(3, "ZSTD_createCDict_advanced, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType); + ZSTD_CCtxParams_init(&cctxParams, 0); + cctxParams.cParams = cParams; + cctxParams.customMem = customMem; +@@ -4783,7 +5550,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( + ZSTD_compressionParameters cParams; + ZSTD_CDict* cdict; + +- DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType); ++ DEBUGLOG(3, "ZSTD_createCDict_advanced2, dictSize=%u, mode=%u", (unsigned)dictSize, (unsigned)dictContentType); + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + if (cctxParams.enableDedicatedDictSearch) { +@@ -4802,7 +5569,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( + &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict); + } + +- DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch); ++ DEBUGLOG(3, "ZSTD_createCDict_advanced2: DedicatedDictSearch=%u", cctxParams.enableDedicatedDictSearch); + cctxParams.cParams = cParams; + cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); + +@@ -4813,7 +5580,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2( if (!cdict) return NULL; @@ -38083,7 +41253,41 @@ index 16bb995bc6c4..885167f7e47b 100644 dict, dictSize, dictLoadMethod, dictContentType, cctxParams) )) { -@@ -4908,6 +5547,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( +@@ -4867,7 +5634,7 @@ size_t ZSTD_freeCDict(ZSTD_CDict* cdict) + * workspaceSize: Use ZSTD_estimateCDictSize() + * to determine how large workspace must be. + * cParams : use ZSTD_getCParams() to transform a compression level +- * into its relevants cParams. ++ * into its relevant cParams. + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) + * Note : there is no corresponding "free" function. + * Since workspace was allocated externally, it must be freed externally. 
+@@ -4879,7 +5646,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams) + { +- ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); ++ ZSTD_ParamSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); + /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ + size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); + size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +@@ -4890,6 +5657,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( + ZSTD_CDict* cdict; + ZSTD_CCtx_params params; + ++ DEBUGLOG(4, "ZSTD_initStaticCDict (dictSize==%u)", (unsigned)dictSize); + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + + { +@@ -4900,14 +5668,13 @@ const ZSTD_CDict* ZSTD_initStaticCDict( + ZSTD_cwksp_move(&cdict->workspace, &ws); + } + +- DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", +- (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize)); + if (workspaceSize < neededSize) return NULL; + + ZSTD_CCtxParams_init(¶ms, 0); params.cParams = cParams; params.useRowMatchFinder = useRowMatchFinder; cdict->useRowMatchFinder = useRowMatchFinder; @@ -38091,7 +41295,7 @@ index 16bb995bc6c4..885167f7e47b 100644 if (ZSTD_isError( ZSTD_initCDict_internal(cdict, dict, dictSize, -@@ -4987,12 +5627,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced( +@@ -4987,12 +5754,17 @@ size_t ZSTD_compressBegin_usingCDict_advanced( /* ZSTD_compressBegin_usingCDict() : * cdict must be != NULL */ @@ -38110,7 +41314,7 @@ index 16bb995bc6c4..885167f7e47b 100644 /*! ZSTD_compress_usingCDict_internal(): * Implementation of various ZSTD_compress_usingCDict* functions. */ -@@ -5002,7 +5647,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx, +@@ -5002,7 +5774,7 @@ static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) { FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ @@ -38119,7 +41323,16 @@ index 16bb995bc6c4..885167f7e47b 100644 } /*! 
ZSTD_compress_usingCDict_advanced(): -@@ -5199,30 +5844,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) +@@ -5068,7 +5840,7 @@ size_t ZSTD_CStreamOutSize(void) + return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; + } + +-static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) ++static ZSTD_CParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize) + { + if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) + return ZSTD_cpm_attachDict; +@@ -5199,30 +5971,41 @@ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) { @@ -38127,11 +41340,11 @@ index 16bb995bc6c4..885167f7e47b 100644 - if (hintInSize==0) hintInSize = cctx->blockSize; - return hintInSize; + if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { -+ return cctx->blockSize - cctx->stableIn_notConsumed; ++ return cctx->blockSizeMax - cctx->stableIn_notConsumed; + } + assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered); + { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; -+ if (hintInSize==0) hintInSize = cctx->blockSize; ++ if (hintInSize==0) hintInSize = cctx->blockSizeMax; + return hintInSize; + } } @@ -38173,7 +41386,7 @@ index 16bb995bc6c4..885167f7e47b 100644 if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) { assert(zcs->inBuff != NULL); assert(zcs->inBuffSize > 0); -@@ -5231,8 +5887,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, +@@ -5231,8 +6014,10 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, assert(zcs->outBuff != NULL); assert(zcs->outBuffSize > 0); } @@ -38185,18 +41398,29 @@ index 16bb995bc6c4..885167f7e47b 100644 assert((U32)flushMode <= (U32)ZSTD_e_end); while (someMoreWork) { -@@ -5247,7 +5905,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, +@@ -5243,12 +6028,13 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + + case zcss_load: + if ( (flushMode == ZSTD_e_end) +- && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */ ++ && ( (size_t)(oend-op) >= ZSTD_compressBound((size_t)(iend-ip)) /* Enough output space */ || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */ && (zcs->inBuffPos == 0) ) { /* shortcut to compression pass directly into output buffer */ - size_t const cSize = ZSTD_compressEnd(zcs, +- op, oend-op, ip, iend-ip); + size_t const cSize = ZSTD_compressEnd_public(zcs, - op, oend-op, ip, iend-ip); ++ op, (size_t)(oend-op), ++ ip, (size_t)(iend-ip)); DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); -@@ -5264,8 +5922,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + ip = iend; +@@ -5262,10 +6048,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + size_t const loaded = ZSTD_limitCopy( zcs->inBuff + zcs->inBuffPos, toLoad, - ip, iend-ip); +- ip, iend-ip); ++ ip, (size_t)(iend-ip)); zcs->inBuffPos += loaded; - if (loaded != 0) - ip += loaded; @@ -38204,14 +41428,14 @@ index 16bb995bc6c4..885167f7e47b 100644 if ( (flushMode == ZSTD_e_continue) && (zcs->inBuffPos < zcs->inBuffTarget) ) { /* not enough input to fill full block : stop here */ -@@ -5276,6 +5933,20 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* 
zcs, +@@ -5276,16 +6061,29 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, /* empty */ someMoreWork = 0; break; } + } else { + assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable); + if ( (flushMode == ZSTD_e_continue) -+ && ( (size_t)(iend - ip) < zcs->blockSize) ) { ++ && ( (size_t)(iend - ip) < zcs->blockSizeMax) ) { + /* can't compress a full block : stop here */ + zcs->stableIn_notConsumed = (size_t)(iend - ip); + ip = iend; /* pretend to have consumed input */ @@ -38225,19 +41449,20 @@ index 16bb995bc6c4..885167f7e47b 100644 } /* compress current block (note : this stage cannot be stopped in the middle) */ DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); -@@ -5283,9 +5954,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered); void* cDst; size_t cSize; - size_t oSize = oend-op; +- size_t oSize = oend-op; - size_t const iSize = inputBuffered - ? zcs->inBuffPos - zcs->inToCompress - : MIN((size_t)(iend - ip), zcs->blockSize); ++ size_t oSize = (size_t)(oend-op); + size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress -+ : MIN((size_t)(iend - ip), zcs->blockSize); ++ : MIN((size_t)(iend - ip), zcs->blockSizeMax); if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) cDst = op; /* compress into output buffer, to skip flush stage */ else -@@ -5293,9 +5963,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, +@@ -5293,34 +6091,31 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, if (inputBuffered) { unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); cSize = lastBlock ? @@ -38249,7 +41474,14 @@ index 16bb995bc6c4..885167f7e47b 100644 zcs->inBuff + zcs->inToCompress, iSize); FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); zcs->frameEnded = lastBlock; -@@ -5308,19 +5978,16 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + /* prepare next block */ +- zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; ++ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSizeMax; + if (zcs->inBuffTarget > zcs->inBuffSize) +- zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; ++ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSizeMax; + DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", + (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); if (!lastBlock) assert(zcs->inBuffTarget <= zcs->inBuffSize); zcs->inToCompress = zcs->inBuffPos; @@ -38275,7 +41507,18 @@ index 16bb995bc6c4..885167f7e47b 100644 } if (cDst == op) { /* no need to flush */ op += cSize; -@@ -5390,8 +6057,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf +@@ -5369,8 +6164,8 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + } + } + +- input->pos = ip - istart; +- output->pos = op - ostart; ++ input->pos = (size_t)(ip - istart); ++ output->pos = (size_t)(op - ostart); + if (zcs->frameEnded) return 0; + return ZSTD_nextInputSizeHint(zcs); + } +@@ -5390,8 +6185,10 @@ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuf /* After a compression call set the expected input/output buffer. * This is validated at the start of the next compression call. 
*/ @@ -38287,7 +41530,7 @@ index 16bb995bc6c4..885167f7e47b 100644 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { cctx->expectedInBuffer = *input; } -@@ -5410,22 +6079,22 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, +@@ -5410,22 +6207,27 @@ static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx, { if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) { ZSTD_inBuffer const expect = cctx->expectedInBuffer; @@ -38308,6 +41551,11 @@ index 16bb995bc6c4..885167f7e47b 100644 return 0; } ++/* ++ * If @endOp == ZSTD_e_end, @inSize becomes pledgedSrcSize. ++ * Otherwise, it's ignored. ++ * @return: 0 on success, or a ZSTD_error code otherwise. ++ */ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, ZSTD_EndDirective endOp, - size_t inSize) { @@ -38316,21 +41564,29 @@ index 16bb995bc6c4..885167f7e47b 100644 ZSTD_CCtx_params params = cctx->requestedParams; ZSTD_prefixDict const prefixDict = cctx->prefixDict; FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */ -@@ -5439,9 +6108,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, +@@ -5438,21 +6240,24 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, + */ params.compressionLevel = cctx->cdict->compressionLevel; } - DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); +- DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); - if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */ - { - size_t const dictSize = prefixDict.dict ++ DEBUGLOG(4, "ZSTD_CCtx_init_compressStream2 : transparent init stage"); + if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */ + + { size_t const dictSize = prefixDict.dict ? prefixDict.dictSize : (cctx->cdict ? 
cctx->cdict->dictContentSize : 0); - ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); -@@ -5453,6 +6122,9 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, - params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); +- ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); ++ ZSTD_CParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, ¶ms, cctx->pledgedSrcSizePlusOne - 1); + params.cParams = ZSTD_getCParamsFromCCtxParams( + ¶ms, cctx->pledgedSrcSizePlusOne-1, + dictSize, mode); + } + +- params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); ++ params.postBlockSplitter = ZSTD_resolveBlockSplitterMode(params.postBlockSplitter, ¶ms.cParams); params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); + params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences); @@ -38339,7 +41595,16 @@ index 16bb995bc6c4..885167f7e47b 100644 { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1; assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); -@@ -5479,6 +6151,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, +@@ -5468,7 +6273,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, + /* for small input: avoid automatic flush on reaching end of block, since + * it would require to add a 3-bytes null block to end frame + */ +- cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize); ++ cctx->inBuffTarget = cctx->blockSizeMax + (cctx->blockSizeMax == pledgedSrcSize); + } else { + cctx->inBuffTarget = 0; + } +@@ -5479,6 +6284,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, return 0; } @@ -38348,7 +41613,7 @@ index 16bb995bc6c4..885167f7e47b 100644 size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input, -@@ -5493,8 +6167,27 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, +@@ -5493,8 +6300,27 @@ size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, /* transparent initialization stage */ if (cctx->streamStage == zcss_init) { @@ -38378,7 +41643,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } /* end of transparent initialization stage */ -@@ -5512,13 +6205,20 @@ size_t ZSTD_compressStream2_simpleArgs ( +@@ -5512,13 +6338,20 @@ size_t ZSTD_compressStream2_simpleArgs ( const void* src, size_t srcSize, size_t* srcPos, ZSTD_EndDirective endOp) { @@ -38405,7 +41670,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } size_t ZSTD_compress2(ZSTD_CCtx* cctx, -@@ -5541,6 +6241,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, +@@ -5541,6 +6374,7 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, /* Reset to the original values. 
*/ cctx->requestedParams.inBufferMode = originalInBufferMode; cctx->requestedParams.outBufferMode = originalOutBufferMode; @@ -38413,7 +41678,7 @@ index 16bb995bc6c4..885167f7e47b 100644 FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); if (result != 0) { /* compression not completed, due to lack of output space */ assert(oPos == dstCapacity); -@@ -5551,64 +6252,61 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, +@@ -5551,64 +6385,67 @@ size_t ZSTD_compress2(ZSTD_CCtx* cctx, } } @@ -38424,13 +41689,14 @@ index 16bb995bc6c4..885167f7e47b 100644 -} ZSTD_sequencePosition; - /* ZSTD_validateSequence() : - * @offCode : is presumed to follow format required by ZSTD_storeSeq() +- * @offCode : is presumed to follow format required by ZSTD_storeSeq() ++ * @offBase : must use the format required by ZSTD_storeSeq() * @returns a ZSTD error code if sequence is not valid */ static size_t -ZSTD_validateSequence(U32 offCode, U32 matchLength, - size_t posInSrc, U32 windowLog, size_t dictSize) -+ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch, ++ZSTD_validateSequence(U32 offBase, U32 matchLength, U32 minMatch, + size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer) { - U32 const windowSize = 1 << windowLog; @@ -38444,7 +41710,7 @@ index 16bb995bc6c4..885167f7e47b 100644 - RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!"); - RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small"); + size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4; -+ RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); ++ RETURN_ERROR_IF(offBase > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!"); + /* Validate maxNbSeq is large enough for the given matchLength and minMatch */ + RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch"); return 0; @@ -38476,33 +41742,43 @@ index 16bb995bc6c4..885167f7e47b 100644 -/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of - * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. -- */ --static size_t -+size_t - ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, - ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, ++/* This function scans through an array of ZSTD_Sequence, ++ * storing the sequences it reads, until it reaches a block delimiter. ++ * Note that the block delimiter includes the last literals of the block. ++ * @blockSize must be == sum(sequence_lengths). ++ * @returns @blockSize on success, and a ZSTD_error otherwise. 
+ */ + static size_t +-ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, +- ZSTD_sequencePosition* seqPos, +- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) -+ const void* src, size_t blockSize, -+ ZSTD_paramSwitch_e externalRepSearch) ++ZSTD_transferSequences_wBlockDelim(ZSTD_CCtx* cctx, ++ ZSTD_SequencePosition* seqPos, ++ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, ++ const void* src, size_t blockSize, ++ ZSTD_ParamSwitch_e externalRepSearch) { U32 idx = seqPos->idx; + U32 const startIdx = idx; BYTE const* ip = (BYTE const*)(src); const BYTE* const iend = ip + blockSize; - repcodes_t updatedRepcodes; +- repcodes_t updatedRepcodes; ++ Repcodes_t updatedRepcodes; U32 dictSize; -+ DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize); ++ DEBUGLOG(5, "ZSTD_transferSequences_wBlockDelim (blockSize = %zu)", blockSize); + if (cctx->cdict) { dictSize = (U32)cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { -@@ -5617,25 +6315,55 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, +@@ -5616,27 +6453,60 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, + } else { dictSize = 0; } - ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); +- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); - for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) { ++ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); + for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) { U32 const litLength = inSeqs[idx].litLength; - U32 const ll0 = (litLength == 0); @@ -38525,8 +41801,10 @@ index 16bb995bc6c4..885167f7e47b 100644 seqPos->posInSrc += litLength + matchLength; - FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize), -+ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc, -+ cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)), ++ FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, ++ seqPos->posInSrc, ++ cctx->appliedParams.cParams.windowLog, dictSize, ++ ZSTD_hasExtSeqProd(&cctx->appliedParams)), "Sequence validation failed"); } - RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, @@ -38536,6 +41814,8 @@ index 16bb995bc6c4..885167f7e47b 100644 + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength); ip += matchLength + litLength; } +- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); ++ RETURN_ERROR_IF(idx == inSeqsSize, externalSequences_invalid, "Block delimiter not found."); + + /* If we skipped repcode search while parsing, we need to update repcodes now */ + assert(externalRepSearch != ZSTD_ps_auto); @@ -38560,40 +41840,61 @@ index 16bb995bc6c4..885167f7e47b 100644 + } + } + - ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); ++ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); if (inSeqs[idx].litLength) { -@@ -5644,26 +6372,15 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, + DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength); +@@ -5644,37 +6514,43 @@ 
ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ip += inSeqs[idx].litLength; seqPos->posInSrc += inSeqs[idx].litLength; } - RETURN_ERROR_IF(ip != iend, corruption_detected, "Blocksize doesn't agree with block delimiter!"); + RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!"); seqPos->idx = idx+1; - return 0; +- return 0; ++ return blockSize; } -/* Returns the number of bytes to move the current read position back by. Only non-zero - * if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something - * went wrong. -- * ++/* ++ * This function attempts to scan through @blockSize bytes in @src ++ * represented by the sequences in @inSeqs, ++ * storing any (partial) sequences. + * - * This function will attempt to scan through blockSize bytes represented by the sequences - * in inSeqs, storing any (partial) sequences. -- * ++ * Occasionally, we may want to reduce the actual number of bytes consumed from @src ++ * to avoid splitting a match, notably if it would produce a match smaller than MINMATCH. + * - * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to - * avoid splitting a match, or to avoid splitting a match such that it would produce a match - * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. -- */ --static size_t -+size_t - ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, ++ * @returns the number of bytes consumed from @src, necessarily <= @blockSize. ++ * Otherwise, it may return a ZSTD error if something went wrong. + */ + static size_t +-ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, +- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) -+ const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch) ++ZSTD_transferSequences_noDelim(ZSTD_CCtx* cctx, ++ ZSTD_SequencePosition* seqPos, ++ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, ++ const void* src, size_t blockSize, ++ ZSTD_ParamSwitch_e externalRepSearch) { U32 idx = seqPos->idx; U32 startPosInSequence = seqPos->posInSequence; -@@ -5675,6 +6392,9 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* + U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; + size_t dictSize; +- BYTE const* ip = (BYTE const*)(src); +- BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ +- repcodes_t updatedRepcodes; ++ const BYTE* const istart = (const BYTE*)(src); ++ const BYTE* ip = istart; ++ const BYTE* iend = istart + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */ ++ Repcodes_t updatedRepcodes; U32 bytesAdjustment = 0; U32 finalMatchSplit = 0; @@ -38603,16 +41904,17 @@ index 16bb995bc6c4..885167f7e47b 100644 if (cctx->cdict) { dictSize = cctx->cdict->dictContentSize; } else if (cctx->prefixDict.dict) { -@@ -5682,7 +6402,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* +@@ -5682,15 +6558,15 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* } else { dictSize = 0; } - DEBUGLOG(5, "ZSTD_copySequencesToSeqStore: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); -+ DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", 
idx, startPosInSequence, blockSize); ++ DEBUGLOG(5, "ZSTD_transferSequences_noDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize); DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); - ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); +- ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); ++ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { -@@ -5690,7 +6410,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* + const ZSTD_Sequence currSeq = inSeqs[idx]; U32 litLength = currSeq.litLength; U32 matchLength = currSeq.matchLength; U32 const rawOffset = currSeq.offset; @@ -38621,7 +41923,7 @@ index 16bb995bc6c4..885167f7e47b 100644 /* Modify the sequence depending on where endPosInSequence lies */ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { -@@ -5704,7 +6424,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* +@@ -5704,7 +6580,6 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* /* Move to the next sequence */ endPosInSequence -= currSeq.litLength + currSeq.matchLength; startPosInSequence = 0; @@ -38629,7 +41931,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } else { /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence does not reach the end of the match. So, we have to split the sequence */ -@@ -5744,21 +6463,23 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* +@@ -5744,58 +6619,113 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* } /* Check if this offset can be represented with a repcode */ { U32 const ll0 = (litLength == 0); @@ -38660,25 +41962,62 @@ index 16bb995bc6c4..885167f7e47b 100644 } DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength); -@@ -5781,7 +6502,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* + seqPos->idx = idx; + seqPos->posInSequence = endPosInSequence; +- ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); ++ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(Repcodes_t)); - typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, -- const void* src, size_t blockSize); -+ const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); - static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) - { - ZSTD_sequenceCopier sequenceCopier = NULL; -@@ -5795,6 +6516,57 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) - return sequenceCopier; + iend -= bytesAdjustment; + if (ip != iend) { + /* Store any last literals */ +- U32 lastLLSize = (U32)(iend - ip); ++ U32 const lastLLSize = (U32)(iend - ip); + assert(ip <= iend); + DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize); + ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize); + seqPos->posInSrc += lastLLSize; + } + +- return bytesAdjustment; ++ return (size_t)(iend-istart); } +-typedef size_t 
(*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, +- const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, +- const void* src, size_t blockSize); +-static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) ++/* @seqPos represents a position within @inSeqs, ++ * it is read and updated by this function, ++ * once the goal to produce a block of size @blockSize is reached. ++ * @return: nb of bytes consumed from @src, necessarily <= @blockSize. ++ */ ++typedef size_t (*ZSTD_SequenceCopier_f)(ZSTD_CCtx* cctx, ++ ZSTD_SequencePosition* seqPos, ++ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, ++ const void* src, size_t blockSize, ++ ZSTD_ParamSwitch_e externalRepSearch); ++ ++static ZSTD_SequenceCopier_f ZSTD_selectSequenceCopier(ZSTD_SequenceFormat_e mode) + { +- ZSTD_sequenceCopier sequenceCopier = NULL; +- assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode)); ++ assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, (int)mode)); + if (mode == ZSTD_sf_explicitBlockDelimiters) { +- return ZSTD_copySequencesToSeqStoreExplicitBlockDelim; +- } else if (mode == ZSTD_sf_noBlockDelimiters) { +- return ZSTD_copySequencesToSeqStoreNoBlockDelim; ++ return ZSTD_transferSequences_wBlockDelim; ++ } ++ assert(mode == ZSTD_sf_noBlockDelimiters); ++ return ZSTD_transferSequences_noDelim; ++} ++ +/* Discover the size of next block by searching for the delimiter. + * Note that a block delimiter **must** exist in this mode, + * otherwise it's an input error. + * The block size retrieved will be later compared to ensure it remains within bounds */ +static size_t -+blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) ++blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_SequencePosition seqPos) +{ + int end = 0; + size_t blockSize = 0; @@ -38694,26 +42033,26 @@ index 16bb995bc6c4..885167f7e47b 100644 + break; + } + spos++; -+ } + } +- assert(sequenceCopier != NULL); +- return sequenceCopier; + if (!end) + RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter"); + return blockSize; -+} -+ -+/* More a "target" block size */ -+static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining) -+{ -+ int const lastBlock = (remaining <= blockSize); -+ return lastBlock ? remaining : blockSize; -+} -+ -+static size_t determine_blockSize(ZSTD_sequenceFormat_e mode, + } + +-/* Compress, block-by-block, all of the sequences given. ++static size_t determine_blockSize(ZSTD_SequenceFormat_e mode, + size_t blockSize, size_t remaining, -+ const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos) ++ const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ++ ZSTD_SequencePosition seqPos) +{ + DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining); -+ if (mode == ZSTD_sf_noBlockDelimiters) -+ return blockSize_noDelimiter(blockSize, remaining); ++ if (mode == ZSTD_sf_noBlockDelimiters) { ++ /* Note: more a "target" block size */ ++ return MIN(remaining, blockSize); ++ } ++ assert(mode == ZSTD_sf_explicitBlockDelimiters); + { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos); + FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters"); + if (explicitBlockSize > blockSize) @@ -38724,10 +42063,11 @@ index 16bb995bc6c4..885167f7e47b 100644 + } +} + - /* Compress, block-by-block, all of the sequences given. 
++/* Compress all provided sequences, block-by-block. * * Returns the cumulative size of all compressed blocks (including their headers), -@@ -5807,9 +6579,6 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, + * otherwise a ZSTD error. +@@ -5807,15 +6737,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, const void* src, size_t srcSize) { size_t cSize = 0; @@ -38735,31 +42075,43 @@ index 16bb995bc6c4..885167f7e47b 100644 - size_t blockSize; - size_t compressedSeqsSize; size_t remaining = srcSize; - ZSTD_sequencePosition seqPos = {0, 0, 0}; +- ZSTD_sequencePosition seqPos = {0, 0, 0}; ++ ZSTD_SequencePosition seqPos = {0, 0, 0}; -@@ -5829,22 +6598,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, +- BYTE const* ip = (BYTE const*)src; ++ const BYTE* ip = (BYTE const*)src; + BYTE* op = (BYTE*)dst; +- ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); ++ ZSTD_SequenceCopier_f const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); + + DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize); + /* Special case: empty frame */ +@@ -5829,22 +6756,29 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, } while (remaining) { + size_t compressedSeqsSize; size_t cBlockSize; - size_t additionalByteAdjustment; +- size_t additionalByteAdjustment; - lastBlock = remaining <= cctx->blockSize; - blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize; + size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters, -+ cctx->blockSize, remaining, ++ cctx->blockSizeMax, remaining, + inSeqs, inSeqsSize, seqPos); + U32 const lastBlock = (blockSize == remaining); + FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size"); + assert(blockSize <= remaining); ZSTD_resetSeqStore(&cctx->seqStore); - DEBUGLOG(4, "Working on new block. Blocksize: %zu", blockSize); -+ DEBUGLOG(5, "Working on new block. 
Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize); - additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize); -+ additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes); - FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); - blockSize -= additionalByteAdjustment; +- FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy"); +- blockSize -= additionalByteAdjustment; ++ blockSize = sequenceCopier(cctx, ++ &seqPos, inSeqs, inSeqsSize, ++ ip, blockSize, ++ cctx->appliedParams.searchForExternalRepcodes); ++ FORWARD_IF_ERROR(blockSize, "Bad sequence copy"); /* If blocks are too small, emit as a nocompress block */ - if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) { @@ -38769,11 +42121,11 @@ index 16bb995bc6c4..885167f7e47b 100644 cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed"); - DEBUGLOG(4, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); -+ DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize); ++ DEBUGLOG(5, "Block too small (%zu): data remains uncompressed: cSize=%zu", blockSize, cBlockSize); cSize += cBlockSize; ip += blockSize; op += cBlockSize; -@@ -5853,6 +6629,7 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, +@@ -5853,35 +6787,36 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, continue; } @@ -38781,8 +42133,10 @@ index 16bb995bc6c4..885167f7e47b 100644 compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore, &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, &cctx->appliedParams, -@@ -5861,11 +6638,11 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, - cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, + op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, + blockSize, +- cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, ++ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, cctx->bmi2); FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); - DEBUGLOG(4, "Compressed sequences size: %zu", compressedSeqsSize); @@ -38791,11 +42145,18 @@ index 16bb995bc6c4..885167f7e47b 100644 if (!cctx->isFirstBlock && ZSTD_maybeRLE(&cctx->seqStore) && - ZSTD_isRLE((BYTE const*)src, srcSize)) { +- /* We don't want to emit our first block as a RLE even if it qualifies because +- * doing so will cause the decoder (cli only) to throw a "should consume all input error." +- * This is only an issue for zstd <= v1.4.3 +- */ + ZSTD_isRLE(ip, blockSize)) { - /* We don't want to emit our first block as a RLE even if it qualifies because - * doing so will cause the decoder (cli only) to throw a "should consume all input error." - * This is only an issue for zstd <= v1.4.3 -@@ -5876,12 +6653,12 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, ++ /* Note: don't emit the first block as RLE even if it qualifies because ++ * doing so will cause the decoder (cli <= v1.4.3 only) to throw an (invalid) error ++ * "should consume all input error." 
++ */ + compressedSeqsSize = 1; + } + if (compressedSeqsSize == 0) { /* ZSTD_noCompressBlock writes the block header as well */ cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); @@ -38812,7 +42173,7 @@ index 16bb995bc6c4..885167f7e47b 100644 } else { U32 cBlockHeader; /* Error checking and repcodes update */ -@@ -5893,11 +6670,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, +@@ -5893,11 +6828,10 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); MEM_writeLE24(op, cBlockHeader); cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; @@ -38825,7 +42186,7 @@ index 16bb995bc6c4..885167f7e47b 100644 if (lastBlock) { break; -@@ -5908,12 +6684,15 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, +@@ -5908,41 +6842,50 @@ ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, dstCapacity -= cBlockSize; cctx->isFirstBlock = 0; } @@ -38842,20 +42203,582 @@ index 16bb995bc6c4..885167f7e47b 100644 const ZSTD_Sequence* inSeqs, size_t inSeqsSize, const void* src, size_t srcSize) { -@@ -5923,7 +6702,7 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci - size_t frameHeaderSize = 0; + BYTE* op = (BYTE*)dst; + size_t cSize = 0; +- size_t compressedBlocksSize = 0; +- size_t frameHeaderSize = 0; /* Transparent initialization stage, same as compressStream2() */ - DEBUGLOG(3, "ZSTD_compressSequences()"); -+ DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity); ++ DEBUGLOG(4, "ZSTD_compressSequences (nbSeqs=%zu,dstCapacity=%zu)", inSeqsSize, dstCapacity); assert(cctx != NULL); FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed"); ++ /* Begin writing output, starting with frame header */ -@@ -5951,26 +6730,34 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci +- frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID); +- op += frameHeaderSize; +- dstCapacity -= frameHeaderSize; +- cSize += frameHeaderSize; ++ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, ++ &cctx->appliedParams, srcSize, cctx->dictID); ++ op += frameHeaderSize; ++ assert(frameHeaderSize <= dstCapacity); ++ dstCapacity -= frameHeaderSize; ++ cSize += frameHeaderSize; ++ } + if (cctx->appliedParams.fParams.checksumFlag && srcSize) { + xxh64_update(&cctx->xxhState, src, srcSize); + } +- /* cSize includes block header size and compressed sequences size */ +- compressedBlocksSize = ZSTD_compressSequences_internal(cctx, ++ ++ /* Now generate compressed blocks */ ++ { size_t const cBlocksSize = ZSTD_compressSequences_internal(cctx, + op, dstCapacity, + inSeqs, inSeqsSize, + src, srcSize); +- FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!"); +- cSize += compressedBlocksSize; +- dstCapacity -= compressedBlocksSize; ++ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); ++ cSize += cBlocksSize; ++ assert(cBlocksSize <= dstCapacity); ++ dstCapacity -= cBlocksSize; ++ } + ++ /* Complete with frame checksum, if needed */ + if (cctx->appliedParams.fParams.checksumFlag) { + U32 const checksum = (U32) xxh64_digest(&cctx->xxhState); + RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); +@@ -5951,26 +6894,557 @@ size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapaci cSize += 4; } - DEBUGLOG(3, "Final compressed size: %zu", cSize); ++ DEBUGLOG(4, "Final compressed size: 
%zu", cSize); ++ return cSize; ++} ++ ++ ++#if defined(__AVX2__) ++ ++#include /* AVX2 intrinsics */ ++ ++/* ++ * Convert 2 sequences per iteration, using AVX2 intrinsics: ++ * - offset -> offBase = offset + 2 ++ * - litLength -> (U16) litLength ++ * - matchLength -> (U16)(matchLength - 3) ++ * - rep is ignored ++ * Store only 8 bytes per SeqDef (offBase[4], litLength[2], mlBase[2]). ++ * ++ * At the end, instead of extracting two __m128i, ++ * we use _mm256_permute4x64_epi64(..., 0xE8) to move lane2 into lane1, ++ * then store the lower 16 bytes in one go. ++ * ++ * @returns 0 on succes, with no long length detected ++ * @returns > 0 if there is one long length (> 65535), ++ * indicating the position, and type. ++ */ ++static size_t convertSequences_noRepcodes( ++ SeqDef* dstSeqs, ++ const ZSTD_Sequence* inSeqs, ++ size_t nbSequences) ++{ ++ /* ++ * addition: ++ * For each 128-bit half: (offset+2, litLength+0, matchLength-3, rep+0) ++ */ ++ const __m256i addition = _mm256_setr_epi32( ++ ZSTD_REP_NUM, 0, -MINMATCH, 0, /* for sequence i */ ++ ZSTD_REP_NUM, 0, -MINMATCH, 0 /* for sequence i+1 */ ++ ); ++ ++ /* limit: check if there is a long length */ ++ const __m256i limit = _mm256_set1_epi32(65535); ++ ++ /* ++ * shuffle mask for byte-level rearrangement in each 128-bit half: ++ * ++ * Input layout (after addition) per 128-bit half: ++ * [ offset+2 (4 bytes) | litLength (4 bytes) | matchLength (4 bytes) | rep (4 bytes) ] ++ * We only need: ++ * offBase (4 bytes) = offset+2 ++ * litLength (2 bytes) = low 2 bytes of litLength ++ * mlBase (2 bytes) = low 2 bytes of (matchLength) ++ * => Bytes [0..3, 4..5, 8..9], zero the rest. ++ */ ++ const __m256i mask = _mm256_setr_epi8( ++ /* For the lower 128 bits => sequence i */ ++ 0, 1, 2, 3, /* offset+2 */ ++ 4, 5, /* litLength (16 bits) */ ++ 8, 9, /* matchLength (16 bits) */ ++ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, ++ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, ++ ++ /* For the upper 128 bits => sequence i+1 */ ++ 16,17,18,19, /* offset+2 */ ++ 20,21, /* litLength */ ++ 24,25, /* matchLength */ ++ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, ++ (BYTE)0x80, (BYTE)0x80, (BYTE)0x80, (BYTE)0x80 ++ ); ++ ++ /* ++ * Next, we'll use _mm256_permute4x64_epi64(vshf, 0xE8). ++ * Explanation of 0xE8 = 11101000b => [lane0, lane2, lane2, lane3]. ++ * So the lower 128 bits become [lane0, lane2] => combining seq0 and seq1. 
++ */ ++#define PERM_LANE_0X_E8 0xE8 /* [0,2,2,3] in lane indices */ ++ ++ size_t longLen = 0, i = 0; ++ ++ /* AVX permutation depends on the specific definition of target structures */ ++ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); ++ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, offset) == 0); ++ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, litLength) == 4); ++ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); ++ ZSTD_STATIC_ASSERT(sizeof(SeqDef) == 8); ++ ZSTD_STATIC_ASSERT(offsetof(SeqDef, offBase) == 0); ++ ZSTD_STATIC_ASSERT(offsetof(SeqDef, litLength) == 4); ++ ZSTD_STATIC_ASSERT(offsetof(SeqDef, mlBase) == 6); ++ ++ /* Process 2 sequences per loop iteration */ ++ for (; i + 1 < nbSequences; i += 2) { ++ /* Load 2 ZSTD_Sequence (32 bytes) */ ++ __m256i vin = _mm256_loadu_si256((const __m256i*)(const void*)&inSeqs[i]); ++ ++ /* Add {2, 0, -3, 0} in each 128-bit half */ ++ __m256i vadd = _mm256_add_epi32(vin, addition); ++ ++ /* Check for long length */ ++ __m256i ll_cmp = _mm256_cmpgt_epi32(vadd, limit); /* 0xFFFFFFFF for element > 65535 */ ++ int ll_res = _mm256_movemask_epi8(ll_cmp); ++ ++ /* Shuffle bytes so each half gives us the 8 bytes we need */ ++ __m256i vshf = _mm256_shuffle_epi8(vadd, mask); ++ /* ++ * Now: ++ * Lane0 = seq0's 8 bytes ++ * Lane1 = 0 ++ * Lane2 = seq1's 8 bytes ++ * Lane3 = 0 ++ */ ++ ++ /* Permute 64-bit lanes => move Lane2 down into Lane1. */ ++ __m256i vperm = _mm256_permute4x64_epi64(vshf, PERM_LANE_0X_E8); ++ /* ++ * Now the lower 16 bytes (Lane0+Lane1) = [seq0, seq1]. ++ * The upper 16 bytes are [Lane2, Lane3] = [seq1, 0], but we won't use them. ++ */ ++ ++ /* Store only the lower 16 bytes => 2 SeqDef (8 bytes each) */ ++ _mm_storeu_si128((__m128i *)(void*)&dstSeqs[i], _mm256_castsi256_si128(vperm)); ++ /* ++ * This writes out 16 bytes total: ++ * - offset 0..7 => seq0 (offBase, litLength, mlBase) ++ * - offset 8..15 => seq1 (offBase, litLength, mlBase) ++ */ ++ ++ /* check (unlikely) long lengths > 65535 ++ * indices for lengths correspond to bits [4..7], [8..11], [20..23], [24..27] ++ * => combined mask = 0x0FF00FF0 ++ */ ++ if (UNLIKELY((ll_res & 0x0FF00FF0) != 0)) { ++ /* long length detected: let's figure out which one*/ ++ if (inSeqs[i].matchLength > 65535+MINMATCH) { ++ assert(longLen == 0); ++ longLen = i + 1; ++ } ++ if (inSeqs[i].litLength > 65535) { ++ assert(longLen == 0); ++ longLen = i + nbSequences + 1; ++ } ++ if (inSeqs[i+1].matchLength > 65535+MINMATCH) { ++ assert(longLen == 0); ++ longLen = i + 1 + 1; ++ } ++ if (inSeqs[i+1].litLength > 65535) { ++ assert(longLen == 0); ++ longLen = i + 1 + nbSequences + 1; ++ } ++ } ++ } ++ ++ /* Handle leftover if @nbSequences is odd */ ++ if (i < nbSequences) { ++ /* process last sequence */ ++ assert(i == nbSequences - 1); ++ dstSeqs[i].offBase = OFFSET_TO_OFFBASE(inSeqs[i].offset); ++ dstSeqs[i].litLength = (U16)inSeqs[i].litLength; ++ dstSeqs[i].mlBase = (U16)(inSeqs[i].matchLength - MINMATCH); ++ /* check (unlikely) long lengths > 65535 */ ++ if (UNLIKELY(inSeqs[i].matchLength > 65535+MINMATCH)) { ++ assert(longLen == 0); ++ longLen = i + 1; ++ } ++ if (UNLIKELY(inSeqs[i].litLength > 65535)) { ++ assert(longLen == 0); ++ longLen = i + nbSequences + 1; ++ } ++ } ++ ++ return longLen; ++} ++ ++/* the vector implementation could also be ported to SSSE3, ++ * but since this implementation is targeting modern systems (>= Sapphire Rapid), ++ * it's not useful to develop and maintain code for older pre-AVX2 platforms */ ++ ++#else /* no AVX2 */ ++ ++static size_t 
convertSequences_noRepcodes( ++ SeqDef* dstSeqs, ++ const ZSTD_Sequence* inSeqs, ++ size_t nbSequences) ++{ ++ size_t longLen = 0; ++ size_t n; ++ for (n=0; n 65535 */ ++ if (UNLIKELY(inSeqs[n].matchLength > 65535+MINMATCH)) { ++ assert(longLen == 0); ++ longLen = n + 1; ++ } ++ if (UNLIKELY(inSeqs[n].litLength > 65535)) { ++ assert(longLen == 0); ++ longLen = n + nbSequences + 1; ++ } ++ } ++ return longLen; ++} ++ ++#endif ++ ++/* ++ * Precondition: Sequences must end on an explicit Block Delimiter ++ * @return: 0 on success, or an error code. ++ * Note: Sequence validation functionality has been disabled (removed). ++ * This is helpful to generate a lean main pipeline, improving performance. ++ * It may be re-inserted later. ++ */ ++size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx, ++ const ZSTD_Sequence* const inSeqs, size_t nbSequences, ++ int repcodeResolution) ++{ ++ Repcodes_t updatedRepcodes; ++ size_t seqNb = 0; ++ ++ DEBUGLOG(5, "ZSTD_convertBlockSequences (nbSequences = %zu)", nbSequences); ++ ++ RETURN_ERROR_IF(nbSequences >= cctx->seqStore.maxNbSeq, externalSequences_invalid, ++ "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); ++ ++ ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(Repcodes_t)); ++ ++ /* check end condition */ ++ assert(nbSequences >= 1); ++ assert(inSeqs[nbSequences-1].matchLength == 0); ++ assert(inSeqs[nbSequences-1].offset == 0); ++ ++ /* Convert Sequences from public format to internal format */ ++ if (!repcodeResolution) { ++ size_t const longl = convertSequences_noRepcodes(cctx->seqStore.sequencesStart, inSeqs, nbSequences-1); ++ cctx->seqStore.sequences = cctx->seqStore.sequencesStart + nbSequences-1; ++ if (longl) { ++ DEBUGLOG(5, "long length"); ++ assert(cctx->seqStore.longLengthType == ZSTD_llt_none); ++ if (longl <= nbSequences-1) { ++ DEBUGLOG(5, "long match length detected at pos %zu", longl-1); ++ cctx->seqStore.longLengthType = ZSTD_llt_matchLength; ++ cctx->seqStore.longLengthPos = (U32)(longl-1); ++ } else { ++ DEBUGLOG(5, "long literals length detected at pos %zu", longl-nbSequences); ++ assert(longl <= 2* (nbSequences-1)); ++ cctx->seqStore.longLengthType = ZSTD_llt_literalLength; ++ cctx->seqStore.longLengthPos = (U32)(longl-(nbSequences-1)-1); ++ } ++ } ++ } else { ++ for (seqNb = 0; seqNb < nbSequences - 1 ; seqNb++) { ++ U32 const litLength = inSeqs[seqNb].litLength; ++ U32 const matchLength = inSeqs[seqNb].matchLength; ++ U32 const ll0 = (litLength == 0); ++ U32 const offBase = ZSTD_finalizeOffBase(inSeqs[seqNb].offset, updatedRepcodes.rep, ll0); ++ ++ DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength); ++ ZSTD_storeSeqOnly(&cctx->seqStore, litLength, offBase, matchLength); ++ ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0); ++ } ++ } ++ ++ /* If we skipped repcode search while parsing, we need to update repcodes now */ ++ if (!repcodeResolution && nbSequences > 1) { ++ U32* const rep = updatedRepcodes.rep; ++ ++ if (nbSequences >= 4) { ++ U32 lastSeqIdx = (U32)nbSequences - 2; /* index of last full sequence */ ++ rep[2] = inSeqs[lastSeqIdx - 2].offset; ++ rep[1] = inSeqs[lastSeqIdx - 1].offset; ++ rep[0] = inSeqs[lastSeqIdx].offset; ++ } else if (nbSequences == 3) { ++ rep[2] = rep[0]; ++ rep[1] = inSeqs[0].offset; ++ rep[0] = inSeqs[1].offset; ++ } else { ++ assert(nbSequences == 2); ++ rep[2] = rep[1]; ++ rep[1] = rep[0]; ++ rep[0] = inSeqs[0].offset; ++ } ++ } ++ ++ ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, 
sizeof(Repcodes_t)); ++ ++ return 0; ++} ++ ++#if defined(ZSTD_ARCH_X86_AVX2) ++ ++BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) ++{ ++ size_t i; ++ __m256i const zeroVec = _mm256_setzero_si256(); ++ __m256i sumVec = zeroVec; /* accumulates match+lit in 32-bit lanes */ ++ ZSTD_ALIGNED(32) U32 tmp[8]; /* temporary buffer for reduction */ ++ size_t mSum = 0, lSum = 0; ++ ZSTD_STATIC_ASSERT(sizeof(ZSTD_Sequence) == 16); ++ ++ /* Process 2 structs (32 bytes) at a time */ ++ for (i = 0; i + 2 <= nbSeqs; i += 2) { ++ /* Load two consecutive ZSTD_Sequence (8×4 = 32 bytes) */ ++ __m256i data = _mm256_loadu_si256((const __m256i*)(const void*)&seqs[i]); ++ /* check end of block signal */ ++ __m256i cmp = _mm256_cmpeq_epi32(data, zeroVec); ++ int cmp_res = _mm256_movemask_epi8(cmp); ++ /* indices for match lengths correspond to bits [8..11], [24..27] ++ * => combined mask = 0x0F000F00 */ ++ ZSTD_STATIC_ASSERT(offsetof(ZSTD_Sequence, matchLength) == 8); ++ if (cmp_res & 0x0F000F00) break; ++ /* Accumulate in sumVec */ ++ sumVec = _mm256_add_epi32(sumVec, data); ++ } ++ ++ /* Horizontal reduction */ ++ _mm256_store_si256((__m256i*)tmp, sumVec); ++ lSum = tmp[1] + tmp[5]; ++ mSum = tmp[2] + tmp[6]; ++ ++ /* Handle the leftover */ ++ for (; i < nbSeqs; i++) { ++ lSum += seqs[i].litLength; ++ mSum += seqs[i].matchLength; ++ if (seqs[i].matchLength == 0) break; /* end of block */ ++ } ++ ++ if (i==nbSeqs) { ++ /* reaching end of sequences: end of block signal was not present */ ++ BlockSummary bs; ++ bs.nbSequences = ERROR(externalSequences_invalid); ++ return bs; ++ } ++ { BlockSummary bs; ++ bs.nbSequences = i+1; ++ bs.blockSize = lSum + mSum; ++ bs.litSize = lSum; ++ return bs; ++ } ++} ++ ++#else ++ ++BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs) ++{ ++ size_t totalMatchSize = 0; ++ size_t litSize = 0; ++ size_t n; ++ assert(seqs); ++ for (n=0; nappliedParams.searchForExternalRepcodes == ZSTD_ps_enable); ++ assert(cctx->appliedParams.searchForExternalRepcodes != ZSTD_ps_auto); ++ ++ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals_internal: nbSeqs=%zu, litSize=%zu", nbSequences, litSize); ++ RETURN_ERROR_IF(nbSequences == 0, externalSequences_invalid, "Requires at least 1 end-of-block"); ++ ++ /* Special case: empty frame */ ++ if ((nbSequences == 1) && (inSeqs[0].litLength == 0)) { ++ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1); ++ RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "No room for empty frame block header"); ++ MEM_writeLE24(op, cBlockHeader24); ++ op += ZSTD_blockHeaderSize; ++ dstCapacity -= ZSTD_blockHeaderSize; ++ cSize += ZSTD_blockHeaderSize; ++ } ++ ++ while (nbSequences) { ++ size_t compressedSeqsSize, cBlockSize, conversionStatus; ++ BlockSummary const block = ZSTD_get1BlockSummary(inSeqs, nbSequences); ++ U32 const lastBlock = (block.nbSequences == nbSequences); ++ FORWARD_IF_ERROR(block.nbSequences, "Error while trying to determine nb of sequences for a block"); ++ assert(block.nbSequences <= nbSequences); ++ RETURN_ERROR_IF(block.litSize > litSize, externalSequences_invalid, "discrepancy: Sequences require more literals than present in buffer"); ++ ZSTD_resetSeqStore(&cctx->seqStore); ++ ++ conversionStatus = ZSTD_convertBlockSequences(cctx, ++ inSeqs, block.nbSequences, ++ repcodeResolution); ++ FORWARD_IF_ERROR(conversionStatus, "Bad sequence conversion"); ++ inSeqs += block.nbSequences; ++ nbSequences -= block.nbSequences; ++ remaining -= block.blockSize; ++ ++ /* Note: when blockSize is very 
small, other variant send it uncompressed. ++ * Here, we still send the sequences, because we don't have the original source to send it uncompressed. ++ * One could imagine in theory reproducing the source from the sequences, ++ * but that's complex and costly memory intensive, and goes against the objectives of this variant. */ ++ ++ RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block"); ++ ++ compressedSeqsSize = ZSTD_entropyCompressSeqStore_internal( ++ op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize, ++ literals, block.litSize, ++ &cctx->seqStore, ++ &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy, ++ &cctx->appliedParams, ++ cctx->tmpWorkspace, cctx->tmpWkspSize /* statically allocated in resetCCtx */, ++ cctx->bmi2); ++ FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed"); ++ /* note: the spec forbids for any compressed block to be larger than maximum block size */ ++ if (compressedSeqsSize > cctx->blockSizeMax) compressedSeqsSize = 0; ++ DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize); ++ litSize -= block.litSize; ++ literals = (const char*)literals + block.litSize; ++ ++ /* Note: difficult to check source for RLE block when only Literals are provided, ++ * but it could be considered from analyzing the sequence directly */ ++ ++ if (compressedSeqsSize == 0) { ++ /* Sending uncompressed blocks is out of reach, because the source is not provided. ++ * In theory, one could use the sequences to regenerate the source, like a decompressor, ++ * but it's complex, and memory hungry, killing the purpose of this variant. ++ * Current outcome: generate an error code. 
++ */ ++ RETURN_ERROR(cannotProduce_uncompressedBlock, "ZSTD_compressSequencesAndLiterals cannot generate an uncompressed block"); ++ } else { ++ U32 cBlockHeader; ++ assert(compressedSeqsSize > 1); /* no RLE */ ++ /* Error checking and repcodes update */ ++ ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState); ++ if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) ++ cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; ++ ++ /* Write block header into beginning of block*/ ++ cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3); ++ MEM_writeLE24(op, cBlockHeader); ++ cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize; ++ DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize); ++ } ++ ++ cSize += cBlockSize; ++ op += cBlockSize; ++ dstCapacity -= cBlockSize; ++ cctx->isFirstBlock = 0; ++ DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity); ++ ++ if (lastBlock) { ++ assert(nbSequences == 0); ++ break; ++ } ++ } ++ ++ RETURN_ERROR_IF(litSize != 0, externalSequences_invalid, "literals must be entirely and exactly consumed"); ++ RETURN_ERROR_IF(remaining != 0, externalSequences_invalid, "Sequences must represent a total of exactly srcSize=%zu", srcSize); ++ DEBUGLOG(4, "cSize final total: %zu", cSize); ++ return cSize; ++} ++ ++size_t ++ZSTD_compressSequencesAndLiterals(ZSTD_CCtx* cctx, ++ void* dst, size_t dstCapacity, ++ const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ++ const void* literals, size_t litSize, size_t litCapacity, ++ size_t decompressedSize) ++{ ++ BYTE* op = (BYTE*)dst; ++ size_t cSize = 0; ++ ++ /* Transparent initialization stage, same as compressStream2() */ ++ DEBUGLOG(4, "ZSTD_compressSequencesAndLiterals (dstCapacity=%zu)", dstCapacity); ++ assert(cctx != NULL); ++ if (litCapacity < litSize) { ++ RETURN_ERROR(workSpace_tooSmall, "literals buffer is not large enough: must be at least 8 bytes larger than litSize (risk of read out-of-bound)"); ++ } ++ FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, decompressedSize), "CCtx initialization failed"); ++ ++ if (cctx->appliedParams.blockDelimiters == ZSTD_sf_noBlockDelimiters) { ++ RETURN_ERROR(frameParameter_unsupported, "This mode is only compatible with explicit delimiters"); ++ } ++ if (cctx->appliedParams.validateSequences) { ++ RETURN_ERROR(parameter_unsupported, "This mode is not compatible with Sequence validation"); ++ } ++ if (cctx->appliedParams.fParams.checksumFlag) { ++ RETURN_ERROR(frameParameter_unsupported, "this mode is not compatible with frame checksum"); ++ } ++ ++ /* Begin writing output, starting with frame header */ ++ { size_t const frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, ++ &cctx->appliedParams, decompressedSize, cctx->dictID); ++ op += frameHeaderSize; ++ assert(frameHeaderSize <= dstCapacity); ++ dstCapacity -= frameHeaderSize; ++ cSize += frameHeaderSize; ++ } ++ ++ /* Now generate compressed blocks */ ++ { size_t const cBlocksSize = ZSTD_compressSequencesAndLiterals_internal(cctx, ++ op, dstCapacity, ++ inSeqs, inSeqsSize, ++ literals, litSize, decompressedSize); ++ FORWARD_IF_ERROR(cBlocksSize, "Compressing blocks failed!"); ++ cSize += cBlocksSize; ++ assert(cBlocksSize <= dstCapacity); ++ dstCapacity -= cBlocksSize; ++ } ++ + DEBUGLOG(4, "Final compressed size: %zu", cSize); return cSize; } @@ -38879,7 +42802,7 @@ index 16bb995bc6c4..885167f7e47b 100644 return ZSTD_compressStream2(zcs, output, &input, 
ZSTD_e_flush); } - +- size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) { - ZSTD_inBuffer input = { NULL, 0, 0 }; @@ -38890,7 +42813,27 @@ index 16bb995bc6c4..885167f7e47b 100644 if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ /* single thread mode : attempt to calculate remaining to flush more precisely */ { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; -@@ -6092,7 +6879,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, +@@ -6046,7 +7520,7 @@ static void ZSTD_dedicatedDictSearch_revertCParams( + } + } + +-static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) ++static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) + { + switch (mode) { + case ZSTD_cpm_unknown: +@@ -6070,8 +7544,8 @@ static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMo + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. + * Use dictSize == 0 for unknown or unused. +- * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */ +-static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) ++ * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_CParamMode_e`. */ ++static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) + { + U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode); + U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); +@@ -6092,7 +7566,7 @@ static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, cp.targetLength = (unsigned)(-clampedCompressionLevel); } /* refine parameters based on srcSize & dictSize */ @@ -38899,7 +42842,24 @@ index 16bb995bc6c4..885167f7e47b 100644 } } -@@ -6127,3 +6914,29 @@ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeH +@@ -6109,7 +7583,9 @@ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long l + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). + * Fields of `ZSTD_frameParameters` are set to default values */ +-static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) { ++static ZSTD_parameters ++ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode) ++{ + ZSTD_parameters params; + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode); + DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); +@@ -6123,7 +7599,34 @@ static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned lo + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). 
+ * Fields of `ZSTD_frameParameters` are set to default values */ +-ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { ++ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) ++{ if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown); } @@ -38907,8 +42867,8 @@ index 16bb995bc6c4..885167f7e47b 100644 +void ZSTD_registerSequenceProducer( + ZSTD_CCtx* zc, + void* extSeqProdState, -+ ZSTD_sequenceProducer_F extSeqProdFunc -+) { ++ ZSTD_sequenceProducer_F extSeqProdFunc) ++{ + assert(zc != NULL); + ZSTD_CCtxParams_registerSequenceProducer( + &zc->requestedParams, extSeqProdState, extSeqProdFunc @@ -38918,8 +42878,8 @@ index 16bb995bc6c4..885167f7e47b 100644 +void ZSTD_CCtxParams_registerSequenceProducer( + ZSTD_CCtx_params* params, + void* extSeqProdState, -+ ZSTD_sequenceProducer_F extSeqProdFunc -+) { ++ ZSTD_sequenceProducer_F extSeqProdFunc) ++{ + assert(params != NULL); + if (extSeqProdFunc != NULL) { + params->extSeqProdFunc = extSeqProdFunc; @@ -38930,7 +42890,7 @@ index 16bb995bc6c4..885167f7e47b 100644 + } +} diff --git a/lib/zstd/compress/zstd_compress_internal.h b/lib/zstd/compress/zstd_compress_internal.h -index 71697a11ae30..53cb582a8d2b 100644 +index 71697a11ae30..b10978385876 100644 --- a/lib/zstd/compress/zstd_compress_internal.h +++ b/lib/zstd/compress/zstd_compress_internal.h @@ -1,5 +1,6 @@ @@ -38941,14 +42901,16 @@ index 71697a11ae30..53cb582a8d2b 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the -@@ -20,6 +21,7 @@ +@@ -20,7 +21,8 @@ ***************************************/ #include "../common/zstd_internal.h" #include "zstd_cwksp.h" +- +#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */ - ++#include "zstd_preSplit.h" /* ZSTD_SLIPBLOCK_WORKSPACESIZE */ /*-************************************* + * Constants @@ -32,7 +34,7 @@ It's not a big deal though : candidate will just be sorted again. Additionally, candidate position 1 will be lost. @@ -38958,7 +42920,100 @@ index 71697a11ae30..53cb582a8d2b 100644 This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ -@@ -111,12 +113,13 @@ typedef struct { +@@ -75,6 +77,70 @@ typedef struct { + ZSTD_fseCTables_t fse; + } ZSTD_entropyCTables_t; + ++/* ********************************************* ++* Sequences * ++***********************************************/ ++typedef struct SeqDef_s { ++ U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ ++ U16 litLength; ++ U16 mlBase; /* mlBase == matchLength - MINMATCH */ ++} SeqDef; ++ ++/* Controls whether seqStore has a single "long" litLength or matchLength. See SeqStore_t. */ ++typedef enum { ++ ZSTD_llt_none = 0, /* no longLengthType */ ++ ZSTD_llt_literalLength = 1, /* represents a long literal */ ++ ZSTD_llt_matchLength = 2 /* represents a long match */ ++} ZSTD_longLengthType_e; ++ ++typedef struct { ++ SeqDef* sequencesStart; ++ SeqDef* sequences; /* ptr to end of sequences */ ++ BYTE* litStart; ++ BYTE* lit; /* ptr to end of literals */ ++ BYTE* llCode; ++ BYTE* mlCode; ++ BYTE* ofCode; ++ size_t maxNbSeq; ++ size_t maxNbLit; ++ ++ /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength ++ * in the seqStore that has a value larger than U16 (if it exists). 
To do so, we increment ++ * the existing value of the litLength or matchLength by 0x10000. ++ */ ++ ZSTD_longLengthType_e longLengthType; ++ U32 longLengthPos; /* Index of the sequence to apply long length modification to */ ++} SeqStore_t; ++ ++typedef struct { ++ U32 litLength; ++ U32 matchLength; ++} ZSTD_SequenceLength; ++ ++/* ++ * Returns the ZSTD_SequenceLength for the given sequences. It handles the decoding of long sequences ++ * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength. ++ */ ++MEM_STATIC ZSTD_SequenceLength ZSTD_getSequenceLength(SeqStore_t const* seqStore, SeqDef const* seq) ++{ ++ ZSTD_SequenceLength seqLen; ++ seqLen.litLength = seq->litLength; ++ seqLen.matchLength = seq->mlBase + MINMATCH; ++ if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { ++ if (seqStore->longLengthType == ZSTD_llt_literalLength) { ++ seqLen.litLength += 0x10000; ++ } ++ if (seqStore->longLengthType == ZSTD_llt_matchLength) { ++ seqLen.matchLength += 0x10000; ++ } ++ } ++ return seqLen; ++} ++ ++const SeqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ ++int ZSTD_seqToCodes(const SeqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ ++ ++ + /* ********************************************* + * Entropy buffer statistics structs and funcs * + ***********************************************/ +@@ -84,7 +150,7 @@ typedef struct { + * hufDesSize refers to the size of huffman tree description in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */ + typedef struct { +- symbolEncodingType_e hType; ++ SymbolEncodingType_e hType; + BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE]; + size_t hufDesSize; + } ZSTD_hufCTablesMetadata_t; +@@ -95,9 +161,9 @@ typedef struct { + * fseTablesSize refers to the size of fse tables in bytes. + * This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */ + typedef struct { +- symbolEncodingType_e llType; +- symbolEncodingType_e ofType; +- symbolEncodingType_e mlType; ++ SymbolEncodingType_e llType; ++ SymbolEncodingType_e ofType; ++ SymbolEncodingType_e mlType; + BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE]; + size_t fseTablesSize; + size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */ +@@ -111,12 +177,13 @@ typedef struct { /* ZSTD_buildBlockEntropyStats() : * Builds entropy for the block. * @return : 0 on success or error code */ @@ -38969,7 +43024,7 @@ index 71697a11ae30..53cb582a8d2b 100644 - ZSTD_entropyCTablesMetadata_t* entropyMetadata, - void* workspace, size_t wkspSize); +size_t ZSTD_buildBlockEntropyStats( -+ const seqStore_t* seqStorePtr, ++ const SeqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, @@ -38978,17 +43033,15 @@ index 71697a11ae30..53cb582a8d2b 100644 /* ******************************* * Compression internals structs * -@@ -142,26 +145,33 @@ typedef struct { +@@ -140,28 +207,29 @@ typedef struct { + stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */ + size_t size; /* The number of sequences. <= capacity. 
*/ size_t capacity; /* The capacity starting from `seq` pointer */ - } rawSeqStore_t; +-} rawSeqStore_t; ++} RawSeqStore_t; -+typedef struct { -+ U32 idx; /* Index in array of ZSTD_Sequence */ -+ U32 posInSequence; /* Position within sequence at idx */ -+ size_t posInSrc; /* Number of bytes given by sequences provided so far */ -+} ZSTD_sequencePosition; -+ - UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; +-UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; ++UNUSED_ATTR static const RawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0}; typedef struct { - int price; @@ -39019,7 +43072,30 @@ index 71697a11ae30..53cb582a8d2b 100644 U32 litSum; /* nb of literals */ U32 litLengthSum; /* nb of litLength codes */ -@@ -212,8 +222,10 @@ struct ZSTD_matchState_t { +@@ -173,7 +241,7 @@ typedef struct { + U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ + ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ + const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ +- ZSTD_paramSwitch_e literalCompressionMode; ++ ZSTD_ParamSwitch_e literalCompressionMode; + } optState_t; + + typedef struct { +@@ -195,11 +263,11 @@ typedef struct { + + #define ZSTD_WINDOW_START_INDEX 2 + +-typedef struct ZSTD_matchState_t ZSTD_matchState_t; ++typedef struct ZSTD_MatchState_t ZSTD_MatchState_t; + + #define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ + +-struct ZSTD_matchState_t { ++struct ZSTD_MatchState_t { + ZSTD_window_t window; /* State for window round buffer management */ + U32 loadedDictEnd; /* index of end of dictionary, within context's referential. + * When loadedDictEnd != 0, a dictionary is in use, and still valid. +@@ -212,28 +280,42 @@ struct ZSTD_matchState_t { U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/ @@ -39031,10 +43107,20 @@ index 71697a11ae30..53cb582a8d2b 100644 U32* hashTable; U32* hashTable3; -@@ -228,6 +240,18 @@ struct ZSTD_matchState_t { - const ZSTD_matchState_t* dictMatchState; + U32* chainTable; + +- U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */ ++ int forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */ + + int dedicatedDictSearch; /* Indicates whether this matchState is using the + * dedicated dictionary search structure. + */ + optState_t opt; /* optimal parser state */ +- const ZSTD_matchState_t* dictMatchState; ++ const ZSTD_MatchState_t* dictMatchState; ZSTD_compressionParameters cParams; - const rawSeqStore_t* ldmSeqStore; +- const rawSeqStore_t* ldmSeqStore; ++ const RawSeqStore_t* ldmSeqStore; + + /* Controls prefetching in some dictMatchState matchfinders. + * This behavior is controlled from the cctx ms. @@ -39050,13 +43136,69 @@ index 71697a11ae30..53cb582a8d2b 100644 }; typedef struct { -@@ -324,6 +348,25 @@ struct ZSTD_CCtx_params_s { + ZSTD_compressedBlockState_t* prevCBlock; + ZSTD_compressedBlockState_t* nextCBlock; +- ZSTD_matchState_t matchState; ++ ZSTD_MatchState_t matchState; + } ZSTD_blockState_t; + + typedef struct { +@@ -260,7 +342,7 @@ typedef struct { + } ldmState_t; + + typedef struct { +- ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. 
ZSTD_ps_auto by default */ ++ ZSTD_ParamSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */ + U32 hashLog; /* Log size of hashTable */ + U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ + U32 minMatchLength; /* Minimum match length */ +@@ -291,7 +373,7 @@ struct ZSTD_CCtx_params_s { + * There is no guarantee that hint is close to actual source size */ + + ZSTD_dictAttachPref_e attachDictPref; +- ZSTD_paramSwitch_e literalCompressionMode; ++ ZSTD_ParamSwitch_e literalCompressionMode; + + /* Multithreading: used to pass parameters to mtctx */ + int nbWorkers; +@@ -310,24 +392,54 @@ struct ZSTD_CCtx_params_s { + ZSTD_bufferMode_e outBufferMode; + + /* Sequence compression API */ +- ZSTD_sequenceFormat_e blockDelimiters; ++ ZSTD_SequenceFormat_e blockDelimiters; + int validateSequences; + +- /* Block splitting */ +- ZSTD_paramSwitch_e useBlockSplitter; ++ /* Block splitting ++ * @postBlockSplitter executes split analysis after sequences are produced, ++ * it's more accurate but consumes more resources. ++ * @preBlockSplitter_level splits before knowing sequences, ++ * it's more approximative but also cheaper. ++ * Valid @preBlockSplitter_level values range from 0 to 6 (included). ++ * 0 means auto, 1 means do not split, ++ * then levels are sorted in increasing cpu budget, from 2 (fastest) to 6 (slowest). ++ * Highest @preBlockSplitter_level combines well with @postBlockSplitter. ++ */ ++ ZSTD_ParamSwitch_e postBlockSplitter; ++ int preBlockSplitter_level; ++ ++ /* Adjust the max block size*/ ++ size_t maxBlockSize; + + /* Param for deciding whether to use row-based matchfinder */ +- ZSTD_paramSwitch_e useRowMatchFinder; ++ ZSTD_ParamSwitch_e useRowMatchFinder; + + /* Always load a dictionary in ext-dict mode (not prefix mode)? */ + int deterministicRefPrefix; /* Internal use, for createCCtxParams() and freeCCtxParams() only */ ZSTD_customMem customMem; + + /* Controls prefetching in some dictMatchState matchfinders */ -+ ZSTD_paramSwitch_e prefetchCDictTables; ++ ZSTD_ParamSwitch_e prefetchCDictTables; + + /* Controls whether zstd will fall back to an internal matchfinder + * if the external matchfinder returns an error code. 
*/ @@ -39068,15 +43210,61 @@ index 71697a11ae30..53cb582a8d2b 100644 + void* extSeqProdState; + ZSTD_sequenceProducer_F extSeqProdFunc; + -+ /* Adjust the max block size*/ -+ size_t maxBlockSize; -+ + /* Controls repcode search in external sequence parsing */ -+ ZSTD_paramSwitch_e searchForExternalRepcodes; ++ ZSTD_ParamSwitch_e searchForExternalRepcodes; }; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */ #define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2)) -@@ -404,6 +447,7 @@ struct ZSTD_CCtx_s { + #define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE) ++#define TMP_WORKSPACE_SIZE (MAX(ENTROPY_WORKSPACE_SIZE, ZSTD_SLIPBLOCK_WORKSPACESIZE)) + + /* + * Indicates whether this compression proceeds directly from user-provided +@@ -345,11 +457,11 @@ typedef enum { + */ + #define ZSTD_MAX_NB_BLOCK_SPLITS 196 + typedef struct { +- seqStore_t fullSeqStoreChunk; +- seqStore_t firstHalfSeqStore; +- seqStore_t secondHalfSeqStore; +- seqStore_t currSeqStore; +- seqStore_t nextSeqStore; ++ SeqStore_t fullSeqStoreChunk; ++ SeqStore_t firstHalfSeqStore; ++ SeqStore_t secondHalfSeqStore; ++ SeqStore_t currSeqStore; ++ SeqStore_t nextSeqStore; + + U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +@@ -366,7 +478,7 @@ struct ZSTD_CCtx_s { + size_t dictContentSize; + + ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */ +- size_t blockSize; ++ size_t blockSizeMax; + unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */ + unsigned long long consumedSrcSize; + unsigned long long producedCSize; +@@ -378,13 +490,14 @@ struct ZSTD_CCtx_s { + int isFirstBlock; + int initialized; + +- seqStore_t seqStore; /* sequences storage ptrs */ ++ SeqStore_t seqStore; /* sequences storage ptrs */ + ldmState_t ldmState; /* long distance matching state */ + rawSeq* ldmSequences; /* Storage for the ldm output sequences */ + size_t maxNbLdmSequences; +- rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */ ++ RawSeqStore_t externSeqStore; /* Mutable reference to external sequences */ + ZSTD_blockState_t blockState; +- U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */ ++ void* tmpWorkspace; /* used as substitute of stack space - must be aligned for S64 type */ ++ size_t tmpWkspSize; + + /* Whether we are streaming or not */ + ZSTD_buffered_policy_e bufferedPolicy; +@@ -404,6 +517,7 @@ struct ZSTD_CCtx_s { /* Stable in/out buffer verification */ ZSTD_inBuffer expectedInBuffer; @@ -39084,7 +43272,7 @@ index 71697a11ae30..53cb582a8d2b 100644 size_t expectedOutBufferSize; /* Dictionary */ -@@ -417,9 +461,14 @@ struct ZSTD_CCtx_s { +@@ -417,9 +531,14 @@ struct ZSTD_CCtx_s { /* Workspace for block splitter */ ZSTD_blockSplitCtx blockSplitCtx; @@ -39099,7 +43287,7 @@ index 71697a11ae30..53cb582a8d2b 100644 typedef enum { ZSTD_noDict = 0, -@@ -441,7 +490,7 @@ typedef enum { +@@ -441,17 +560,17 @@ typedef enum { * In this mode we take both the source size and the dictionary size * into account when selecting and adjusting the parameters. */ @@ -39108,7 +43296,43 @@ index 71697a11ae30..53cb582a8d2b 100644 * We don't know what these parameters are for. We default to the legacy * behavior of taking both the source size and the dict size into account * when selecting and adjusting parameters. 
-@@ -500,9 +549,11 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) + */ +-} ZSTD_cParamMode_e; ++} ZSTD_CParamMode_e; + +-typedef size_t (*ZSTD_blockCompressor) ( +- ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++typedef size_t (*ZSTD_BlockCompressor_f) ( ++ ZSTD_MatchState_t* bs, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); ++ZSTD_BlockCompressor_f ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_ParamSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); + + + MEM_STATIC U32 ZSTD_LLcode(U32 litLength) +@@ -497,12 +616,33 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) + return 1; + } + ++/* ZSTD_selectAddr: ++ * @return index >= lowLimit ? candidate : backup, ++ * tries to force branchless codegen. */ ++MEM_STATIC const BYTE* ++ZSTD_selectAddr(U32 index, U32 lowLimit, const BYTE* candidate, const BYTE* backup) ++{ ++#if defined(__x86_64__) ++ __asm__ ( ++ "cmp %1, %2\n" ++ "cmova %3, %0\n" ++ : "+r"(candidate) ++ : "r"(index), "r"(lowLimit), "r"(backup) ++ ); ++ return candidate; ++#else ++ return index >= lowLimit ? candidate : backup; ++#endif ++} ++ /* ZSTD_noCompressBlock() : * Writes uncompressed block to dst buffer from given src. * Returns the size of the block */ @@ -39121,7 +43345,7 @@ index 71697a11ae30..53cb582a8d2b 100644 RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, dstSize_tooSmall, "dst buf too small for uncompressed block"); MEM_writeLE24(dst, cBlockHeader24); -@@ -510,7 +561,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi +@@ -510,7 +650,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi return ZSTD_blockHeaderSize + srcSize; } @@ -39131,7 +43355,7 @@ index 71697a11ae30..53cb582a8d2b 100644 { BYTE* const op = (BYTE*)dst; U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); -@@ -529,7 +581,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) +@@ -529,7 +670,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) { U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6; ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); @@ -39140,7 +43364,7 @@ index 71697a11ae30..53cb582a8d2b 100644 return (srcSize >> minlog) + 2; } -@@ -565,29 +617,27 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con +@@ -565,29 +706,68 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con while (ip < iend) *op++ = *ip++; } @@ -39166,25 +43390,67 @@ index 71697a11ae30..53cb582a8d2b 100644 +#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM) +#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM) +#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */ ++ ++/*! ZSTD_storeSeqOnly() : ++ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. ++ * Literals themselves are not copied, but @litPtr is updated. ++ * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). 
++ * @matchLength : must be >= MINMATCH ++*/ ++HINT_INLINE UNUSED_ATTR void ++ZSTD_storeSeqOnly(SeqStore_t* seqStorePtr, ++ size_t litLength, ++ U32 offBase, ++ size_t matchLength) ++{ ++ assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); ++ ++ /* literal Length */ ++ assert(litLength <= ZSTD_BLOCKSIZE_MAX); ++ if (UNLIKELY(litLength>0xFFFF)) { ++ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ ++ seqStorePtr->longLengthType = ZSTD_llt_literalLength; ++ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); ++ } ++ seqStorePtr->sequences[0].litLength = (U16)litLength; ++ ++ /* match offset */ ++ seqStorePtr->sequences[0].offBase = offBase; ++ ++ /* match Length */ ++ assert(matchLength <= ZSTD_BLOCKSIZE_MAX); ++ assert(matchLength >= MINMATCH); ++ { size_t const mlBase = matchLength - MINMATCH; ++ if (UNLIKELY(mlBase>0xFFFF)) { ++ assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ ++ seqStorePtr->longLengthType = ZSTD_llt_matchLength; ++ seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); ++ } ++ seqStorePtr->sequences[0].mlBase = (U16)mlBase; ++ } ++ ++ seqStorePtr->sequences++; ++} /*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t. - * @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET(). -+ * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t. ++ * Store a sequence (litlen, litPtr, offBase and matchLength) into SeqStore_t. + * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE(). * @matchLength : must be >= MINMATCH - * Allowed to overread literals up to litLimit. + * Allowed to over-read literals up to litLimit. */ HINT_INLINE UNUSED_ATTR void - ZSTD_storeSeq(seqStore_t* seqStorePtr, +-ZSTD_storeSeq(seqStore_t* seqStorePtr, ++ZSTD_storeSeq(SeqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, - U32 offBase_minus1, + U32 offBase, size_t matchLength) { BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; -@@ -596,8 +646,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, +@@ -596,8 +776,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, static const BYTE* g_start = NULL; if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); @@ -39195,7 +43461,7 @@ index 71697a11ae30..53cb582a8d2b 100644 } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); -@@ -607,9 +657,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, +@@ -607,9 +787,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, assert(literals + litLength <= litLimit); if (litEnd <= litLimit_w) { /* Common case we can use wildcopy. 
@@ -39208,16 +43474,35 @@ index 71697a11ae30..53cb582a8d2b 100644 ZSTD_copy16(seqStorePtr->lit, literals); if (litLength > 16) { ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); -@@ -628,7 +678,7 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, - seqStorePtr->sequences[0].litLength = (U16)litLength; +@@ -619,44 +799,22 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, + } + seqStorePtr->lit += litLength; - /* match offset */ +- /* literal Length */ +- if (litLength>0xFFFF) { +- assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ +- seqStorePtr->longLengthType = ZSTD_llt_literalLength; +- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); +- } +- seqStorePtr->sequences[0].litLength = (U16)litLength; +- +- /* match offset */ - seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1); -+ seqStorePtr->sequences[0].offBase = offBase; - - /* match Length */ - assert(matchLength >= MINMATCH); -@@ -646,17 +696,17 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr, +- +- /* match Length */ +- assert(matchLength >= MINMATCH); +- { size_t const mlBase = matchLength - MINMATCH; +- if (mlBase>0xFFFF) { +- assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ +- seqStorePtr->longLengthType = ZSTD_llt_matchLength; +- seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); +- } +- seqStorePtr->sequences[0].mlBase = (U16)mlBase; +- } +- +- seqStorePtr->sequences++; ++ ZSTD_storeSeqOnly(seqStorePtr, litLength, offBase, matchLength); + } /* ZSTD_updateRep() : * updates in-place @rep (array of repeat offsets) @@ -39240,21 +43525,27 @@ index 71697a11ae30..53cb582a8d2b 100644 if (repCode > 0) { /* note : if repCode==0, no change */ U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; -@@ -673,11 +723,11 @@ typedef struct repcodes_s { - } repcodes_t; +@@ -670,14 +828,14 @@ ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) - MEM_STATIC repcodes_t + typedef struct repcodes_s { + U32 rep[3]; +-} repcodes_t; ++} Repcodes_t; + +-MEM_STATIC repcodes_t -ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) ++MEM_STATIC Repcodes_t +ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0) { - repcodes_t newReps; +- repcodes_t newReps; ++ Repcodes_t newReps; ZSTD_memcpy(&newReps, rep, sizeof(newReps)); - ZSTD_updateRep(newReps.rep, offBase_minus1, ll0); + ZSTD_updateRep(newReps.rep, offBase, ll0); return newReps; } -@@ -685,59 +735,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0 +@@ -685,59 +843,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0 /*-************************************* * Match length counter ***************************************/ @@ -39314,7 +43605,18 @@ index 71697a11ae30..53cb582a8d2b 100644 MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) { const BYTE* const pStart = pIn; -@@ -783,32 +780,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, +@@ -771,8 +876,8 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, + size_t const matchLength = ZSTD_count(ip, match, vEnd); + if (match + matchLength != mEnd) return matchLength; + DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength); +- DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match); +- DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip); ++ DEBUGLOG(7, "distance from match beginning to end dictionary = %i", (int)(mEnd - match)); ++ DEBUGLOG(7, "distance from current pos to end buffer = %i", (int)(iEnd - ip)); + DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart); + DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd)); + return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd); +@@ -783,32 +888,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match, * Hashes ***************************************/ static const U32 prime3bytes = 506832829U; @@ -39370,7 +43672,7 @@ index 71697a11ae30..53cb582a8d2b 100644 switch(mls) { default: -@@ -820,6 +828,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +@@ -820,6 +936,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) } } @@ -39395,7 +43697,34 @@ index 71697a11ae30..53cb582a8d2b 100644 /* ZSTD_ipow() : * Return base^exponent. */ -@@ -1011,7 +1037,9 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, +@@ -881,11 +1015,12 @@ MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 + /*-************************************* + * Round buffer management + ***************************************/ +-#if (ZSTD_WINDOWLOG_MAX_64 > 31) +-# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX" +-#endif +-/* Max current allowed */ +-#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) ++/* Max @current value allowed: ++ * In 32-bit mode: we want to avoid crossing the 2 GB limit, ++ * reducing risks of side effects in case of signed operations on indexes. 
++ * In 64-bit mode: we want to ensure that adding the maximum job size (512 MB) ++ * doesn't overflow U32 index capacity (4 GB) */ ++#define ZSTD_CURRENT_MAX (MEM_64bits() ? 3500U MB : 2000U MB) + /* Maximum chunk size before overflow correction needs to be called again */ + #define ZSTD_CHUNKSIZE_MAX \ + ( ((U32)-1) /* Maximum ending current index */ \ +@@ -925,7 +1060,7 @@ MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) + * Inspects the provided matchState and figures out what dictMode should be + * passed to the compressor. + */ +-MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms) ++MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_MatchState_t *ms) + { + return ZSTD_window_hasExtDict(ms->window) ? + ZSTD_extDict : +@@ -1011,7 +1146,9 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, * The least significant cycleLog bits of the indices must remain the same, * which may be 0. Every index up to maxDist in the past must be valid. */ @@ -39406,7 +43735,25 @@ index 71697a11ae30..53cb582a8d2b 100644 U32 maxDist, void const* src) { /* preemptive overflow correction: -@@ -1167,10 +1195,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, +@@ -1112,7 +1249,7 @@ ZSTD_window_enforceMaxDist(ZSTD_window_t* window, + const void* blockEnd, + U32 maxDist, + U32* loadedDictEndPtr, +- const ZSTD_matchState_t** dictMatchStatePtr) ++ const ZSTD_MatchState_t** dictMatchStatePtr) + { + U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); + U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; +@@ -1157,7 +1294,7 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, + const void* blockEnd, + U32 maxDist, + U32* loadedDictEndPtr, +- const ZSTD_matchState_t** dictMatchStatePtr) ++ const ZSTD_MatchState_t** dictMatchStatePtr) + { + assert(loadedDictEndPtr != NULL); + assert(dictMatchStatePtr != NULL); +@@ -1167,10 +1304,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); assert(blockEndIdx >= loadedDictEnd); @@ -39423,18 +43770,66 @@ index 71697a11ae30..53cb582a8d2b 100644 */ DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); *loadedDictEndPtr = 0; -@@ -1199,7 +1232,9 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { +@@ -1199,9 +1341,11 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { * forget about the extDict. Handles overlap of the prefix and extDict. * Returns non-zero if the segment is contiguous. */ -MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, +- void const* src, size_t srcSize, +- int forceNonContiguous) +MEM_STATIC +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_window_update(ZSTD_window_t* window, - void const* src, size_t srcSize, - int forceNonContiguous) ++ const void* src, size_t srcSize, ++ int forceNonContiguous) { -@@ -1302,6 +1337,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) + BYTE const* const ip = (BYTE const*)src; + U32 contiguous = 1; +@@ -1228,8 +1372,9 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, + /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ + if ( (ip+srcSize > window->dictBase + window->lowLimit) + & (ip < window->dictBase + window->dictLimit)) { +- ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; +- U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? 
window->dictLimit : (U32)highInputIdx; ++ size_t const highInputIdx = (size_t)((ip + srcSize) - window->dictBase); ++ U32 const lowLimitMax = (highInputIdx > (size_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; ++ assert(highInputIdx < UINT_MAX); + window->lowLimit = lowLimitMax; + DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); + } +@@ -1239,7 +1384,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, + /* + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. + */ +-MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) ++MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog) + { + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.lowLimit; +@@ -1256,7 +1401,7 @@ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, u + /* + * Returns the lowest allowed match index in the prefix. + */ +-MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) ++MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_MatchState_t* ms, U32 curr, unsigned windowLog) + { + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.dictLimit; +@@ -1269,6 +1414,13 @@ MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, + return matchLowest; + } + ++/* index_safety_check: ++ * intentional underflow : ensure repIndex isn't overlapping dict + prefix ++ * @return 1 if values are not overlapping, ++ * 0 otherwise */ ++MEM_STATIC int ZSTD_index_overlap_check(const U32 prefixLowestIndex, const U32 repIndex) { ++ return ((U32)((prefixLowestIndex-1) - repIndex) >= 3); ++} + + + /* debug functions */ +@@ -1302,7 +1454,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) #endif @@ -39466,7 +43861,7 @@ index 71697a11ae30..53cb582a8d2b 100644 + assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0); + hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag; +} -+ + +/* Helper function for short cache matchfinders. + * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. 
*/ +MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) { @@ -39475,9 +43870,53 @@ index 71697a11ae30..53cb582a8d2b 100644 + return tag1 == tag2; +} - /* =============================================================== -@@ -1381,11 +1452,10 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity); + * Shared internal declarations +@@ -1319,6 +1506,25 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + + void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); + ++typedef struct { ++ U32 idx; /* Index in array of ZSTD_Sequence */ ++ U32 posInSequence; /* Position within sequence at idx */ ++ size_t posInSrc; /* Number of bytes given by sequences provided so far */ ++} ZSTD_SequencePosition; ++ ++/* for benchmark */ ++size_t ZSTD_convertBlockSequences(ZSTD_CCtx* cctx, ++ const ZSTD_Sequence* const inSeqs, size_t nbSequences, ++ int const repcodeResolution); ++ ++typedef struct { ++ size_t nbSequences; ++ size_t blockSize; ++ size_t litSize; ++} BlockSummary; ++ ++BlockSummary ZSTD_get1BlockSummary(const ZSTD_Sequence* seqs, size_t nbSeqs); ++ + /* ============================================================== + * Private declarations + * These prototypes shall only be called from within lib/compress +@@ -1330,7 +1536,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); + * Note: srcSizeHint == 0 means 0! + */ + ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( +- const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode); ++ const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_CParamMode_e mode); + + /*! ZSTD_initCStream_internal() : + * Private use only. Init streaming operation. +@@ -1342,7 +1548,7 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); + +-void ZSTD_resetSeqStore(seqStore_t* ssPtr); ++void ZSTD_resetSeqStore(SeqStore_t* ssPtr); + + /*! ZSTD_getCParamsFromCDict() : + * as the name implies */ +@@ -1381,11 +1587,10 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity); * This cannot be used when long range matching is enabled. * Zstd will use these sequences, and pass the literals to a secondary block * compressor. @@ -39490,37 +43929,10 @@ index 71697a11ae30..53cb582a8d2b 100644 /* ZSTD_cycleLog() : * condition for correct operation : hashLog > 1 */ -@@ -1396,4 +1466,55 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); +@@ -1396,4 +1601,28 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); */ void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize); -+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of -+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter. -+ * Note that the block delimiter must include the last literals of the block. -+ */ -+size_t -+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, -+ ZSTD_sequencePosition* seqPos, -+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, -+ const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); -+ -+/* Returns the number of bytes to move the current read position back by. -+ * Only non-zero if we ended up splitting a sequence. -+ * Otherwise, it may return a ZSTD error if something went wrong. 
-+ * -+ * This function will attempt to scan through blockSize bytes -+ * represented by the sequences in @inSeqs, -+ * storing any (partial) sequences. -+ * -+ * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to -+ * avoid splitting a match, or to avoid splitting a match such that it would produce a match -+ * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. -+ */ -+size_t -+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, -+ const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, -+ const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch); -+ +/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */ +MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) { + return params->extSeqProdFunc != NULL; @@ -39547,7 +43959,7 @@ index 71697a11ae30..53cb582a8d2b 100644 + #endif /* ZSTD_COMPRESS_H */ diff --git a/lib/zstd/compress/zstd_compress_literals.c b/lib/zstd/compress/zstd_compress_literals.c -index 52b0a8059aba..3e9ea46a670a 100644 +index 52b0a8059aba..ec39b4299b6f 100644 --- a/lib/zstd/compress/zstd_compress_literals.c +++ b/lib/zstd/compress/zstd_compress_literals.c @@ -1,5 +1,6 @@ @@ -39680,7 +44092,8 @@ index 52b0a8059aba..3e9ea46a670a 100644 size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); BYTE* const ostart = (BYTE*)dst; U32 singleStream = srcSize < 256; - symbolEncodingType_e hType = set_compressed; +- symbolEncodingType_e hType = set_compressed; ++ SymbolEncodingType_e hType = set_compressed; size_t cLitSize; - DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)", @@ -39833,7 +44246,7 @@ index 9775fb97cb70..a2a85d6b69e5 100644 #endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/lib/zstd/compress/zstd_compress_sequences.c b/lib/zstd/compress/zstd_compress_sequences.c -index 21ddc1b37acf..5c028c78d889 100644 +index 21ddc1b37acf..256980c9d85a 100644 --- a/lib/zstd/compress/zstd_compress_sequences.c +++ b/lib/zstd/compress/zstd_compress_sequences.c @@ -1,5 +1,6 @@ @@ -39853,7 +44266,22 @@ index 21ddc1b37acf..5c028c78d889 100644 */ return nbSeq >= 2048; } -@@ -166,7 +167,7 @@ ZSTD_selectEncodingType( +@@ -153,20 +154,20 @@ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, + return cost >> 8; + } + +-symbolEncodingType_e ++SymbolEncodingType_e + ZSTD_selectEncodingType( + FSE_repeat* repeatMode, unsigned const* count, unsigned const max, + size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, + FSE_CTable const* prevCTable, + short const* defaultNorm, U32 defaultNormLog, +- ZSTD_defaultPolicy_e const isDefaultAllowed, ++ ZSTD_DefaultPolicy_e const isDefaultAllowed, + ZSTD_strategy const strategy) + { + ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); if (mostFrequent == nbSeq) { *repeatMode = FSE_repeat_none; if (isDefaultAllowed && nbSeq <= 2) { @@ -39862,8 +44290,53 @@ index 21ddc1b37acf..5c028c78d889 100644 * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. * If basic encoding isn't possible, always choose RLE. 
*/ +@@ -241,7 +242,7 @@ typedef struct { + + size_t + ZSTD_buildCTable(void* dst, size_t dstCapacity, +- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, ++ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type, + unsigned* count, U32 max, + const BYTE* codeTable, size_t nbSeq, + const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, +@@ -293,7 +294,7 @@ ZSTD_encodeSequences_body( + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, +- seqDef const* sequences, size_t nbSeq, int longOffsets) ++ SeqDef const* sequences, size_t nbSeq, int longOffsets) + { + BIT_CStream_t blockStream; + FSE_CState_t stateMatchLength; +@@ -387,7 +388,7 @@ ZSTD_encodeSequences_default( + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, +- seqDef const* sequences, size_t nbSeq, int longOffsets) ++ SeqDef const* sequences, size_t nbSeq, int longOffsets) + { + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, +@@ -405,7 +406,7 @@ ZSTD_encodeSequences_bmi2( + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, +- seqDef const* sequences, size_t nbSeq, int longOffsets) ++ SeqDef const* sequences, size_t nbSeq, int longOffsets) + { + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, +@@ -421,7 +422,7 @@ size_t ZSTD_encodeSequences( + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, +- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) ++ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) + { + DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); + #if DYNAMIC_BMI2 diff --git a/lib/zstd/compress/zstd_compress_sequences.h b/lib/zstd/compress/zstd_compress_sequences.h -index 7991364c2f71..7fe6f4ff5cf2 100644 +index 7991364c2f71..14fdccb6547f 100644 --- a/lib/zstd/compress/zstd_compress_sequences.h +++ b/lib/zstd/compress/zstd_compress_sequences.h @@ -1,5 +1,6 @@ @@ -39874,8 +44347,50 @@ index 7991364c2f71..7fe6f4ff5cf2 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the +@@ -11,26 +12,27 @@ + #ifndef ZSTD_COMPRESS_SEQUENCES_H + #define ZSTD_COMPRESS_SEQUENCES_H + ++#include "zstd_compress_internal.h" /* SeqDef */ + #include "../common/fse.h" /* FSE_repeat, FSE_CTable */ +-#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */ ++#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */ + + typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1 +-} ZSTD_defaultPolicy_e; ++} ZSTD_DefaultPolicy_e; + +-symbolEncodingType_e ++SymbolEncodingType_e + ZSTD_selectEncodingType( + FSE_repeat* repeatMode, unsigned const* count, unsigned const max, + size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, + FSE_CTable const* prevCTable, + short const* defaultNorm, U32 defaultNormLog, +- ZSTD_defaultPolicy_e const isDefaultAllowed, ++ ZSTD_DefaultPolicy_e const isDefaultAllowed, + ZSTD_strategy const strategy); + + size_t + ZSTD_buildCTable(void* dst, size_t dstCapacity, +- FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, ++ FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type, + unsigned* count, U32 max, + const BYTE* codeTable, size_t nbSeq, + const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, +@@ -42,7 +44,7 @@ size_t ZSTD_encodeSequences( + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, +- seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); ++ SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); + + size_t ZSTD_fseBitCost( + FSE_CTable const* ctable, diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c -index 17d836cc84e8..41f6521b27cd 100644 +index 17d836cc84e8..dc12d64e935c 100644 --- a/lib/zstd/compress/zstd_compress_superblock.c +++ b/lib/zstd/compress/zstd_compress_superblock.c @@ -1,5 +1,6 @@ @@ -39907,8 +44422,12 @@ index 17d836cc84e8..41f6521b27cd 100644 { size_t const header = writeEntropy ? 200 : 0; size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header)); -@@ -53,8 +55,6 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, - symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; +@@ -50,11 +52,9 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart + lhSize; + U32 const singleStream = lhSize == 3; +- symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; ++ SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; size_t cLitSize = 0; - (void)bmi2; /* TODO bmi2... 
*/ @@ -39951,8 +44470,8 @@ index 17d836cc84e8..41f6521b27cd 100644 - const seqDef* const send = sequences + nbSeq; - const seqDef* sp = sstart; +static size_t -+ZSTD_seqDecompressedSize(seqStore_t const* seqStore, -+ const seqDef* sequences, size_t nbSeqs, ++ZSTD_seqDecompressedSize(SeqStore_t const* seqStore, ++ const SeqDef* sequences, size_t nbSeqs, + size_t litSize, int lastSubBlock) +{ size_t matchLengthSum = 0; @@ -39962,7 +44481,7 @@ index 17d836cc84e8..41f6521b27cd 100644 - ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); + size_t n; + for (n=0; nhuf.CTable, @@ -40070,6 +44598,15 @@ index 17d836cc84e8..41f6521b27cd 100644 } static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, +@@ -322,7 +328,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit + return 0; + } + +-static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, ++static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type, + const BYTE* codeTable, unsigned maxCode, + size_t nbSeq, const FSE_CTable* fseCTable, + const U8* additionalBits, @@ -385,7 +391,11 @@ static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, return cSeqSizeEstimate + sequencesSectionHeaderSize; } @@ -40108,11 +44645,11 @@ index 17d836cc84e8..41f6521b27cd 100644 } static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) -@@ -415,13 +427,56 @@ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMe +@@ -415,14 +427,57 @@ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMe return 0; } -+static size_t countLiterals(seqStore_t const* seqStore, const seqDef* sp, size_t seqCount) ++static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount) +{ + size_t n, total = 0; + assert(sp != NULL); @@ -40125,7 +44662,7 @@ index 17d836cc84e8..41f6521b27cd 100644 + +#define BYTESCALE 256 + -+static size_t sizeBlockSequences(const seqDef* sp, size_t nbSeqs, ++static size_t sizeBlockSequences(const SeqDef* sp, size_t nbSeqs, + size_t targetBudget, size_t avgLitCost, size_t avgSeqCost, + int firstSubBlock) +{ @@ -40162,20 +44699,26 @@ index 17d836cc84e8..41f6521b27cd 100644 - * All sub-blocks are compressed blocks (no raw or rle blocks). - * @return : compressed size of the super block (which is multiple ZSTD blocks) - * Or 0 if it failed to compress. */ +-static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, + * Entropy will be written into the first block. + * The following blocks use repeat_mode to compress. + * Sub-blocks are all compressed, except the last one when beneficial. + * @return : compressed size of the super block (which features multiple ZSTD blocks) + * or 0 if it failed to compress. 
*/ - static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, ++static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr, const ZSTD_compressedBlockState_t* prevCBlock, ZSTD_compressedBlockState_t* nextCBlock, -@@ -434,10 +489,12 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, +@@ -432,12 +487,14 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, + const int bmi2, U32 lastBlock, + void* workspace, size_t wkspSize) { - const seqDef* const sstart = seqStorePtr->sequencesStart; - const seqDef* const send = seqStorePtr->sequences; +- const seqDef* const sstart = seqStorePtr->sequencesStart; +- const seqDef* const send = seqStorePtr->sequences; - const seqDef* sp = sstart; -+ const seqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */ ++ const SeqDef* const sstart = seqStorePtr->sequencesStart; ++ const SeqDef* const send = seqStorePtr->sequences; ++ const SeqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */ + size_t const nbSeqs = (size_t)(send - sstart); const BYTE* const lstart = seqStorePtr->litStart; const BYTE* const lend = seqStorePtr->lit; @@ -40404,8 +44947,9 @@ index 17d836cc84e8..41f6521b27cd 100644 /* We have to regenerate the repcodes because we've skipped some sequences */ if (sp < send) { - seqDef const* seq; -+ const seqDef* seq; - repcodes_t rep; +- repcodes_t rep; ++ const SeqDef* seq; ++ Repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { - ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); @@ -40432,6 +44976,22 @@ index 17d836cc84e8..41f6521b27cd 100644 ZSTD_entropyCTablesMetadata_t entropyMetadata; FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore, +@@ -559,7 +675,7 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + &entropyMetadata, +- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); ++ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), ""); + + return ZSTD_compressSubBlock_multi(&zc->seqStore, + zc->blockState.prevCBlock, +@@ -569,5 +685,5 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, + dst, dstCapacity, + src, srcSize, + zc->bmi2, lastBlock, +- zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */); ++ zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */); + } diff --git a/lib/zstd/compress/zstd_compress_superblock.h b/lib/zstd/compress/zstd_compress_superblock.h index 224ece79546e..826bbc9e029b 100644 --- a/lib/zstd/compress/zstd_compress_superblock.h @@ -40445,7 +45005,7 @@ index 224ece79546e..826bbc9e029b 100644 * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/zstd/compress/zstd_cwksp.h b/lib/zstd/compress/zstd_cwksp.h -index 349fc923c355..86bc3c2c23c7 100644 +index 349fc923c355..dce42f653bae 100644 --- a/lib/zstd/compress/zstd_cwksp.h +++ b/lib/zstd/compress/zstd_cwksp.h @@ -1,5 +1,6 @@ @@ -40456,16 +45016,18 @@ index 349fc923c355..86bc3c2c23c7 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the -@@ -14,7 +15,9 @@ +@@ -14,8 +15,10 @@ /*-************************************* * Dependencies ***************************************/ +#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */ #include "../common/zstd_internal.h" +- +#include "../common/portability_macros.h" - ++#include "../common/compiler.h" /* ZS2_isPower2 */ /*-************************************* + * Constants @@ -41,8 +44,9 @@ ***************************************/ typedef enum { @@ -40539,7 +45101,7 @@ index 349fc923c355..86bc3c2c23c7 100644 MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { (void)ws; -@@ -168,6 +184,8 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { +@@ -168,14 +184,16 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { assert(ws->tableEnd <= ws->allocStart); assert(ws->tableValidEnd <= ws->allocStart); assert(ws->allocStart <= ws->workspaceEnd); @@ -40548,7 +45110,45 @@ index 349fc923c355..86bc3c2c23c7 100644 } /* -@@ -210,14 +228,10 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { + * Align must be a power of 2. + */ +-MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { ++MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) { + size_t const mask = align - 1; +- assert((align & mask) == 0); ++ assert(ZSTD_isPower2(align)); + return (size + mask) & ~mask; + } + +@@ -189,7 +207,7 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { + * to figure out how much space you need for the matchState tables. Everything + * else is though. + * +- * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size(). ++ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size(). + */ + MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { + if (size == 0) +@@ -197,12 +215,16 @@ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { + return size; + } + ++MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) { ++ return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment)); ++} ++ + /* + * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes. + * Used to determine the number of bytes required for a given "aligned". + */ +-MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { +- return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES)); ++MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) { ++ return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES); + } + + /* +@@ -210,14 +232,10 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { * for internal purposes (currently only alignment). 
*/ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { @@ -40566,11 +45166,13 @@ index 349fc923c355..86bc3c2c23c7 100644 return slackSpace; } -@@ -230,10 +244,18 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt +@@ -229,11 +247,23 @@ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { + MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) { size_t const alignBytesMask = alignBytes - 1; size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask; - assert((alignBytes & alignBytesMask) == 0); +- assert((alignBytes & alignBytesMask) == 0); - assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES); ++ assert(ZSTD_isPower2(alignBytes)); + assert(bytes < alignBytes); return bytes; } @@ -40579,14 +45181,27 @@ index 349fc923c355..86bc3c2c23c7 100644 + * Returns the initial value for allocStart which is used to determine the position from + * which we can allocate from the end of the workspace. + */ -+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) { -+ return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1)); ++MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) ++{ ++ char* endPtr = (char*)ws->workspaceEnd; ++ assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES)); ++ endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES); ++ return (void*)endPtr; +} + /* * Internal function. Do not use directly. * Reserves the given number of bytes within the aligned/buffer segment of the wksp, -@@ -274,27 +296,16 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase +@@ -246,7 +276,7 @@ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) + { + void* const alloc = (BYTE*)ws->allocStart - bytes; + void* const bottom = ws->tableEnd; +- DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining", ++ DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining", + alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); + ZSTD_cwksp_assert_internal_consistency(ws); + assert(alloc >= bottom); +@@ -274,27 +304,16 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase { assert(phase >= ws->phase); if (phase > ws->phase) { @@ -40620,7 +45235,7 @@ index 349fc923c355..86bc3c2c23c7 100644 DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, "table phase - alignment initial allocation failed!"); -@@ -302,7 +313,9 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase +@@ -302,7 +321,9 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase ws->tableEnd = objectEnd; /* table area starts being empty */ if (ws->tableValidEnd < ws->tableEnd) { ws->tableValidEnd = ws->tableEnd; @@ -40631,7 +45246,7 @@ index 349fc923c355..86bc3c2c23c7 100644 ws->phase = phase; ZSTD_cwksp_assert_internal_consistency(ws); } -@@ -314,7 +327,7 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase +@@ -314,7 +335,7 @@ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase */ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) { @@ -40640,24 +45255,26 @@ index 349fc923c355..86bc3c2c23c7 100644 } /* -@@ -343,6 +356,33 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) - return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); - } +@@ -345,29 +366,61 @@ 
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) -+/* -+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + /* + * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). + * This memory has been initialized at least once in the past. + * This doesn't mean it has been initialized this time, and it might contain data from previous + * operations. + * The main usage is for algorithms that might need read access into uninitialized memory. + * The algorithm must maintain safety under these conditions and must make sure it doesn't + * leak any of the past data (directly or in side channels). -+ */ + */ +-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) +MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes) -+{ + { +- void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), +- ZSTD_cwksp_alloc_aligned); +- assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); + size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES); + void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once); -+ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); ++ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + if(ptr && ptr < ws->initOnceStart) { + /* We assume the memory following the current allocation is either: + * 1. Not usable as initOnce memory (end of workspace) @@ -40671,10 +45288,17 @@ index 349fc923c355..86bc3c2c23c7 100644 + return ptr; +} + - /* - * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). - */ -@@ -356,18 +396,22 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) ++/* ++ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). ++ */ ++MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes) ++{ ++ void* const ptr = ZSTD_cwksp_reserve_internal(ws, ++ ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), ++ ZSTD_cwksp_alloc_aligned); ++ assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + return ptr; + } /* * Aligned on 64 bytes. These buffers have the special property that @@ -40701,7 +45325,37 @@ index 349fc923c355..86bc3c2c23c7 100644 } alloc = ws->tableEnd; end = (BYTE *)alloc + bytes; -@@ -451,7 +495,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { +@@ -387,7 +440,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) + + + assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); +- assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); ++ assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0); + return alloc; + } + +@@ -421,6 +474,20 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) + + return alloc; + } ++/* ++ * with alignment control ++ * Note : should happen only once, at workspace first initialization ++ */ ++MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment) ++{ ++ size_t const mask = alignment - 1; ++ size_t const surplus = (alignment > sizeof(void*)) ? 
alignment - sizeof(void*) : 0; ++ void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus); ++ if (start == NULL) return NULL; ++ if (surplus == 0) return start; ++ assert(ZSTD_isPower2(alignment)); ++ return (void*)(((size_t)start + surplus) & ~mask); ++} + + MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) + { +@@ -451,7 +518,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { assert(ws->tableValidEnd >= ws->objectEnd); assert(ws->tableValidEnd <= ws->allocStart); if (ws->tableValidEnd < ws->tableEnd) { @@ -40710,7 +45364,17 @@ index 349fc923c355..86bc3c2c23c7 100644 } ZSTD_cwksp_mark_tables_clean(ws); } -@@ -478,14 +522,23 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { +@@ -460,7 +527,8 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) { + * Invalidates table allocations. + * All other allocations remain valid. + */ +-MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) { ++MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) ++{ + DEBUGLOG(4, "cwksp: clearing tables!"); + + +@@ -478,14 +546,23 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { ws->tableEnd = ws->objectEnd; @@ -40737,7 +45401,7 @@ index 349fc923c355..86bc3c2c23c7 100644 /* * The provided workspace takes ownership of the buffer [start, start+size). * Any existing values in the workspace are ignored (the previously managed -@@ -498,6 +551,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c +@@ -498,6 +575,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c ws->workspaceEnd = (BYTE*)start + size; ws->objectEnd = ws->workspace; ws->tableValidEnd = ws->objectEnd; @@ -40745,7 +45409,7 @@ index 349fc923c355..86bc3c2c23c7 100644 ws->phase = ZSTD_cwksp_alloc_objects; ws->isStatic = isStatic; ZSTD_cwksp_clear(ws); -@@ -529,15 +583,6 @@ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { +@@ -529,15 +607,6 @@ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) { ZSTD_memset(src, 0, sizeof(ZSTD_cwksp)); } @@ -40761,7 +45425,7 @@ index 349fc923c355..86bc3c2c23c7 100644 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { return ws->allocFailed; } -@@ -550,17 +595,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { +@@ -550,17 +619,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) { * Returns if the estimated space needed for a wksp is within an acceptable limit of the * actual amount of space used. 
*/ @@ -40784,8 +45448,14 @@ index 349fc923c355..86bc3c2c23c7 100644 } +@@ -591,5 +654,4 @@ MEM_STATIC void ZSTD_cwksp_bump_oversized_duration( + } + } + +- + #endif /* ZSTD_CWKSP_H */ diff --git a/lib/zstd/compress/zstd_double_fast.c b/lib/zstd/compress/zstd_double_fast.c -index 76933dea2624..5ff54f17d92f 100644 +index 76933dea2624..995e83f3a183 100644 --- a/lib/zstd/compress/zstd_double_fast.c +++ b/lib/zstd/compress/zstd_double_fast.c @@ -1,5 +1,6 @@ @@ -40805,7 +45475,7 @@ index 76933dea2624..5ff54f17d92f 100644 -void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, ++void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -40843,11 +45513,11 @@ index 76933dea2624..5ff54f17d92f 100644 + +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, ++void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms, void const* end, ZSTD_dictTableLoadMethod_e dtlm) { const ZSTD_compressionParameters* const cParams = &ms->cParams; -@@ -43,11 +85,24 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, +@@ -43,13 +85,26 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, /* Only load extra positions for ZSTD_dtlm_full */ if (dtlm == ZSTD_dtlm_fast) break; @@ -40855,7 +45525,7 @@ index 76933dea2624..5ff54f17d92f 100644 + } } +} + -+void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, ++void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) @@ -40871,8 +45541,11 @@ index 76933dea2624..5ff54f17d92f 100644 FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_doubleFast_noDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) + { + ZSTD_compressionParameters const* cParams = &ms->cParams; @@ -67,7 +122,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; @@ -40882,7 +45555,22 @@ index 76933dea2624..5ff54f17d92f 100644 size_t mLength; U32 offset; -@@ -100,8 +155,8 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( +@@ -88,9 +143,14 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( + const BYTE* matchl0; /* the long match for ip */ + const BYTE* matchs0; /* the short match for ip */ + const BYTE* matchl1; /* the long match for ip1 */ ++ const BYTE* matchs0_safe; /* matchs0 or safe address */ + + const BYTE* ip = istart; /* the current position */ + const BYTE* ip1; /* the next position */ ++ /* Array of ~random data, should have low probability of matching data ++ * we load from here instead of from tables, if matchl0/matchl1 are ++ * invalid indices. Used to avoid unpredictable branches. 
*/ ++ const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4}; + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic"); + +@@ -100,8 +160,8 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( U32 const current = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); U32 const maxRep = current - windowLow; @@ -40893,7 +45581,7 @@ index 76933dea2624..5ff54f17d92f 100644 } /* Outer Loop: one iteration per match found and stored */ -@@ -131,7 +186,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( +@@ -131,30 +191,35 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; ip++; @@ -40902,7 +45590,44 @@ index 76933dea2624..5ff54f17d92f 100644 goto _match_stored; } -@@ -175,9 +230,13 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( + hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); + +- if (idxl0 > prefixLowestIndex) { ++ /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch. ++ * However expression below complies into conditional move. Since ++ * match is unlikely and we only *branch* on idxl0 > prefixLowestIndex ++ * if there is a match, all branches become predictable. */ ++ { const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]); ++ + /* check prefix long match */ +- if (MEM_read64(matchl0) == MEM_read64(ip)) { ++ if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) { + mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8; + offset = (U32)(ip-matchl0); + while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */ + goto _match_found; +- } +- } ++ } } + + idxl1 = hashLong[hl1]; + matchl1 = base + idxl1; + +- if (idxs0 > prefixLowestIndex) { +- /* check prefix short match */ +- if (MEM_read32(matchs0) == MEM_read32(ip)) { +- goto _search_next_long; +- } ++ /* Same optimization as matchl0 above */ ++ matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]); ++ ++ /* check prefix short match */ ++ if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) { ++ goto _search_next_long; + } + + if (ip1 >= nextStep) { +@@ -175,30 +240,36 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( } while (ip1 <= ilimit); _cleanup: @@ -40918,7 +45643,40 @@ index 76933dea2624..5ff54f17d92f 100644 /* Return the last literals size */ return (size_t)(iend - anchor); -@@ -217,7 +276,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( + + _search_next_long: + +- /* check prefix long +1 match */ +- if (idxl1 > prefixLowestIndex) { +- if (MEM_read64(matchl1) == MEM_read64(ip1)) { ++ /* short match found: let's check for a longer one */ ++ mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; ++ offset = (U32)(ip - matchs0); ++ ++ /* check long match at +1 position */ ++ if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) { ++ size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8; ++ if (l1len > mLength) { ++ /* use the long match instead */ + ip = ip1; +- mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8; ++ mLength = l1len; + offset = (U32)(ip-matchl1); +- while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */ +- goto _match_found; ++ matchs0 = matchl1; + } + } + +- /* if no long +1 match, explore the short match we found */ +- mLength = ZSTD_count(ip+4, 
matchs0+4, iend) + 4; +- offset = (U32)(ip - matchs0); +- while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */ ++ while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */ + + /* fall-through */ + +@@ -217,7 +288,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( hashLong[hl1] = (U32)(ip1 - base); } @@ -40927,7 +45685,7 @@ index 76933dea2624..5ff54f17d92f 100644 _match_stored: /* match found */ -@@ -243,7 +302,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( +@@ -243,7 +314,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); @@ -40936,23 +45694,29 @@ index 76933dea2624..5ff54f17d92f 100644 ip += rLength; anchor = ip; continue; /* faster when present ... (?) */ -@@ -254,6 +313,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( +@@ -254,8 +325,9 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic( FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, -@@ -275,7 +335,6 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + U32 const mls /* template */) + { +@@ -275,9 +347,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; - const ZSTD_matchState_t* const dms = ms->dictMatchState; +- const ZSTD_matchState_t* const dms = ms->dictMatchState; ++ const ZSTD_MatchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams; -@@ -286,8 +345,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + const U32* const dictHashLong = dms->hashTable; + const U32* const dictHashSmall = dms->chainTable; +@@ -286,8 +357,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); @@ -40963,7 +45727,7 @@ index 76933dea2624..5ff54f17d92f 100644 const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); -@@ -295,6 +354,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -295,6 +366,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( /* if a dictionary is attached, it must be within window range */ assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); @@ -40977,7 +45741,7 @@ index 76933dea2624..5ff54f17d92f 100644 /* init */ ip += (dictAndPrefixLength == 0); -@@ -309,8 +375,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -309,8 +387,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( U32 offset; size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); @@ -40992,7 +45756,13 @@ index 
76933dea2624..5ff54f17d92f 100644 U32 const curr = (U32)(ip-base); U32 const matchIndexL = hashLong[h2]; U32 matchIndexS = hashSmall[h]; -@@ -328,7 +398,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -323,26 +405,24 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ + + /* check repcode */ +- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) ++ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; @@ -41001,11 +45771,20 @@ index 76933dea2624..5ff54f17d92f 100644 goto _match_stored; } -@@ -340,9 +410,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( - while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ - goto _match_found; - } +- if (matchIndexL > prefixLowestIndex) { ++ if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { + /* check prefix long match */ +- if (MEM_read64(matchLong) == MEM_read64(ip)) { +- mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; +- offset = (U32)(ip-matchLong); +- while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ +- goto _match_found; +- } - } else { ++ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; ++ offset = (U32)(ip-matchLong); ++ while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ ++ goto _match_found; + } else if (dictTagsMatchL) { /* check dictMatchState long match */ - U32 const dictMatchIndexL = dictHashLong[dictHL]; @@ -41013,7 +45792,12 @@ index 76933dea2624..5ff54f17d92f 100644 const BYTE* dictMatchL = dictBase + dictMatchIndexL; assert(dictMatchL < dictEnd); -@@ -358,9 +428,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -354,13 +434,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + } } + + if (matchIndexS > prefixLowestIndex) { +- /* check prefix short match */ ++ /* short match candidate */ if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } @@ -41025,7 +45809,7 @@ index 76933dea2624..5ff54f17d92f 100644 match = dictBase + dictMatchIndexS; matchIndexS = dictMatchIndexS + dictIndexDelta; -@@ -375,10 +445,11 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -375,25 +455,24 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( continue; _search_next_long: @@ -41039,11 +45823,22 @@ index 76933dea2624..5ff54f17d92f 100644 const BYTE* matchL3 = base + matchIndexL3; hashLong[hl3] = curr + 1; -@@ -391,9 +462,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( - while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ - goto _match_found; - } + /* check prefix long +1 match */ +- if (matchIndexL3 > prefixLowestIndex) { +- if (MEM_read64(matchL3) == MEM_read64(ip+1)) { +- mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; +- ip++; +- offset = (U32)(ip-matchL3); +- while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ +- goto _match_found; +- } - } else { ++ if ((matchIndexL3 >= prefixLowestIndex) && 
(MEM_read64(matchL3) == MEM_read64(ip+1))) { ++ mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; ++ ip++; ++ offset = (U32)(ip-matchL3); ++ while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ ++ goto _match_found; + } else if (dictTagsMatchL3) { /* check dict long +1 match */ - U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; @@ -41051,7 +45846,7 @@ index 76933dea2624..5ff54f17d92f 100644 const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; assert(dictMatchL3 < dictEnd); if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { -@@ -419,7 +490,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -419,7 +498,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( offset_2 = offset_1; offset_1 = offset; @@ -41060,7 +45855,13 @@ index 76933dea2624..5ff54f17d92f 100644 _match_stored: /* match found */ -@@ -448,7 +519,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -443,12 +522,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ? + dictBase + repIndex2 - dictIndexDelta : + base + repIndex2; +- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) ++ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2)) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ @@ -41069,7 +45870,7 @@ index 76933dea2624..5ff54f17d92f 100644 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; -@@ -461,8 +532,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( +@@ -461,8 +540,8 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( } /* while (ip < ilimit) */ /* save reps for next block */ @@ -41080,18 +45881,54 @@ index 76933dea2624..5ff54f17d92f 100644 /* Return the last literals size */ return (size_t)(iend - anchor); -@@ -527,7 +598,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState( +@@ -470,7 +549,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( + + #define ZSTD_GEN_DFAST_FN(dictMode, mls) \ + static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \ +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ +@@ -488,7 +567,7 @@ ZSTD_GEN_DFAST_FN(dictMatchState, 7) + + + size_t ZSTD_compressBlock_doubleFast( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + { + const U32 mls = ms->cParams.minMatch; +@@ -508,7 +587,7 @@ size_t ZSTD_compressBlock_doubleFast( + + + size_t ZSTD_compressBlock_doubleFast_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + { + const U32 mls = ms->cParams.minMatch; +@@ -527,8 +606,10 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState( } -static 
size_t ZSTD_compressBlock_doubleFast_extDict_generic( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_doubleFast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls /* template */) -@@ -585,7 +658,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( + { +@@ -579,13 +660,13 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( + size_t mLength; + hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ + +- if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ ++ if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) + & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */ + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; @@ -41100,7 +45937,7 @@ index 76933dea2624..5ff54f17d92f 100644 } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; -@@ -596,7 +669,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( +@@ -596,7 +677,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; @@ -41109,7 +45946,7 @@ index 76933dea2624..5ff54f17d92f 100644 } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); -@@ -621,7 +694,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( +@@ -621,7 +702,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( } offset_2 = offset_1; offset_1 = offset; @@ -41118,7 +45955,14 @@ index 76933dea2624..5ff54f17d92f 100644 } else { ip += ((ip-anchor) >> kSearchStrength) + 1; -@@ -653,7 +726,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( +@@ -647,13 +728,13 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; +- if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ ++ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) + & (offset_2 <= current2 - dictStartIndex)) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ @@ -41127,14 +45971,23 @@ index 76933dea2624..5ff54f17d92f 100644 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; -@@ -694,3 +767,5 @@ size_t ZSTD_compressBlock_doubleFast_extDict( +@@ -677,7 +758,7 @@ ZSTD_GEN_DFAST_FN(extDict, 6) + ZSTD_GEN_DFAST_FN(extDict, 7) + + size_t ZSTD_compressBlock_doubleFast_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + { + U32 const mls = ms->cParams.minMatch; +@@ -694,3 +775,5 @@ size_t ZSTD_compressBlock_doubleFast_extDict( return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); } } + +#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ diff --git a/lib/zstd/compress/zstd_double_fast.h b/lib/zstd/compress/zstd_double_fast.h -index 6822bde65a1d..b7ddc714f13e 100644 +index 6822bde65a1d..011556ce56f7 100644 --- a/lib/zstd/compress/zstd_double_fast.h +++ b/lib/zstd/compress/zstd_double_fast.h @@ -1,5 +1,6 @@ @@ -41145,24 +45998,36 @@ index 6822bde65a1d..b7ddc714f13e 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the -@@ -15,8 +16,12 @@ +@@ -11,22 +12,32 @@ + #ifndef ZSTD_DOUBLE_FAST_H + #define ZSTD_DOUBLE_FAST_H + +- #include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ +-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, +- void const* end, ZSTD_dictTableLoadMethod_e dtlm); +#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR + - void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, -- void const* end, ZSTD_dictTableLoadMethod_e dtlm); ++void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); + size_t ZSTD_compressBlock_doubleFast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -@@ -27,6 +32,14 @@ size_t ZSTD_compressBlock_doubleFast_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + size_t ZSTD_compressBlock_doubleFast_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + size_t ZSTD_compressBlock_doubleFast_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); +- +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST ZSTD_compressBlock_doubleFast +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE ZSTD_compressBlock_doubleFast_dictMatchState +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT ZSTD_compressBlock_doubleFast_extDict @@ -41172,10 +46037,9 @@ index 6822bde65a1d..b7ddc714f13e 100644 +#define ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT NULL +#endif /* ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR */ - #endif /* ZSTD_DOUBLE_FAST_H */ diff --git a/lib/zstd/compress/zstd_fast.c b/lib/zstd/compress/zstd_fast.c -index a752e6beab52..b7a63ba4ce56 100644 +index 
a752e6beab52..60e07e839e5f 100644 --- a/lib/zstd/compress/zstd_fast.c +++ b/lib/zstd/compress/zstd_fast.c @@ -1,5 +1,6 @@ @@ -41192,7 +46056,7 @@ index a752e6beab52..b7a63ba4ce56 100644 +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, ++void ZSTD_fillHashTableForCDict(ZSTD_MatchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm) +{ @@ -41204,8 +46068,7 @@ index a752e6beab52..b7a63ba4ce56 100644 + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; - --void ZSTD_fillHashTable(ZSTD_matchState_t* ms, ++ + /* Currently, we always use ZSTD_dtlm_full for filling CDict tables. + * Feel free to remove this assert if there's a good reason! */ + assert(dtlm == ZSTD_dtlm_full); @@ -41225,12 +46088,13 @@ index a752e6beab52..b7a63ba4ce56 100644 + size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */ + ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p); -+ } } } } ++ } } } } +} -+ + +-void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, ++void ZSTD_fillHashTableForCCtx(ZSTD_MatchState_t* ms, const void* const end, ZSTD_dictTableLoadMethod_e dtlm) { @@ -41245,11 +46109,11 @@ index a752e6beab52..b7a63ba4ce56 100644 /* Always insert every fastHashFillStep position into the hash table. * Insert the other positions if their hash entry is empty. */ -@@ -42,6 +85,18 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +@@ -42,6 +85,60 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, } } } } } -+void ZSTD_fillHashTable(ZSTD_matchState_t* ms, ++void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp) @@ -41260,23 +46124,77 @@ index a752e6beab52..b7a63ba4ce56 100644 + ZSTD_fillHashTableForCCtx(ms, end, dtlm); + } +} ++ ++ ++typedef int (*ZSTD_match4Found) (const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit); ++ ++static int ++ZSTD_match4Found_cmov(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) ++{ ++ /* Array of ~random data, should have low probability of matching data. ++ * Load from here if the index is invalid. ++ * Used to avoid unpredictable branches. */ ++ static const BYTE dummy[] = {0x12,0x34,0x56,0x78}; ++ ++ /* currentIdx >= lowLimit is a (somewhat) unpredictable branch. ++ * However expression below compiles into conditional move. ++ */ ++ const BYTE* mvalAddr = ZSTD_selectAddr(matchIdx, idxLowLimit, matchAddress, dummy); ++ /* Note: this used to be written as : return test1 && test2; ++ * Unfortunately, once inlined, these tests become branches, ++ * in which case it becomes critical that they are executed in the right order (test1 then test2). ++ * So we have to write these tests in a specific manner to ensure their ordering. 
++ */ ++ if (MEM_read32(currentPtr) != MEM_read32(mvalAddr)) return 0; ++ /* force ordering of these tests, which matters once the function is inlined, as they become branches */ ++ __asm__(""); ++ return matchIdx >= idxLowLimit; ++} ++ ++static int ++ZSTD_match4Found_branch(const BYTE* currentPtr, const BYTE* matchAddress, U32 matchIdx, U32 idxLowLimit) ++{ ++ /* using a branch instead of a cmov, ++ * because it's faster in scenarios where matchIdx >= idxLowLimit is generally true, ++ * aka almost all candidates are within range */ ++ U32 mval; ++ if (matchIdx >= idxLowLimit) { ++ mval = MEM_read32(matchAddress); ++ } else { ++ mval = MEM_read32(currentPtr) ^ 1; /* guaranteed to not match. */ ++ } ++ ++ return (MEM_read32(currentPtr) == mval); ++} + /* * If you squint hard enough (and ignore repcodes), the search operation at any -@@ -89,8 +144,9 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +@@ -89,17 +186,17 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, * * This is also the work we do at the beginning to enter the loop initially. */ -FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_fast_noDict_generic( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_fast_noDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, - U32 const mls, U32 const hasStep) -@@ -117,7 +173,7 @@ ZSTD_compressBlock_fast_noDict_generic( +- U32 const mls, U32 const hasStep) ++ U32 const mls, int useCmov) + { + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hlog = cParams->hashLog; +- /* support stepSize of 0 */ +- size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2; ++ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; /* min 2 */ + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); +@@ -117,12 +214,11 @@ ZSTD_compressBlock_fast_noDict_generic( U32 rep_offset1 = rep[0]; U32 rep_offset2 = rep[1]; @@ -41285,7 +46203,20 @@ index a752e6beab52..b7a63ba4ce56 100644 size_t hash0; /* hash for ip0 */ size_t hash1; /* hash for ip1 */ -@@ -141,8 +197,8 @@ ZSTD_compressBlock_fast_noDict_generic( +- U32 idx; /* match idx for ip0 */ +- U32 mval; /* src value at match idx */ ++ U32 matchIdx; /* match idx for ip0 */ + + U32 offcode; + const BYTE* match0; +@@ -135,14 +231,15 @@ ZSTD_compressBlock_fast_noDict_generic( + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); ++ const ZSTD_match4Found matchFound = useCmov ? 
ZSTD_match4Found_cmov : ZSTD_match4Found_branch; + + DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); + ip0 += (ip0 == prefixStart); { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; @@ -41296,7 +46227,16 @@ index a752e6beab52..b7a63ba4ce56 100644 } /* start each op */ -@@ -180,8 +236,14 @@ ZSTD_compressBlock_fast_noDict_generic( +@@ -163,7 +260,7 @@ ZSTD_compressBlock_fast_noDict_generic( + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + +- idx = hashTable[hash0]; ++ matchIdx = hashTable[hash0]; + + do { + /* load repcode match for ip[2]*/ +@@ -180,26 +277,28 @@ ZSTD_compressBlock_fast_noDict_generic( mLength = ip0[-1] == match0[-1]; ip0 -= mLength; match0 -= mLength; @@ -41304,50 +46244,75 @@ index a752e6beab52..b7a63ba4ce56 100644 + offcode = REPCODE1_TO_OFFBASE; mLength += 4; + -+ /* First write next hash table entry; we've already calculated it. -+ * This write is known to be safe because the ip1 is before the ++ /* Write next hash table entry: it's already calculated. ++ * This write is known to be safe because ip1 is before the + * repcode (ip2). */ + hashTable[hash1] = (U32)(ip1 - base); + goto _match; } -@@ -195,6 +257,12 @@ ZSTD_compressBlock_fast_noDict_generic( - /* check match at ip[0] */ - if (MEM_read32(ip0) == mval) { - /* found a match! */ -+ -+ /* First write next hash table entry; we've already calculated it. -+ * This write is known to be safe because the ip1 == ip0 + 1, so -+ * we know we will resume searching after ip1 */ +- /* load match for ip[0] */ +- if (idx >= prefixStartIndex) { +- mval = MEM_read32(base + idx); +- } else { +- mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ +- } ++ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { ++ /* Write next hash table entry (it's already calculated). ++ * This write is known to be safe because the ip1 == ip0 + 1, ++ * so searching will resume after ip1 */ + hashTable[hash1] = (U32)(ip1 - base); -+ + +- /* check match at ip[0] */ +- if (MEM_read32(ip0) == mval) { +- /* found a match! */ goto _offset; } -@@ -224,6 +292,21 @@ ZSTD_compressBlock_fast_noDict_generic( - /* check match at ip[0] */ - if (MEM_read32(ip0) == mval) { - /* found a match! */ -+ -+ /* first write next hash table entry; we've already calculated it */ + /* lookup ip[1] */ +- idx = hashTable[hash1]; ++ matchIdx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; +@@ -214,21 +313,19 @@ ZSTD_compressBlock_fast_noDict_generic( + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + +- /* load match for ip[0] */ +- if (idx >= prefixStartIndex) { +- mval = MEM_read32(base + idx); +- } else { +- mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ +- } +- +- /* check match at ip[0] */ +- if (MEM_read32(ip0) == mval) { +- /* found a match! */ ++ if (matchFound(ip0, base + matchIdx, matchIdx, prefixStartIndex)) { ++ /* Write next hash table entry, since it's already calculated */ + if (step <= 4) { -+ /* We need to avoid writing an index into the hash table >= the -+ * position at which we will pick up our searching after we've -+ * taken this match. -+ * -+ * The minimum possible match has length 4, so the earliest ip0 -+ * can be after we take this match will be the current ip0 + 4. -+ * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely -+ * write this position. -+ */ ++ /* Avoid writing an index if it's >= position where search will resume. 
++ * The minimum possible match has length 4, so search can resume at ip0 + 4. ++ */ + hashTable[hash1] = (U32)(ip1 - base); + } -+ goto _offset; } -@@ -254,9 +337,24 @@ ZSTD_compressBlock_fast_noDict_generic( + /* lookup ip[1] */ +- idx = hashTable[hash1]; ++ matchIdx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; +@@ -250,13 +347,28 @@ ZSTD_compressBlock_fast_noDict_generic( + } while (ip3 < ilimit); + + _cleanup: +- /* Note that there are probably still a couple positions we could search. ++ /* Note that there are probably still a couple positions one could search. * However, it seems to be a meaningful performance hit to try to search * them. So let's not. */ @@ -41374,8 +46339,12 @@ index a752e6beab52..b7a63ba4ce56 100644 /* Return the last literals size */ return (size_t)(iend - anchor); -@@ -267,7 +365,7 @@ ZSTD_compressBlock_fast_noDict_generic( - match0 = base + idx; +@@ -264,10 +376,10 @@ ZSTD_compressBlock_fast_noDict_generic( + _offset: /* Requires: ip0, idx */ + + /* Compute the offset code. */ +- match0 = base + idx; ++ match0 = base + matchIdx; rep_offset2 = rep_offset1; rep_offset1 = (U32)(ip0-match0); - offcode = STORE_OFFSET(rep_offset1); @@ -41383,7 +46352,7 @@ index a752e6beab52..b7a63ba4ce56 100644 mLength = 4; /* Count the backwards match length. */ -@@ -287,11 +385,6 @@ ZSTD_compressBlock_fast_noDict_generic( +@@ -287,11 +399,6 @@ ZSTD_compressBlock_fast_noDict_generic( ip0 += mLength; anchor = ip0; @@ -41395,7 +46364,7 @@ index a752e6beab52..b7a63ba4ce56 100644 /* Fill table and check for immediate repcode. */ if (ip0 <= ilimit) { /* Fill Table */ -@@ -306,7 +399,7 @@ ZSTD_compressBlock_fast_noDict_generic( +@@ -306,7 +413,7 @@ ZSTD_compressBlock_fast_noDict_generic( { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); ip0 += rLength; @@ -41404,15 +46373,70 @@ index a752e6beab52..b7a63ba4ce56 100644 anchor = ip0; continue; /* faster when present (confirmed on gcc-8) ... (?) 
*/ } } } -@@ -369,6 +462,7 @@ size_t ZSTD_compressBlock_fast( +@@ -314,12 +421,12 @@ ZSTD_compressBlock_fast_noDict_generic( + goto _start; + } + +-#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \ +- static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \ +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ ++#define ZSTD_GEN_FAST_FN(dictMode, mml, cmov) \ ++ static size_t ZSTD_compressBlock_fast_##dictMode##_##mml##_##cmov( \ ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ +- return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \ ++ return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mml, cmov); \ + } + + ZSTD_GEN_FAST_FN(noDict, 4, 1) +@@ -333,13 +440,15 @@ ZSTD_GEN_FAST_FN(noDict, 6, 0) + ZSTD_GEN_FAST_FN(noDict, 7, 0) + + size_t ZSTD_compressBlock_fast( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + { +- U32 const mls = ms->cParams.minMatch; ++ U32 const mml = ms->cParams.minMatch; ++ /* use cmov when "candidate in range" branch is likely unpredictable */ ++ int const useCmov = ms->cParams.windowLog < 19; + assert(ms->dictMatchState == NULL); +- if (ms->cParams.targetLength > 1) { +- switch(mls) ++ if (useCmov) { ++ switch(mml) + { + default: /* includes case 3 */ + case 4 : +@@ -352,7 +461,8 @@ size_t ZSTD_compressBlock_fast( + return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); + } + } else { +- switch(mls) ++ /* use a branch instead */ ++ switch(mml) + { + default: /* includes case 3 */ + case 4 : +@@ -364,13 +474,13 @@ size_t ZSTD_compressBlock_fast( + case 7 : + return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); + } +- + } } FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_fast_dictMatchState_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) -@@ -380,14 +474,14 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( + { + const ZSTD_compressionParameters* const cParams = &ms->cParams; +@@ -380,16 +490,16 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( U32 const stepSize = cParams->targetLength + !(cParams->targetLength); const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; @@ -41427,9 +46451,12 @@ index a752e6beab52..b7a63ba4ce56 100644 U32 offset_1=rep[0], offset_2=rep[1]; - U32 offsetSaved = 0; - const ZSTD_matchState_t* const dms = ms->dictMatchState; +- const ZSTD_matchState_t* const dms = ms->dictMatchState; ++ const ZSTD_MatchState_t* const dms = ms->dictMatchState; const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; -@@ -397,13 +491,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( + const U32* const dictHashTable = dms->hashTable; + const U32 dictStartIndex = dms->window.dictLimit; +@@ -397,13 +507,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* const dictStart = dictBase + dictStartIndex; const BYTE* const dictEnd = dms->window.nextSrc; const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); @@ -41446,7 +46473,7 @@ index 
a752e6beab52..b7a63ba4ce56 100644 assert(endIndex - prefixStartIndex <= maxDistance); (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ -@@ -413,106 +507,155 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( +@@ -413,106 +523,154 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); @@ -41525,8 +46552,7 @@ index a752e6beab52..b7a63ba4ce56 100644 + size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls); + hashTable[hash0] = curr; /* update hash table */ + -+ if (((U32) ((prefixStartIndex - 1) - repIndex) >= -+ 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ ++ if ((ZSTD_index_overlap_check(prefixStartIndex, repIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) { + const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4; @@ -41559,8 +46585,8 @@ index a752e6beab52..b7a63ba4ce56 100644 + } + } + -+ if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) { -+ /* found a regular match */ ++ if (ZSTD_match4Found_cmov(ip0, match, matchIndex, prefixStartIndex)) { ++ /* found a regular match of size >= 4 */ + U32 const offset = (U32) (ip0 - match); + mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4; + while (((ip0 > anchor) & (match > prefixStart)) @@ -41632,8 +46658,9 @@ index a752e6beab52..b7a63ba4ce56 100644 const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase - dictIndexDelta + repIndex2 : base + repIndex2; - if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) +- if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { ++ if ( (ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) + && (MEM_read32(repMatch2) == MEM_read32(ip0))) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; @@ -41667,18 +46694,28 @@ index a752e6beab52..b7a63ba4ce56 100644 /* Return the last literals size */ return (size_t)(iend - anchor); -@@ -545,7 +688,9 @@ size_t ZSTD_compressBlock_fast_dictMatchState( +@@ -525,7 +683,7 @@ ZSTD_GEN_FAST_FN(dictMatchState, 6, 0) + ZSTD_GEN_FAST_FN(dictMatchState, 7, 0) + + size_t ZSTD_compressBlock_fast_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + { + U32 const mls = ms->cParams.minMatch; +@@ -545,19 +703,20 @@ size_t ZSTD_compressBlock_fast_dictMatchState( } -static size_t ZSTD_compressBlock_fast_extDict_generic( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_fast_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { -@@ -553,11 +698,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( + const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ @@ -41691,7 +46728,7 @@ index a752e6beab52..b7a63ba4ce56 100644 const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); -@@ -570,6 +714,28 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( +@@ -570,6 +729,28 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; @@ -41720,7 +46757,7 @@ index a752e6beab52..b7a63ba4ce56 100644 (void)hasStep; /* not currently specialized on whether it's accelerated */ -@@ -579,75 +745,202 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( +@@ -579,75 +760,202 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( if (prefixStartIndex == dictStartIndex) return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); @@ -41966,7 +47003,7 @@ index a752e6beab52..b7a63ba4ce56 100644 + while (ip0 <= ilimit) { + U32 const repIndex2 = (U32)(ip0-base) - offset_2; + const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; -+ if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */ ++ if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2)) & (offset_2 > 0)) + && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; @@ -41984,7 +47021,12 @@ index a752e6beab52..b7a63ba4ce56 100644 } ZSTD_GEN_FAST_FN(extDict, 4, 0) -@@ -660,6 +953,7 @@ size_t ZSTD_compressBlock_fast_extDict( +@@ -656,10 +964,11 @@ ZSTD_GEN_FAST_FN(extDict, 6, 0) + ZSTD_GEN_FAST_FN(extDict, 7, 0) + + size_t ZSTD_compressBlock_fast_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { U32 const mls = ms->cParams.minMatch; @@ -41993,7 +47035,7 @@ index a752e6beab52..b7a63ba4ce56 100644 { default: /* includes case 3 */ diff --git a/lib/zstd/compress/zstd_fast.h b/lib/zstd/compress/zstd_fast.h -index fddc2f532d21..e64d9e1b2d39 100644 +index fddc2f532d21..04fde0a72a4e 100644 --- a/lib/zstd/compress/zstd_fast.h +++ b/lib/zstd/compress/zstd_fast.h @@ -1,5 +1,6 @@ @@ -42004,18 +47046,36 @@ index fddc2f532d21..e64d9e1b2d39 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the -@@ -16,7 +17,8 @@ +@@ -11,21 +12,20 @@ + #ifndef ZSTD_FAST_H + #define ZSTD_FAST_H + +- + #include "../common/mem.h" /* U32 */ #include "zstd_compress_internal.h" - void ZSTD_fillHashTable(ZSTD_matchState_t* ms, +-void ZSTD_fillHashTable(ZSTD_matchState_t* ms, - void const* end, ZSTD_dictTableLoadMethod_e dtlm); ++void ZSTD_fillHashTable(ZSTD_MatchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm, + ZSTD_tableFillPurpose_e tfp); size_t ZSTD_compressBlock_fast( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); + size_t ZSTD_compressBlock_fast_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + size_t ZSTD_compressBlock_fast_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +- + #endif /* ZSTD_FAST_H */ diff --git a/lib/zstd/compress/zstd_lazy.c b/lib/zstd/compress/zstd_lazy.c -index 0298a01a7504..3e88d8a1a136 100644 +index 0298a01a7504..88e2501fe3ef 100644 --- a/lib/zstd/compress/zstd_lazy.c +++ b/lib/zstd/compress/zstd_lazy.c @@ -1,5 +1,6 @@ @@ -42048,7 +47108,7 @@ index 0298a01a7504..3e88d8a1a136 100644 -ZSTD_updateDUBT(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_updateDUBT(ZSTD_matchState_t* ms, ++void ZSTD_updateDUBT(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend, U32 mls) { @@ -42060,22 +47120,33 @@ index 0298a01a7504..3e88d8a1a136 100644 -ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, ++void ZSTD_insertDUBT1(const ZSTD_MatchState_t* ms, U32 curr, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) -@@ -149,8 +160,9 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, +@@ -149,9 +160,10 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, } -static size_t -ZSTD_DUBT_findBetterDictMatch ( +- const ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_DUBT_findBetterDictMatch ( - const ZSTD_matchState_t* ms, ++ 
const ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, + size_t bestLength, +@@ -159,7 +171,7 @@ ZSTD_DUBT_findBetterDictMatch ( + U32 const mls, + const ZSTD_dictMode_e dictMode) + { +- const ZSTD_matchState_t * const dms = ms->dictMatchState; ++ const ZSTD_MatchState_t * const dms = ms->dictMatchState; + const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; + const U32 * const dictHashTable = dms->hashTable; + U32 const hashLog = dmsCParams->hashLog; @@ -197,8 +209,8 @@ ZSTD_DUBT_findBetterDictMatch ( U32 matchIndex = dictMatchIndex + dictIndexDelta; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { @@ -42104,7 +47175,7 @@ index 0298a01a7504..3e88d8a1a136 100644 -ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, ++size_t ZSTD_DUBT_findBestMatch(ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, - size_t* offsetPtr, + size_t* offBasePtr, @@ -42142,7 +47213,7 @@ index 0298a01a7504..3e88d8a1a136 100644 } return bestLength; } -@@ -378,17 +391,18 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, +@@ -378,24 +391,25 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, /* ZSTD_BtFindBestMatch() : Tree updater, providing best match */ @@ -42150,7 +47221,7 @@ index 0298a01a7504..3e88d8a1a136 100644 -ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, ++size_t ZSTD_BtFindBestMatch( ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, - size_t* offsetPtr, + size_t* offBasePtr, @@ -42165,6 +47236,23 @@ index 0298a01a7504..3e88d8a1a136 100644 } /* ********************************* + * Dedicated dict search + ***********************************/ + +-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip) ++void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip) + { + const BYTE* const base = ms->window.base; + U32 const target = (U32)(ip - base); +@@ -514,7 +528,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B + */ + FORCE_INLINE_TEMPLATE + size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts, +- const ZSTD_matchState_t* const dms, ++ const ZSTD_MatchState_t* const dms, + const BYTE* const ip, const BYTE* const iLimit, + const BYTE* const prefixStart, const U32 curr, + const U32 dictLimit, const size_t ddsIdx) { @@ -561,7 +575,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb /* save best solution */ if (currentMl > ml) { @@ -42188,17 +47276,18 @@ index 0298a01a7504..3e88d8a1a136 100644 /* Update chains up to ip (excluded) Assumption : always within prefix (i.e. 
not within extDict) */ -FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( +- ZSTD_matchState_t* ms, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertAndFindFirstIndex_internal( - ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, const ZSTD_compressionParameters* const cParams, - const BYTE* ip, U32 const mls) + const BYTE* ip, U32 const mls, U32 const lazySkipping) { U32* const hashTable = ms->hashTable; const U32 hashLog = cParams->hashLog; -@@ -632,6 +648,9 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( +@@ -632,21 +648,25 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; hashTable[h] = idx; idx++; @@ -42208,9 +47297,11 @@ index 0298a01a7504..3e88d8a1a136 100644 } ms->nextToUpdate = target; -@@ -640,11 +659,12 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; + } - U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { +-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { ++U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip) { const ZSTD_compressionParameters* const cParams = &ms->cParams; - return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0); @@ -42220,8 +47311,20 @@ index 0298a01a7504..3e88d8a1a136 100644 FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_HcFindBestMatch( - ZSTD_matchState_t* ms, +- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls, const ZSTD_dictMode_e dictMode) +@@ -670,7 +690,7 @@ size_t ZSTD_HcFindBestMatch( + U32 nbAttempts = 1U << cParams->searchLog; + size_t ml=4-1; + +- const ZSTD_matchState_t* const dms = ms->dictMatchState; ++ const ZSTD_MatchState_t* const dms = ms->dictMatchState; + const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch + ? 
dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0; + const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch @@ -684,14 +704,15 @@ size_t ZSTD_HcFindBestMatch( } @@ -42355,7 +47458,7 @@ index 0298a01a7504..3e88d8a1a136 100644 -FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, ++void ZSTD_row_fillHashCache(ZSTD_MatchState_t* ms, const BYTE* base, U32 const rowLog, U32 const mls, U32 idx, const BYTE* const iLimit) { @@ -42402,7 +47505,7 @@ index 0298a01a7504..3e88d8a1a136 100644 - U32 const rowMask, U32 const useCache) +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, ++void ZSTD_row_update_internalImpl(ZSTD_MatchState_t* ms, + U32 updateStartIdx, U32 const updateEndIdx, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) @@ -42442,13 +47545,20 @@ index 0298a01a7504..3e88d8a1a136 100644 - U32 const rowMask, U32 const useCache) +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, ++void ZSTD_row_update_internal(ZSTD_MatchState_t* ms, const BYTE* ip, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) { U32 idx = ms->nextToUpdate; const BYTE* const base = ms->window.base; -@@ -971,7 +953,35 @@ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { +@@ -965,13 +947,41 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const + * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary + * processing. + */ +-void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { ++void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip) { + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); + const U32 rowMask = (1u << rowLog) - 1; const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog); @@ -42632,15 +47742,15 @@ index 0298a01a7504..3e88d8a1a136 100644 } } #endif -@@ -1103,20 +1124,21 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, +@@ -1103,29 +1124,30 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, /* The high-level approach of the SIMD row based match finder is as follows: * - Figure out where to insert the new entry: - * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag" - * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines -+ * - Generate a hash for current input posistion and split it into a one byte of tag and `rowHashLog` bits of index. -+ * - The hash is salted by a value that changes on every contex reset, so when the same table is used -+ * we will avoid collisions that would otherwise slow us down by intorducing phantom matches. ++ * - Generate a hash for current input position and split it into a one byte of tag and `rowHashLog` bits of index. ++ * - The hash is salted by a value that changes on every context reset, so when the same table is used ++ * we will avoid collisions that would otherwise slow us down by introducing phantom matches. + * - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines * which row to insert into. 
- * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can @@ -42661,9 +47771,11 @@ index 0298a01a7504..3e88d8a1a136 100644 FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_RowFindBestMatch( - ZSTD_matchState_t* ms, +- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, -@@ -1125,7 +1147,7 @@ size_t ZSTD_RowFindBestMatch( + size_t* offsetPtr, + const U32 mls, const ZSTD_dictMode_e dictMode, const U32 rowLog) { U32* const hashTable = ms->hashTable; @@ -42672,7 +47784,7 @@ index 0298a01a7504..3e88d8a1a136 100644 U32* const hashCache = ms->hashCache; const U32 hashLog = ms->rowHashLog; const ZSTD_compressionParameters* const cParams = &ms->cParams; -@@ -1143,8 +1165,11 @@ size_t ZSTD_RowFindBestMatch( +@@ -1143,11 +1165,14 @@ size_t ZSTD_RowFindBestMatch( const U32 rowEntries = (1U << rowLog); const U32 rowMask = rowEntries - 1; const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */ @@ -42683,7 +47795,11 @@ index 0298a01a7504..3e88d8a1a136 100644 + U32 hash; /* DMS/DDS variables that may be referenced laster */ - const ZSTD_matchState_t* const dms = ms->dictMatchState; +- const ZSTD_matchState_t* const dms = ms->dictMatchState; ++ const ZSTD_MatchState_t* const dms = ms->dictMatchState; + + /* Initialize the following variables to satisfy static analyzer */ + size_t ddsIdx = 0; @@ -1168,7 +1193,7 @@ size_t ZSTD_RowFindBestMatch( if (dictMode == ZSTD_dictMatchState) { /* Prefetch DMS rows */ @@ -42804,19 +47920,66 @@ index 0298a01a7504..3e88d8a1a136 100644 if (ip+currentMl == iLimit) break; } } -@@ -1472,8 +1512,9 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( +@@ -1301,7 +1341,7 @@ size_t ZSTD_RowFindBestMatch( + * ZSTD_searchMax() dispatches to the correct implementation function. + * + * TODO: The start of the search function involves loading and calculating a +- * bunch of constants from the ZSTD_matchState_t. These computations could be ++ * bunch of constants from the ZSTD_MatchState_t. These computations could be + * done in an initialization function, and saved somewhere in the match state. + * Then we could pass a pointer to the saved state instead of the match state, + * and avoid duplicate computations. +@@ -1325,7 +1365,7 @@ size_t ZSTD_RowFindBestMatch( + + #define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \ +- ZSTD_matchState_t* ms, \ ++ ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offBasePtr) \ + { \ +@@ -1335,7 +1375,7 @@ size_t ZSTD_RowFindBestMatch( + + #define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \ +- ZSTD_matchState_t* ms, \ ++ ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ +@@ -1345,7 +1385,7 @@ size_t ZSTD_RowFindBestMatch( + + #define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \ + ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \ +- ZSTD_matchState_t* ms, \ ++ ZSTD_MatchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ +@@ -1446,7 +1486,7 @@ typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searc + * If a match is found its offset is stored in @p offsetPtr. 
+ */ + FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( +- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, + const BYTE* ip, + const BYTE* iend, + size_t* offsetPtr, +@@ -1472,9 +1512,10 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax( * Common parser - lazy strategy *********************************/ -FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_lazy_generic( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_compressBlock_lazy_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, -@@ -1491,7 +1532,8 @@ ZSTD_compressBlock_lazy_generic( + const searchMethod_e searchMethod, const U32 depth, +@@ -1491,12 +1532,13 @@ ZSTD_compressBlock_lazy_generic( const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6); const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); @@ -42826,6 +47989,12 @@ index 0298a01a7504..3e88d8a1a136 100644 const int isDMS = dictMode == ZSTD_dictMatchState; const int isDDS = dictMode == ZSTD_dedicatedDictSearch; + const int isDxS = isDMS || isDDS; +- const ZSTD_matchState_t* const dms = ms->dictMatchState; ++ const ZSTD_MatchState_t* const dms = ms->dictMatchState; + const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0; + const BYTE* const dictBase = isDxS ? dms->window.base : NULL; + const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL; @@ -1512,8 +1554,8 @@ ZSTD_compressBlock_lazy_generic( U32 const curr = (U32)(ip - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog); @@ -42861,6 +48030,15 @@ index 0298a01a7504..3e88d8a1a136 100644 const BYTE* start=ip+1; DEBUGLOG(7, "search baseline (depth 0)"); +@@ -1548,7 +1591,7 @@ ZSTD_compressBlock_lazy_generic( + && repIndex < prefixLowestIndex) ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; +- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) ++ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; @@ -1562,14 +1605,23 @@ ZSTD_compressBlock_lazy_generic( } @@ -42889,7 +48067,7 @@ index 0298a01a7504..3e88d8a1a136 100644 continue; } -@@ -1579,12 +1631,12 @@ ZSTD_compressBlock_lazy_generic( +@@ -1579,34 +1631,34 @@ ZSTD_compressBlock_lazy_generic( DEBUGLOG(7, "search depth 1"); ip ++; if ( (dictMode == ZSTD_noDict) @@ -42905,7 +48083,12 @@ index 0298a01a7504..3e88d8a1a136 100644 } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; -@@ -1596,17 +1648,17 @@ ZSTD_compressBlock_lazy_generic( + const BYTE* repMatch = repIndex < prefixLowestIndex ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; +- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) ++ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); @@ -42930,7 +48113,7 @@ index 0298a01a7504..3e88d8a1a136 100644 continue; /* search a better one */ } } -@@ -1615,12 +1667,12 @@ ZSTD_compressBlock_lazy_generic( +@@ -1615,34 +1667,34 @@ ZSTD_compressBlock_lazy_generic( DEBUGLOG(7, "search depth 2"); ip ++; if ( (dictMode == ZSTD_noDict) @@ -42946,7 +48129,12 @@ index 0298a01a7504..3e88d8a1a136 100644 } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; -@@ -1632,17 +1684,17 @@ ZSTD_compressBlock_lazy_generic( + const BYTE* repMatch = repIndex < prefixLowestIndex ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; +- if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) ++ if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); @@ -43011,7 +48199,12 @@ index 0298a01a7504..3e88d8a1a136 100644 /* check immediate repcode */ if (isDxS) { -@@ -1686,8 +1745,8 @@ ZSTD_compressBlock_lazy_generic( +@@ -1682,12 +1741,12 @@ ZSTD_compressBlock_lazy_generic( + const BYTE* repMatch = repIndex < prefixLowestIndex ? + dictBase - dictIndexDelta + repIndex : + base + repIndex; +- if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */) ++ if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex)) && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; @@ -43022,7 +48215,7 @@ index 0298a01a7504..3e88d8a1a136 100644 ip += matchLength; anchor = ip; continue; -@@ -1701,166 +1760,181 @@ ZSTD_compressBlock_lazy_generic( +@@ -1701,168 +1760,183 @@ ZSTD_compressBlock_lazy_generic( && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; @@ -43053,9 +48246,10 @@ index 0298a01a7504..3e88d8a1a136 100644 -size_t ZSTD_compressBlock_btlazy2( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); @@ -43063,8 +48257,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy2( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); @@ -43072,8 +48267,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ 
ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); @@ -43081,8 +48277,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_greedy( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); @@ -43090,8 +48287,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_btlazy2_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); @@ -43099,8 +48297,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy2_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); @@ -43109,9 +48308,10 @@ index 0298a01a7504..3e88d8a1a136 100644 +#endif -size_t ZSTD_compressBlock_lazy_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); @@ -43119,8 +48319,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_greedy_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); @@ -43129,8 +48330,9 @@ index 0298a01a7504..3e88d8a1a136 100644 - -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch); @@ -43138,8 +48340,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( +- ZSTD_matchState_t* 
ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch); @@ -43147,8 +48350,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch); @@ -43157,8 +48361,9 @@ index 0298a01a7504..3e88d8a1a136 100644 -/* Row-based matchfinder */ -size_t ZSTD_compressBlock_lazy2_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict); @@ -43167,9 +48372,10 @@ index 0298a01a7504..3e88d8a1a136 100644 +#endif -size_t ZSTD_compressBlock_lazy_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict); @@ -43177,8 +48383,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_greedy_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict); @@ -43186,8 +48393,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy2_dictMatchState_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState); @@ -43195,8 +48403,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy_dictMatchState_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, 
ZSTD_dictMatchState); @@ -43204,8 +48413,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_greedy_dictMatchState_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState); @@ -43214,7 +48424,8 @@ index 0298a01a7504..3e88d8a1a136 100644 - size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch); @@ -43222,9 +48433,10 @@ index 0298a01a7504..3e88d8a1a136 100644 +#endif -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch); @@ -43232,8 +48444,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { - return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch); @@ -43248,8 +48461,11 @@ index 0298a01a7504..3e88d8a1a136 100644 FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_compressBlock_lazy_extDict_generic( - ZSTD_matchState_t* ms, seqStore_t* seqStore, +- ZSTD_matchState_t* ms, seqStore_t* seqStore, ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, + const searchMethod_e searchMethod, const U32 depth) @@ -1886,12 +1960,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); @@ -43276,6 +48492,15 @@ index 0298a01a7504..3e88d8a1a136 100644 const BYTE* start=ip+1; U32 curr = (U32)(ip-base); +@@ -1912,7 +1987,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( + const U32 repIndex = (U32)(curr+1 - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? 
dictBase : base; + const BYTE* const repMatch = repBase + repIndex; +- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */ ++ if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) + & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */ + if (MEM_read32(ip+1) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ @@ -1922,14 +1997,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( } } @@ -43304,7 +48529,7 @@ index 0298a01a7504..3e88d8a1a136 100644 continue; } -@@ -1939,7 +2023,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( +@@ -1939,30 +2023,30 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ip ++; curr++; /* check repCode */ @@ -43313,7 +48538,12 @@ index 0298a01a7504..3e88d8a1a136 100644 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; -@@ -1951,18 +2035,18 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( + const BYTE* const repMatch = repBase + repIndex; +- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ ++ if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) + & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); @@ -43339,7 +48569,7 @@ index 0298a01a7504..3e88d8a1a136 100644 continue; /* search a better one */ } } -@@ -1971,7 +2055,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( +@@ -1971,50 +2055,57 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ip ++; curr++; /* check repCode */ @@ -43348,7 +48578,12 @@ index 0298a01a7504..3e88d8a1a136 100644 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; -@@ -1983,38 +2067,45 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( + const BYTE* const repMatch = repBase + repIndex; +- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ ++ if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) + & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); @@ -43405,7 +48640,14 @@ index 0298a01a7504..3e88d8a1a136 100644 /* check immediate repcode */ while (ip <= ilimit) { -@@ -2029,8 +2120,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( +@@ -2023,14 +2114,14 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( + const U32 repIndex = repCurrent - offset_2; + const BYTE* const repBase = repIndex < dictLimit ? 
dictBase : base; + const BYTE* const repMatch = repBase + repIndex; +- if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ ++ if ( (ZSTD_index_overlap_check(dictLimit, repIndex)) + & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; @@ -43416,7 +48658,7 @@ index 0298a01a7504..3e88d8a1a136 100644 ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ -@@ -2045,8 +2136,9 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( +@@ -2045,58 +2136,65 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( /* Return the last literals size */ return (size_t)(iend - anchor); } @@ -43425,15 +48667,17 @@ index 0298a01a7504..3e88d8a1a136 100644 - +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) -@@ -2054,49 +2146,55 @@ size_t ZSTD_compressBlock_greedy_extDict( + { return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); } -size_t ZSTD_compressBlock_lazy_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) - { @@ -43443,9 +48687,10 @@ index 0298a01a7504..3e88d8a1a136 100644 +#endif -size_t ZSTD_compressBlock_lazy2_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { @@ -43454,8 +48699,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_btlazy2_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { @@ -43465,9 +48711,10 @@ index 0298a01a7504..3e88d8a1a136 100644 +#endif -size_t ZSTD_compressBlock_greedy_extDict_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) + { @@ -43476,8 +48723,9 @@ index 0298a01a7504..3e88d8a1a136 100644 } -size_t ZSTD_compressBlock_lazy_extDict_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t 
srcSize) - { @@ -43487,9 +48735,10 @@ index 0298a01a7504..3e88d8a1a136 100644 +#endif -size_t ZSTD_compressBlock_lazy2_extDict_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) { @@ -43498,7 +48747,7 @@ index 0298a01a7504..3e88d8a1a136 100644 } +#endif diff --git a/lib/zstd/compress/zstd_lazy.h b/lib/zstd/compress/zstd_lazy.h -index e5bdf4df8dde..22c9201f4e63 100644 +index e5bdf4df8dde..987a036d8bde 100644 --- a/lib/zstd/compress/zstd_lazy.h +++ b/lib/zstd/compress/zstd_lazy.h @@ -1,5 +1,6 @@ @@ -43509,60 +48758,77 @@ index e5bdf4df8dde..22c9201f4e63 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the -@@ -22,98 +23,175 @@ +@@ -11,7 +12,6 @@ + #ifndef ZSTD_LAZY_H + #define ZSTD_LAZY_H + +- + #include "zstd_compress_internal.h" + + /* +@@ -22,98 +22,173 @@ */ #define ZSTD_LAZY_DDSS_BUCKET_LOG 2 +-U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); +-void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip); +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ + +#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) - U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); - void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip); ++U32 ZSTD_insertAndFindFirstIndex(ZSTD_MatchState_t* ms, const BYTE* ip); ++void ZSTD_row_update(ZSTD_MatchState_t* const ms, const BYTE* ip); - void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip); +-void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip); ++void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_MatchState_t* ms, const BYTE* const ip); void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). 
preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ +#endif -size_t ZSTD_compressBlock_btlazy2( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_greedy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_greedy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); -- --size_t ZSTD_compressBlock_btlazy2_dictMatchState( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); --size_t ZSTD_compressBlock_lazy2_dictMatchState( -+ + +-size_t ZSTD_compressBlock_btlazy2_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy +#define ZSTD_COMPRESSBLOCK_GREEDY_ROW ZSTD_compressBlock_greedy_row +#define ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE ZSTD_compressBlock_greedy_dictMatchState @@ -43584,38 +48850,50 @@ index e5bdf4df8dde..22c9201f4e63 100644 + +#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); --size_t ZSTD_compressBlock_lazy_dictMatchState( +-size_t ZSTD_compressBlock_lazy2_dictMatchState( 
+- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); + size_t ZSTD_compressBlock_lazy_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState( -+size_t ZSTD_compressBlock_lazy_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_dictMatchState_row( -+size_t ZSTD_compressBlock_lazy_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); --size_t ZSTD_compressBlock_lazy_dictMatchState_row( -+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + size_t ZSTD_compressBlock_lazy_dictMatchState_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dictMatchState_row( -+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( -+size_t ZSTD_compressBlock_lazy_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch( -+size_t ZSTD_compressBlock_lazy_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy_extDict( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy_extDict_row( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +-size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + +#define ZSTD_COMPRESSBLOCK_LAZY ZSTD_compressBlock_lazy +#define ZSTD_COMPRESSBLOCK_LAZY_ROW ZSTD_compressBlock_lazy_row @@ -43638,37 +48916,43 @@ index e5bdf4df8dde..22c9201f4e63 100644 + +#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_lazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); --size_t 
ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( -+size_t ZSTD_compressBlock_lazy2_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row( -+size_t ZSTD_compressBlock_lazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy2_row( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row( -+size_t ZSTD_compressBlock_lazy2_dictMatchState_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy2_dictMatchState( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_greedy_extDict( -+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy2_dictMatchState_row( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch( ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_lazy2_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_greedy_extDict_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_lazy2_extDict_row( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy_extDict_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + +#define ZSTD_COMPRESSBLOCK_LAZY2 ZSTD_compressBlock_lazy2 +#define ZSTD_COMPRESSBLOCK_LAZY2_ROW ZSTD_compressBlock_lazy2_row @@ -43691,17 +48975,19 @@ index e5bdf4df8dde..22c9201f4e63 100644 + +#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btlazy2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_lazy2_extDict_row( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btlazy2_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btlazy2_extDict( 
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); - -+ + +#define ZSTD_COMPRESSBLOCK_BTLAZY2 ZSTD_compressBlock_btlazy2 +#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE ZSTD_compressBlock_btlazy2_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT ZSTD_compressBlock_btlazy2_extDict @@ -43710,12 +48996,10 @@ index e5bdf4df8dde..22c9201f4e63 100644 +#define ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE NULL +#define ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT NULL +#endif -+ - #endif /* ZSTD_LAZY_H */ diff --git a/lib/zstd/compress/zstd_ldm.c b/lib/zstd/compress/zstd_ldm.c -index dd86fc83e7dd..07f3bc6437ce 100644 +index dd86fc83e7dd..54eefad9cae6 100644 --- a/lib/zstd/compress/zstd_ldm.c +++ b/lib/zstd/compress/zstd_ldm.c @@ -1,5 +1,6 @@ @@ -43726,7 +49010,98 @@ index dd86fc83e7dd..07f3bc6437ce 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the -@@ -242,11 +243,15 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, +@@ -16,7 +17,7 @@ + #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ + #include "zstd_ldm_geartab.h" + +-#define LDM_BUCKET_SIZE_LOG 3 ++#define LDM_BUCKET_SIZE_LOG 4 + #define LDM_MIN_MATCH_LENGTH 64 + #define LDM_HASH_RLOG 7 + +@@ -133,21 +134,35 @@ static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state, + } + + void ZSTD_ldm_adjustParameters(ldmParams_t* params, +- ZSTD_compressionParameters const* cParams) ++ const ZSTD_compressionParameters* cParams) + { + params->windowLog = cParams->windowLog; + ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); + DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); +- if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; +- if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; ++ if (params->hashRateLog == 0) { ++ if (params->hashLog > 0) { ++ /* if params->hashLog is set, derive hashRateLog from it */ ++ assert(params->hashLog <= ZSTD_HASHLOG_MAX); ++ if (params->windowLog > params->hashLog) { ++ params->hashRateLog = params->windowLog - params->hashLog; ++ } ++ } else { ++ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); ++ /* mapping from [fast, rate7] to [btultra2, rate4] */ ++ params->hashRateLog = 7 - (cParams->strategy/3); ++ } ++ } + if (params->hashLog == 0) { +- params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG); +- assert(params->hashLog <= ZSTD_HASHLOG_MAX); ++ params->hashLog = BOUNDED(ZSTD_HASHLOG_MIN, params->windowLog - params->hashRateLog, ZSTD_HASHLOG_MAX); + } +- if (params->hashRateLog == 0) { +- params->hashRateLog = params->windowLog < params->hashLog +- ? 0 +- : params->windowLog - params->hashLog; ++ if (params->minMatchLength == 0) { ++ params->minMatchLength = LDM_MIN_MATCH_LENGTH; ++ if (cParams->strategy >= ZSTD_btultra) ++ params->minMatchLength /= 2; ++ } ++ if (params->bucketSizeLog==0) { ++ assert(1 <= (int)cParams->strategy && (int)cParams->strategy <= 9); ++ params->bucketSizeLog = BOUNDED(LDM_BUCKET_SIZE_LOG, (U32)cParams->strategy, ZSTD_LDM_BUCKETSIZELOG_MAX); + } + params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); + } +@@ -170,22 +185,22 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) + /* ZSTD_ldm_getBucket() : + * Returns a pointer to the start of the bucket associated with hash. 
*/ + static ldmEntry_t* ZSTD_ldm_getBucket( +- ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) ++ const ldmState_t* ldmState, size_t hash, U32 const bucketSizeLog) + { +- return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); ++ return ldmState->hashTable + (hash << bucketSizeLog); + } + + /* ZSTD_ldm_insertEntry() : + * Insert the entry with corresponding hash into the hash table */ + static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, + size_t const hash, const ldmEntry_t entry, +- ldmParams_t const ldmParams) ++ U32 const bucketSizeLog) + { + BYTE* const pOffset = ldmState->bucketOffsets + hash; + unsigned const offset = *pOffset; + +- *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry; +- *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1)); ++ *(ZSTD_ldm_getBucket(ldmState, hash, bucketSizeLog) + offset) = entry; ++ *pOffset = (BYTE)((offset + 1) & ((1u << bucketSizeLog) - 1)); + + } + +@@ -234,7 +249,7 @@ static size_t ZSTD_ldm_countBackwardsMatch_2segments( + * + * The tables for the other strategies are filled within their + * block compressors. */ +-static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, ++static size_t ZSTD_ldm_fillFastTables(ZSTD_MatchState_t* ms, + void const* end) + { + const BYTE* const iend = (const BYTE*)end; +@@ -242,11 +257,15 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, switch(ms->cParams.strategy) { case ZSTD_fast: @@ -43744,18 +49119,102 @@ index dd86fc83e7dd..07f3bc6437ce 100644 break; case ZSTD_greedy: -@@ -318,7 +323,9 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) +@@ -269,7 +288,8 @@ void ZSTD_ldm_fillHashTable( + const BYTE* iend, ldmParams_t const* params) + { + U32 const minMatchLength = params->minMatchLength; +- U32 const hBits = params->hashLog - params->bucketSizeLog; ++ U32 const bucketSizeLog = params->bucketSizeLog; ++ U32 const hBits = params->hashLog - bucketSizeLog; + BYTE const* const base = ldmState->window.base; + BYTE const* const istart = ip; + ldmRollingHashState_t hashState; +@@ -284,7 +304,7 @@ void ZSTD_ldm_fillHashTable( + unsigned n; + + numSplits = 0; +- hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits); ++ hashed = ZSTD_ldm_gear_feed(&hashState, ip, (size_t)(iend - ip), splits, &numSplits); + + for (n = 0; n < numSplits; n++) { + if (ip + splits[n] >= istart + minMatchLength) { +@@ -295,7 +315,7 @@ void ZSTD_ldm_fillHashTable( + + entry.offset = (U32)(split - base); + entry.checksum = (U32)(xxhash >> 32); +- ZSTD_ldm_insertEntry(ldmState, hash, entry, *params); ++ ZSTD_ldm_insertEntry(ldmState, hash, entry, params->bucketSizeLog); + } + } + +@@ -309,7 +329,7 @@ void ZSTD_ldm_fillHashTable( + * Sets cctx->nextToUpdate to a position corresponding closer to anchor + * if it is far way + * (after a long match, only update tables a limited amount). 
*/ +-static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) ++static void ZSTD_ldm_limitTableUpdate(ZSTD_MatchState_t* ms, const BYTE* anchor) + { + U32 const curr = (U32)(anchor - ms->window.base); + if (curr > ms->nextToUpdate + 1024) { +@@ -318,8 +338,10 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) } } -static size_t ZSTD_ldm_generateSequences_internal( +- ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t ZSTD_ldm_generateSequences_internal( - ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, ++ ldmState_t* ldmState, RawSeqStore_t* rawSeqStore, ldmParams_t const* params, void const* src, size_t srcSize) { -@@ -549,7 +556,7 @@ size_t ZSTD_ldm_generateSequences( + /* LDM parameters */ +@@ -373,7 +395,7 @@ static size_t ZSTD_ldm_generateSequences_internal( + candidates[n].split = split; + candidates[n].hash = hash; + candidates[n].checksum = (U32)(xxhash >> 32); +- candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params); ++ candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, params->bucketSizeLog); + PREFETCH_L1(candidates[n].bucket); + } + +@@ -396,7 +418,7 @@ static size_t ZSTD_ldm_generateSequences_internal( + * the previous one, we merely register it in the hash table and + * move on */ + if (split < anchor) { +- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); ++ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); + continue; + } + +@@ -443,7 +465,7 @@ static size_t ZSTD_ldm_generateSequences_internal( + /* No match found -- insert an entry into the hash table + * and process the next candidate match */ + if (bestEntry == NULL) { +- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); ++ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); + continue; + } + +@@ -464,7 +486,7 @@ static size_t ZSTD_ldm_generateSequences_internal( + + /* Insert the current entry into the hash table --- it must be + * done after the previous block to avoid clobbering bestEntry */ +- ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params); ++ ZSTD_ldm_insertEntry(ldmState, hash, newEntry, params->bucketSizeLog); + + anchor = split + forwardMatchLength; + +@@ -503,7 +525,7 @@ static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, + } + + size_t ZSTD_ldm_generateSequences( +- ldmState_t* ldmState, rawSeqStore_t* sequences, ++ ldmState_t* ldmState, RawSeqStore_t* sequences, + ldmParams_t const* params, void const* src, size_t srcSize) + { + U32 const maxDist = 1U << params->windowLog; +@@ -549,7 +571,7 @@ size_t ZSTD_ldm_generateSequences( * the window through early invalidation. * TODO: * Test the chunk size. 
* * Try invalidation after the sequence generation and test the @@ -43764,7 +49223,53 @@ index dd86fc83e7dd..07f3bc6437ce 100644 * * NOTE: Because of dictionaries + sequence splitting we MUST make sure * that any offset used is valid at the END of the sequence, since it may -@@ -689,7 +696,6 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, +@@ -580,7 +602,7 @@ size_t ZSTD_ldm_generateSequences( + } + + void +-ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) ++ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) + { + while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { + rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; +@@ -616,7 +638,7 @@ ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const min + * Returns the current sequence to handle, or if the rest of the block should + * be literals, it returns a sequence with offset == 0. + */ +-static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, ++static rawSeq maybeSplitSequence(RawSeqStore_t* rawSeqStore, + U32 const remaining, U32 const minMatch) + { + rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; +@@ -640,7 +662,7 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, + return sequence; + } + +-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { ++void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) { + U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); + while (currPos && rawSeqStore->pos < rawSeqStore->size) { + rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; +@@ -657,14 +679,14 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { + } + } + +-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_paramSwitch_e useRowMatchFinder, ++size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_ParamSwitch_e useRowMatchFinder, + void const* src, size_t srcSize) + { + const ZSTD_compressionParameters* const cParams = &ms->cParams; + unsigned const minMatch = cParams->minMatch; +- ZSTD_blockCompressor const blockCompressor = ++ ZSTD_BlockCompressor_f const blockCompressor = + ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms)); + /* Input bounds */ + BYTE const* const istart = (BYTE const*)src; +@@ -689,7 +711,6 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, /* maybeSplitSequence updates rawSeqStore->pos */ rawSeq const sequence = maybeSplitSequence(rawSeqStore, (U32)(iend - ip), minMatch); @@ -43772,7 +49277,7 @@ index dd86fc83e7dd..07f3bc6437ce 100644 /* End signal */ if (sequence.offset == 0) break; -@@ -702,6 +708,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, +@@ -702,6 +723,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, /* Run the block compressor */ DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); { @@ -43780,7 +49285,7 @@ index dd86fc83e7dd..07f3bc6437ce 100644 size_t const newLitLength = blockCompressor(ms, seqStore, rep, ip, sequence.litLength); ip += sequence.litLength; -@@ -711,7 +718,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, +@@ -711,7 +733,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, rep[0] = sequence.offset; /* Store the sequence */ 
ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, @@ -43790,7 +49295,7 @@ index dd86fc83e7dd..07f3bc6437ce 100644 ip += sequence.matchLength; } diff --git a/lib/zstd/compress/zstd_ldm.h b/lib/zstd/compress/zstd_ldm.h -index fbc6a5e88fd7..c540731abde7 100644 +index fbc6a5e88fd7..41400a7191b2 100644 --- a/lib/zstd/compress/zstd_ldm.h +++ b/lib/zstd/compress/zstd_ldm.h @@ -1,5 +1,6 @@ @@ -43801,6 +49306,60 @@ index fbc6a5e88fd7..c540731abde7 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the +@@ -11,7 +12,6 @@ + #ifndef ZSTD_LDM_H + #define ZSTD_LDM_H + +- + #include "zstd_compress_internal.h" /* ldmParams_t, U32 */ + #include /* ZSTD_CCtx, size_t */ + +@@ -40,7 +40,7 @@ void ZSTD_ldm_fillHashTable( + * sequences. + */ + size_t ZSTD_ldm_generateSequences( +- ldmState_t* ldms, rawSeqStore_t* sequences, ++ ldmState_t* ldms, RawSeqStore_t* sequences, + ldmParams_t const* params, void const* src, size_t srcSize); + + /* +@@ -61,9 +61,9 @@ size_t ZSTD_ldm_generateSequences( + * two. We handle that case correctly, and update `rawSeqStore` appropriately. + * NOTE: This function does not return any errors. + */ +-size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_paramSwitch_e useRowMatchFinder, ++size_t ZSTD_ldm_blockCompress(RawSeqStore_t* rawSeqStore, ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_ParamSwitch_e useRowMatchFinder, + void const* src, size_t srcSize); + + /* +@@ -73,7 +73,7 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, + * Avoids emitting matches less than `minMatch` bytes. + * Must be called for data that is not passed to ZSTD_ldm_blockCompress(). + */ +-void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, ++void ZSTD_ldm_skipSequences(RawSeqStore_t* rawSeqStore, size_t srcSize, + U32 const minMatch); + + /* ZSTD_ldm_skipRawSeqStoreBytes(): +@@ -81,7 +81,7 @@ void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, + * Not to be used in conjunction with ZSTD_ldm_skipSequences(). + * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). 
+ */ +-void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes); ++void ZSTD_ldm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes); + + /* ZSTD_ldm_getTableSize() : + * Estimate the space needed for long distance matching tables or 0 if LDM is +@@ -107,5 +107,4 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); + void ZSTD_ldm_adjustParameters(ldmParams_t* params, + ZSTD_compressionParameters const* cParams); + +- + #endif /* ZSTD_FAST_H */ diff --git a/lib/zstd/compress/zstd_ldm_geartab.h b/lib/zstd/compress/zstd_ldm_geartab.h index 647f865be290..cfccfc46f6f7 100644 --- a/lib/zstd/compress/zstd_ldm_geartab.h @@ -43814,7 +49373,7 @@ index 647f865be290..cfccfc46f6f7 100644 * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c -index fd82acfda62f..a87b66ac8d24 100644 +index fd82acfda62f..b62fd1b0d83e 100644 --- a/lib/zstd/compress/zstd_opt.c +++ b/lib/zstd/compress/zstd_opt.c @@ -1,5 +1,6 @@ @@ -44093,32 +49652,36 @@ index fd82acfda62f..a87b66ac8d24 100644 - const BYTE* const ip) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, ++U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip) { U32* const hashTable3 = ms->hashTable3; U32 const hashLog3 = ms->hashLog3; -@@ -408,7 +438,9 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, +@@ -408,8 +438,10 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, * @param ip assumed <= iend-8 . * @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @return : nb of positions added */ -static U32 ZSTD_insertBt1( +- const ZSTD_matchState_t* ms, +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_insertBt1( - const ZSTD_matchState_t* ms, ++ const ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, U32 const target, -@@ -527,6 +559,7 @@ static U32 ZSTD_insertBt1( + U32 const mls, const int extDict) +@@ -527,15 +559,16 @@ static U32 ZSTD_insertBt1( } FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR void ZSTD_updateTree_internal( - ZSTD_matchState_t* ms, +- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, const BYTE* const ip, const BYTE* const iend, -@@ -535,7 +568,7 @@ void ZSTD_updateTree_internal( + const U32 mls, const ZSTD_dictMode_e dictMode) + { const BYTE* const base = ms->window.base; U32 const target = (U32)(ip - base); U32 idx = ms->nextToUpdate; @@ -44127,7 +49690,13 @@ index fd82acfda62f..a87b66ac8d24 100644 idx, target, dictMode); while(idx < target) { -@@ -553,15 +586,18 @@ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { +@@ -548,20 +581,23 @@ void ZSTD_updateTree_internal( + ms->nextToUpdate = target; + } + +-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { ++void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend) { + ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); } FORCE_INLINE_TEMPLATE @@ -44144,7 +49713,7 @@ index fd82acfda62f..a87b66ac8d24 100644 +U32 +ZSTD_insertBtAndGetAllMatches ( + ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ -+ ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip, const BYTE* const iLimit, + const ZSTD_dictMode_e dictMode, @@ -44155,6 
+49724,31 @@ index fd82acfda62f..a87b66ac8d24 100644 { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); +@@ -590,7 +626,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( + U32 mnum = 0; + U32 nbCompares = 1U << cParams->searchLog; + +- const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; ++ const ZSTD_MatchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; + const ZSTD_compressionParameters* const dmsCParams = + dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; + const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; +@@ -629,13 +665,13 @@ U32 ZSTD_insertBtAndGetAllMatches ( + assert(curr >= windowLow); + if ( dictMode == ZSTD_extDict + && ( ((repOffset-1) /*intentional overflow*/ < curr - windowLow) /* equivalent to `curr > repIndex >= windowLow` */ +- & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) ++ & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; + } + if (dictMode == ZSTD_dictMatchState + && ( ((repOffset-1) /*intentional overflow*/ < curr - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `curr > repIndex >= dmsLowLimit` */ +- & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ ++ & (ZSTD_index_overlap_check(dictLimit, repIndex)) ) + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; + } } @@ -644,7 +680,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); @@ -44206,7 +49800,16 @@ index fd82acfda62f..a87b66ac8d24 100644 matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) -@@ -792,7 +828,9 @@ typedef U32 (*ZSTD_getAllMatchesFn)( +@@ -784,7 +820,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( + + typedef U32 (*ZSTD_getAllMatchesFn)( + ZSTD_match_t*, +- ZSTD_matchState_t*, ++ ZSTD_MatchState_t*, + U32*, + const BYTE*, + const BYTE*, +@@ -792,9 +828,11 @@ typedef U32 (*ZSTD_getAllMatchesFn)( U32 const ll0, U32 const lengthToBeat); @@ -44215,10 +49818,63 @@ index fd82acfda62f..a87b66ac8d24 100644 +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +U32 ZSTD_btGetAllMatches_internal( ZSTD_match_t* matches, - ZSTD_matchState_t* ms, +- ZSTD_matchState_t* ms, ++ ZSTD_MatchState_t* ms, U32* nextToUpdate3, -@@ -960,7 +998,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, - const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) + const BYTE* ip, + const BYTE* const iHighLimit, +@@ -817,7 +855,7 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal( + #define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \ + static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \ + ZSTD_match_t* matches, \ +- ZSTD_matchState_t* ms, \ ++ ZSTD_MatchState_t* ms, \ + U32* nextToUpdate3, \ + const BYTE* ip, \ + const BYTE* const iHighLimit, \ +@@ -849,7 +887,7 @@ GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState) + } + + static ZSTD_getAllMatchesFn +-ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode) 
++ZSTD_selectBtGetAllMatches(ZSTD_MatchState_t const* ms, ZSTD_dictMode_e const dictMode) + { + ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = { + ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict), +@@ -868,7 +906,7 @@ ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const di + + /* Struct containing info needed to make decision about ldm inclusion */ + typedef struct { +- rawSeqStore_t seqStore; /* External match candidates store for this block */ ++ RawSeqStore_t seqStore; /* External match candidates store for this block */ + U32 startPosInBlock; /* Start position of the current match candidate */ + U32 endPosInBlock; /* End position of the current match candidate */ + U32 offset; /* Offset of the match candidate */ +@@ -878,7 +916,7 @@ typedef struct { + * Moves forward in @rawSeqStore by @nbBytes, + * which will update the fields 'pos' and 'posInSequence'. + */ +-static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) ++static void ZSTD_optLdm_skipRawSeqStoreBytes(RawSeqStore_t* rawSeqStore, size_t nbBytes) + { + U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); + while (currPos && rawSeqStore->pos < rawSeqStore->size) { +@@ -935,7 +973,7 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock + return; + } + +- /* Matches may be < MINMATCH by this process. In that case, we will reject them ++ /* Matches may be < minMatch by this process. In that case, we will reject them + when we are deciding whether or not to add the ldm */ + optLdm->startPosInBlock = currPosInBlock + literalsBytesRemaining; + optLdm->endPosInBlock = optLdm->startPosInBlock + matchBytesRemaining; +@@ -957,25 +995,26 @@ ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock + * into 'matches'. Maintains the correct ordering of 'matches'. 
+ */ + static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, +- const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) ++ const ZSTD_optLdm_t* optLdm, U32 currPosInBlock, ++ U32 minMatch) { U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; - /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */ @@ -44226,7 +49882,11 @@ index fd82acfda62f..a87b66ac8d24 100644 U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; /* Ensure that current block position is not outside of the match */ -@@ -971,11 +1009,11 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, + if (currPosInBlock < optLdm->startPosInBlock + || currPosInBlock >= optLdm->endPosInBlock +- || candidateMatchLength < MINMATCH) { ++ || candidateMatchLength < minMatch) { + return; } if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { @@ -44242,7 +49902,26 @@ index fd82acfda62f..a87b66ac8d24 100644 (*nbMatches)++; } } -@@ -1011,11 +1049,6 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, +@@ -986,7 +1025,8 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, + static void + ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, + ZSTD_match_t* matches, U32* nbMatches, +- U32 currPosInBlock, U32 remainingBytes) ++ U32 currPosInBlock, U32 remainingBytes, ++ U32 minMatch) + { + if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { + return; +@@ -1003,7 +1043,7 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, + } + ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); + } +- ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock); ++ ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock, minMatch); + } + + +@@ -1011,11 +1051,6 @@ ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, * Optimal parser *********************************/ @@ -44254,11 +49933,13 @@ index fd82acfda62f..a87b66ac8d24 100644 #if 0 /* debug */ static void -@@ -1033,7 +1066,13 @@ listStats(const U32* table, int lastEltID) +@@ -1033,9 +1068,15 @@ listStats(const U32* table, int lastEltID) #endif -FORCE_INLINE_TEMPLATE size_t +-ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +- seqStore_t* seqStore, +#define LIT_PRICE(_p) (int)ZSTD_rawLiteralsCost(_p, 1, optStatePtr, optLevel) +#define LL_PRICE(_l) (int)ZSTD_litLengthPrice(_l, optStatePtr, optLevel) +#define LL_INCPRICE(_l) (LL_PRICE(_l) - LL_PRICE(_l-1)) @@ -44266,10 +49947,12 @@ index fd82acfda62f..a87b66ac8d24 100644 +FORCE_INLINE_TEMPLATE +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR +size_t - ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, - seqStore_t* seqStore, ++ZSTD_compressBlock_opt_generic(ZSTD_MatchState_t* ms, ++ SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], -@@ -1059,9 +1098,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, + const void* src, size_t srcSize, + const int optLevel, +@@ -1059,9 +1100,11 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, ZSTD_optimal_t* const opt = optStatePtr->priceTable; ZSTD_match_t* const matches = optStatePtr->matchTable; @@ -44282,13 +49965,14 @@ index fd82acfda62f..a87b66ac8d24 100644 optLdm.seqStore = ms->ldmSeqStore ? 
*ms->ldmSeqStore : kNullRawSeqStore; optLdm.endPosInBlock = optLdm.startPosInBlock = optLdm.offset = 0; ZSTD_opt_getNextMatchAndUpdateSeqStore(&optLdm, (U32)(ip-istart), (U32)(iend-ip)); -@@ -1082,103 +1123,139 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +@@ -1082,103 +1125,140 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, U32 const ll0 = !litlen; U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, - (U32)(ip-istart), (U32)(iend - ip)); - if (!nbMatches) { ip++; continue; } -+ (U32)(ip-istart), (U32)(iend-ip)); ++ (U32)(ip-istart), (U32)(iend-ip), ++ minMatch); + if (!nbMatches) { + DEBUGLOG(8, "no match found at cPos %u", (unsigned)(ip-istart)); + ip++; @@ -44390,7 +50074,7 @@ index fd82acfda62f..a87b66ac8d24 100644 - assert(cur < ZSTD_OPT_NUM); - DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur) + assert(cur <= ZSTD_OPT_NUM); -+ DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur); ++ DEBUGLOG(7, "cPos:%i==rPos:%u", (int)(inr-istart), cur); /* Fix current position with one literal if cheaper */ - { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1; @@ -44403,9 +50087,11 @@ index fd82acfda62f..a87b66ac8d24 100644 + + LL_INCPRICE(litlen); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { +- DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", +- inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, + ZSTD_optimal_t const prevMatch = opt[cur]; - DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, ++ DEBUGLOG(7, "cPos:%i==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", ++ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); - opt[cur].mlen = 0; - opt[cur].off = 0; @@ -44426,13 +50112,13 @@ index fd82acfda62f..a87b66ac8d24 100644 + && (with1literal < opt[cur+1].price) ) { + /* update offset history - before it disappears */ + U32 const prev = cur - prevMatch.mlen; -+ repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0); ++ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, prevMatch.off, opt[prev].litlen==0); + assert(cur >= prevMatch.mlen); + DEBUGLOG(7, "==> match+1lit is cheaper (%.2f < %.2f) (hist:%u,%u,%u) !", + ZSTD_fCost(with1literal), ZSTD_fCost(withMoreLiterals), + newReps.rep[0], newReps.rep[1], newReps.rep[2] ); + opt[cur+1] = prevMatch; /* mlen & offbase */ -+ ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(repcodes_t)); ++ ZSTD_memcpy(opt[cur+1].rep, &newReps, sizeof(Repcodes_t)); + opt[cur+1].litlen = 1; + opt[cur+1].price = with1literal; + if (last_pos < cur+1) last_pos = cur+1; @@ -44442,8 +50128,8 @@ index fd82acfda62f..a87b66ac8d24 100644 - DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)", - inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), - opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]); -+ DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f)", -+ inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price)); ++ DEBUGLOG(7, "cPos:%i==rPos:%u : literal would cost more (%.2f>%.2f)", ++ (int)(inr-istart), cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price)); } } @@ -44454,21 +50140,23 @@ index 
fd82acfda62f..a87b66ac8d24 100644 + /* Offset history is not updated during match comparison. + * Do it here, now that the match is selected and confirmed. */ - ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); +- ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); ++ ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(Repcodes_t)); assert(cur >= opt[cur].mlen); - if (opt[cur].mlen != 0) { + if (opt[cur].litlen == 0) { + /* just finished a match => alter offset history */ U32 const prev = cur - opt[cur].mlen; - repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); -+ repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0); - ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); +- ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); - } else { - ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t)); ++ Repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[prev].litlen==0); ++ ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(Repcodes_t)); } /* last match must start at a minimum distance of 8 from oend */ -@@ -1188,15 +1265,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +@@ -1188,38 +1268,37 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, if ( (optLevel==0) /*static_test*/ && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { @@ -44488,7 +50176,13 @@ index fd82acfda62f..a87b66ac8d24 100644 U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); U32 matchNb; -@@ -1208,18 +1284,17 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, + ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, +- (U32)(inr-istart), (U32)(iend-inr)); ++ (U32)(inr-istart), (U32)(iend-inr), ++ minMatch); + + if (!nbMatches) { + DEBUGLOG(7, "rPos:%u : no match found", cur); continue; } @@ -44505,8 +50199,8 @@ index fd82acfda62f..a87b66ac8d24 100644 - last_pos = cur + ZSTD_totalLen(lastSequence); - if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */ + { U32 const longestML = matches[nbMatches-1].len; -+ DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of longest ML=%u", -+ inr-istart, cur, nbMatches, longestML); ++ DEBUGLOG(7, "cPos:%i==rPos:%u, found %u matches, of longest ML=%u", ++ (int)(inr-istart), cur, nbMatches, longestML); + + if ( (longestML > sufficient_len) + || (cur + longestML >= ZSTD_OPT_NUM) @@ -44518,7 +50212,7 @@ index fd82acfda62f..a87b66ac8d24 100644 goto _shortestPath; } } -@@ -1230,20 +1305,25 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +@@ -1230,20 +1309,25 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, U32 const startML = (matchNb>0) ? 
matches[matchNb-1].len+1 : minMatch; U32 mlen; @@ -44549,7 +50243,7 @@ index fd82acfda62f..a87b66ac8d24 100644 opt[pos].price = price; } else { DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", -@@ -1251,52 +1331,86 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +@@ -1251,55 +1335,89 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ } } } } @@ -44586,11 +50280,11 @@ index fd82acfda62f..a87b66ac8d24 100644 + /* Update offset history */ + if (lastStretch.litlen == 0) { + /* finishing on a match : update offset history */ -+ repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0); -+ ZSTD_memcpy(rep, &reps, sizeof(repcodes_t)); ++ Repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastStretch.off, opt[cur].litlen==0); ++ ZSTD_memcpy(rep, &reps, sizeof(Repcodes_t)); } else { - ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t)); -+ ZSTD_memcpy(rep, lastStretch.rep, sizeof(repcodes_t)); ++ ZSTD_memcpy(rep, lastStretch.rep, sizeof(Repcodes_t)); + assert(cur >= lastStretch.litlen); + cur -= lastStretch.litlen; } @@ -44659,9 +50353,14 @@ index fd82acfda62f..a87b66ac8d24 100644 - U32 const offCode = opt[storePos].off; + U32 const offBase = opt[storePos].off; U32 const advance = llen + mlen; - DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", - anchor - istart, (unsigned)llen, (unsigned)mlen); -@@ -1308,11 +1422,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +- DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", +- anchor - istart, (unsigned)llen, (unsigned)mlen); ++ DEBUGLOG(6, "considering seq starting at %i, llen=%u, mlen=%u", ++ (int)(anchor - istart), (unsigned)llen, (unsigned)mlen); + + if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ + assert(storePos == storeEnd); /* must be last sequence */ +@@ -1308,11 +1426,14 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, } assert(anchor + llen <= iend); @@ -44678,7 +50377,7 @@ index fd82acfda62f..a87b66ac8d24 100644 ZSTD_setBasePrices(optStatePtr, optLevel); } } /* while (ip < ilimit) */ -@@ -1320,21 +1437,27 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, +@@ -1320,42 +1441,51 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* Return the last literals size */ return (size_t)(iend - anchor); } @@ -44686,7 +50385,8 @@ index fd82acfda62f..a87b66ac8d24 100644 +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR static size_t ZSTD_compressBlock_opt0( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode); @@ -44695,7 +50395,8 @@ index fd82acfda62f..a87b66ac8d24 100644 +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR static size_t ZSTD_compressBlock_opt2( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) { return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode); @@ -44704,9 +50405,10 @@ index 
fd82acfda62f..a87b66ac8d24 100644 +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) -@@ -1342,20 +1465,23 @@ size_t ZSTD_compressBlock_btopt( + { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } @@ -44729,14 +50431,14 @@ index fd82acfda62f..a87b66ac8d24 100644 - const void* src, size_t srcSize) +static +ZSTD_ALLOW_POINTER_OVERFLOW_ATTR -+void ZSTD_initStats_ultra(ZSTD_matchState_t* ms, -+ seqStore_t* seqStore, ++void ZSTD_initStats_ultra(ZSTD_MatchState_t* ms, ++ SeqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) { U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep)); -@@ -1368,7 +1494,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, +@@ -1368,7 +1498,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ @@ -44745,7 +50447,23 @@ index fd82acfda62f..a87b66ac8d24 100644 ZSTD_resetSeqStore(seqStore); ms->window.base -= srcSize; ms->window.dictLimit += (U32)srcSize; -@@ -1392,10 +1518,10 @@ size_t ZSTD_compressBlock_btultra2( +@@ -1378,7 +1508,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, + } + + size_t ZSTD_compressBlock_btultra( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) + { + DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); +@@ -1386,16 +1516,16 @@ size_t ZSTD_compressBlock_btultra( + } + + size_t ZSTD_compressBlock_btultra2( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) + { U32 const curr = (U32)((const BYTE*)src - ms->window.base); DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); @@ -44759,7 +50477,7 @@ index fd82acfda62f..a87b66ac8d24 100644 * Consequently, this can only work if no data has been previously loaded in tables, * aka, no dictionary, no prefix, no ldm preprocessing. 
* The compression ratio gain is generally small (~0.5% on first block), -@@ -1404,15 +1530,17 @@ size_t ZSTD_compressBlock_btultra2( +@@ -1404,42 +1534,47 @@ size_t ZSTD_compressBlock_btultra2( if ( (ms->opt.litLengthSum==0) /* first block */ && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ @@ -44777,15 +50495,17 @@ index fd82acfda62f..a87b66ac8d24 100644 +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) -@@ -1420,18 +1548,20 @@ size_t ZSTD_compressBlock_btopt_dictMatchState( + { return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } -size_t ZSTD_compressBlock_btultra_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); @@ -44794,9 +50514,10 @@ index fd82acfda62f..a87b66ac8d24 100644 +#endif -size_t ZSTD_compressBlock_btopt_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict); @@ -44804,7 +50525,9 @@ index fd82acfda62f..a87b66ac8d24 100644 } size_t ZSTD_compressBlock_btultra_extDict( -@@ -1440,6 +1570,7 @@ size_t ZSTD_compressBlock_btultra_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) { return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } @@ -44813,7 +50536,7 @@ index fd82acfda62f..a87b66ac8d24 100644 /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries diff --git a/lib/zstd/compress/zstd_opt.h b/lib/zstd/compress/zstd_opt.h -index 22b862858ba7..ac1b743d27cd 100644 +index 22b862858ba7..fbdc540ec9d1 100644 --- a/lib/zstd/compress/zstd_opt.h +++ b/lib/zstd/compress/zstd_opt.h @@ -1,5 +1,6 @@ @@ -44824,28 +50547,35 @@ index 22b862858ba7..ac1b743d27cd 100644 * All rights reserved. 
* * This source code is licensed under both the BSD-style license (found in the -@@ -14,30 +15,40 @@ +@@ -11,40 +12,62 @@ + #ifndef ZSTD_OPT_H + #define ZSTD_OPT_H +- #include "zstd_compress_internal.h" +#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \ + || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR) /* used in ZSTD_loadDictionaryContent() */ - void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); +-void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); ++void ZSTD_updateTree(ZSTD_MatchState_t* ms, const BYTE* ip, const BYTE* iend); +#endif +#ifndef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR size_t ZSTD_compressBlock_btopt( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -size_t ZSTD_compressBlock_btultra2( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); +#define ZSTD_COMPRESSBLOCK_BTOPT ZSTD_compressBlock_btopt @@ -44858,28 +50588,31 @@ index 22b862858ba7..ac1b743d27cd 100644 +#endif -size_t ZSTD_compressBlock_btopt_dictMatchState( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +#ifndef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR +size_t ZSTD_compressBlock_btultra( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); size_t ZSTD_compressBlock_btultra_dictMatchState( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize); +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], +- void const* src, size_t srcSize); - -size_t ZSTD_compressBlock_btopt_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], -- void const* src, size_t srcSize); - size_t ZSTD_compressBlock_btultra_extDict( - ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -@@ -45,6 +56,20 @@ size_t ZSTD_compressBlock_btultra_extDict( + size_t ZSTD_compressBlock_btultra_extDict( +- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + /* note : no btultra2 variant for extDict nor dictMatchState, * because btultra2 is not meant to work with dictionaries * and is only specific for the first block (no prefix) */ +size_t ZSTD_compressBlock_btultra2( -+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ++ ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); -+ + +#define ZSTD_COMPRESSBLOCK_BTULTRA ZSTD_compressBlock_btultra +#define 
ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE ZSTD_compressBlock_btultra_dictMatchState +#define ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT ZSTD_compressBlock_btultra_extDict @@ -44891,8 +50624,292 @@ index 22b862858ba7..ac1b743d27cd 100644 +#define ZSTD_COMPRESSBLOCK_BTULTRA2 NULL +#endif - #endif /* ZSTD_OPT_H */ +diff --git a/lib/zstd/compress/zstd_preSplit.c b/lib/zstd/compress/zstd_preSplit.c +new file mode 100644 +index 000000000000..7d9403c9a3bc +--- /dev/null ++++ b/lib/zstd/compress/zstd_preSplit.c +@@ -0,0 +1,239 @@ ++// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause ++/* ++ * Copyright (c) Meta Platforms, Inc. and affiliates. ++ * All rights reserved. ++ * ++ * This source code is licensed under both the BSD-style license (found in the ++ * LICENSE file in the root directory of this source tree) and the GPLv2 (found ++ * in the COPYING file in the root directory of this source tree). ++ * You may select, at your option, one of the above-listed licenses. ++ */ ++ ++#include "../common/compiler.h" /* ZSTD_ALIGNOF */ ++#include "../common/mem.h" /* S64 */ ++#include "../common/zstd_deps.h" /* ZSTD_memset */ ++#include "../common/zstd_internal.h" /* ZSTD_STATIC_ASSERT */ ++#include "hist.h" /* HIST_add */ ++#include "zstd_preSplit.h" ++ ++ ++#define BLOCKSIZE_MIN 3500 ++#define THRESHOLD_PENALTY_RATE 16 ++#define THRESHOLD_BASE (THRESHOLD_PENALTY_RATE - 2) ++#define THRESHOLD_PENALTY 3 ++ ++#define HASHLENGTH 2 ++#define HASHLOG_MAX 10 ++#define HASHTABLESIZE (1 << HASHLOG_MAX) ++#define HASHMASK (HASHTABLESIZE - 1) ++#define KNUTH 0x9e3779b9 ++ ++/* for hashLog > 8, hash 2 bytes. ++ * for hashLog == 8, just take the byte, no hashing. ++ * The speed of this method relies on compile-time constant propagation */ ++FORCE_INLINE_TEMPLATE unsigned hash2(const void *p, unsigned hashLog) ++{ ++ assert(hashLog >= 8); ++ if (hashLog == 8) return (U32)((const BYTE*)p)[0]; ++ assert(hashLog <= HASHLOG_MAX); ++ return (U32)(MEM_read16(p)) * KNUTH >> (32 - hashLog); ++} ++ ++ ++typedef struct { ++ unsigned events[HASHTABLESIZE]; ++ size_t nbEvents; ++} Fingerprint; ++typedef struct { ++ Fingerprint pastEvents; ++ Fingerprint newEvents; ++} FPStats; ++ ++static void initStats(FPStats* fpstats) ++{ ++ ZSTD_memset(fpstats, 0, sizeof(FPStats)); ++} ++ ++FORCE_INLINE_TEMPLATE void ++addEvents_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog) ++{ ++ const char* p = (const char*)src; ++ size_t limit = srcSize - HASHLENGTH + 1; ++ size_t n; ++ assert(srcSize >= HASHLENGTH); ++ for (n = 0; n < limit; n+=samplingRate) { ++ fp->events[hash2(p+n, hashLog)]++; ++ } ++ fp->nbEvents += limit/samplingRate; ++} ++ ++FORCE_INLINE_TEMPLATE void ++recordFingerprint_generic(Fingerprint* fp, const void* src, size_t srcSize, size_t samplingRate, unsigned hashLog) ++{ ++ ZSTD_memset(fp, 0, sizeof(unsigned) * ((size_t)1 << hashLog)); ++ fp->nbEvents = 0; ++ addEvents_generic(fp, src, srcSize, samplingRate, hashLog); ++} ++ ++typedef void (*RecordEvents_f)(Fingerprint* fp, const void* src, size_t srcSize); ++ ++#define FP_RECORD(_rate) ZSTD_recordFingerprint_##_rate ++ ++#define ZSTD_GEN_RECORD_FINGERPRINT(_rate, _hSize) \ ++ static void FP_RECORD(_rate)(Fingerprint* fp, const void* src, size_t srcSize) \ ++ { \ ++ recordFingerprint_generic(fp, src, srcSize, _rate, _hSize); \ ++ } ++ ++ZSTD_GEN_RECORD_FINGERPRINT(1, 10) ++ZSTD_GEN_RECORD_FINGERPRINT(5, 10) ++ZSTD_GEN_RECORD_FINGERPRINT(11, 9) ++ZSTD_GEN_RECORD_FINGERPRINT(43, 8) ++ ++ ++static U64 abs64(S64 s64) { return (U64)((s64 < 
0) ? -s64 : s64); } ++ ++static U64 fpDistance(const Fingerprint* fp1, const Fingerprint* fp2, unsigned hashLog) ++{ ++ U64 distance = 0; ++ size_t n; ++ assert(hashLog <= HASHLOG_MAX); ++ for (n = 0; n < ((size_t)1 << hashLog); n++) { ++ distance += ++ abs64((S64)fp1->events[n] * (S64)fp2->nbEvents - (S64)fp2->events[n] * (S64)fp1->nbEvents); ++ } ++ return distance; ++} ++ ++/* Compare newEvents with pastEvents ++ * return 1 when considered "too different" ++ */ ++static int compareFingerprints(const Fingerprint* ref, ++ const Fingerprint* newfp, ++ int penalty, ++ unsigned hashLog) ++{ ++ assert(ref->nbEvents > 0); ++ assert(newfp->nbEvents > 0); ++ { U64 p50 = (U64)ref->nbEvents * (U64)newfp->nbEvents; ++ U64 deviation = fpDistance(ref, newfp, hashLog); ++ U64 threshold = p50 * (U64)(THRESHOLD_BASE + penalty) / THRESHOLD_PENALTY_RATE; ++ return deviation >= threshold; ++ } ++} ++ ++static void mergeEvents(Fingerprint* acc, const Fingerprint* newfp) ++{ ++ size_t n; ++ for (n = 0; n < HASHTABLESIZE; n++) { ++ acc->events[n] += newfp->events[n]; ++ } ++ acc->nbEvents += newfp->nbEvents; ++} ++ ++static void flushEvents(FPStats* fpstats) ++{ ++ size_t n; ++ for (n = 0; n < HASHTABLESIZE; n++) { ++ fpstats->pastEvents.events[n] = fpstats->newEvents.events[n]; ++ } ++ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents; ++ ZSTD_memset(&fpstats->newEvents, 0, sizeof(fpstats->newEvents)); ++} ++ ++static void removeEvents(Fingerprint* acc, const Fingerprint* slice) ++{ ++ size_t n; ++ for (n = 0; n < HASHTABLESIZE; n++) { ++ assert(acc->events[n] >= slice->events[n]); ++ acc->events[n] -= slice->events[n]; ++ } ++ acc->nbEvents -= slice->nbEvents; ++} ++ ++#define CHUNKSIZE (8 << 10) ++static size_t ZSTD_splitBlock_byChunks(const void* blockStart, size_t blockSize, ++ int level, ++ void* workspace, size_t wkspSize) ++{ ++ static const RecordEvents_f records_fs[] = { ++ FP_RECORD(43), FP_RECORD(11), FP_RECORD(5), FP_RECORD(1) ++ }; ++ static const unsigned hashParams[] = { 8, 9, 10, 10 }; ++ const RecordEvents_f record_f = (assert(0<=level && level<=3), records_fs[level]); ++ FPStats* const fpstats = (FPStats*)workspace; ++ const char* p = (const char*)blockStart; ++ int penalty = THRESHOLD_PENALTY; ++ size_t pos = 0; ++ assert(blockSize == (128 << 10)); ++ assert(workspace != NULL); ++ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); ++ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats)); ++ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize; ++ ++ initStats(fpstats); ++ record_f(&fpstats->pastEvents, p, CHUNKSIZE); ++ for (pos = CHUNKSIZE; pos <= blockSize - CHUNKSIZE; pos += CHUNKSIZE) { ++ record_f(&fpstats->newEvents, p + pos, CHUNKSIZE); ++ if (compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, penalty, hashParams[level])) { ++ return pos; ++ } else { ++ mergeEvents(&fpstats->pastEvents, &fpstats->newEvents); ++ if (penalty > 0) penalty--; ++ } ++ } ++ assert(pos == blockSize); ++ return blockSize; ++ (void)flushEvents; (void)removeEvents; ++} ++ ++/* ZSTD_splitBlock_fromBorders(): very fast strategy : ++ * compare fingerprint from beginning and end of the block, ++ * derive from their difference if it's preferable to split in the middle, ++ * repeat the process a second time, for finer grained decision. ++ * 3 times did not brought improvements, so I stopped at 2. ++ * Benefits are good enough for a cheap heuristic. ++ * More accurate splitting saves more, but speed impact is also more perceptible. 
++ * For better accuracy, use more elaborate variant *_byChunks. ++ */ ++static size_t ZSTD_splitBlock_fromBorders(const void* blockStart, size_t blockSize, ++ void* workspace, size_t wkspSize) ++{ ++#define SEGMENT_SIZE 512 ++ FPStats* const fpstats = (FPStats*)workspace; ++ Fingerprint* middleEvents = (Fingerprint*)(void*)((char*)workspace + 512 * sizeof(unsigned)); ++ assert(blockSize == (128 << 10)); ++ assert(workspace != NULL); ++ assert((size_t)workspace % ZSTD_ALIGNOF(FPStats) == 0); ++ ZSTD_STATIC_ASSERT(ZSTD_SLIPBLOCK_WORKSPACESIZE >= sizeof(FPStats)); ++ assert(wkspSize >= sizeof(FPStats)); (void)wkspSize; ++ ++ initStats(fpstats); ++ HIST_add(fpstats->pastEvents.events, blockStart, SEGMENT_SIZE); ++ HIST_add(fpstats->newEvents.events, (const char*)blockStart + blockSize - SEGMENT_SIZE, SEGMENT_SIZE); ++ fpstats->pastEvents.nbEvents = fpstats->newEvents.nbEvents = SEGMENT_SIZE; ++ if (!compareFingerprints(&fpstats->pastEvents, &fpstats->newEvents, 0, 8)) ++ return blockSize; ++ ++ HIST_add(middleEvents->events, (const char*)blockStart + blockSize/2 - SEGMENT_SIZE/2, SEGMENT_SIZE); ++ middleEvents->nbEvents = SEGMENT_SIZE; ++ { U64 const distFromBegin = fpDistance(&fpstats->pastEvents, middleEvents, 8); ++ U64 const distFromEnd = fpDistance(&fpstats->newEvents, middleEvents, 8); ++ U64 const minDistance = SEGMENT_SIZE * SEGMENT_SIZE / 3; ++ if (abs64((S64)distFromBegin - (S64)distFromEnd) < minDistance) ++ return 64 KB; ++ return (distFromBegin > distFromEnd) ? 32 KB : 96 KB; ++ } ++} ++ ++size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize, ++ int level, ++ void* workspace, size_t wkspSize) ++{ ++ DEBUGLOG(6, "ZSTD_splitBlock (level=%i)", level); ++ assert(0<=level && level<=4); ++ if (level == 0) ++ return ZSTD_splitBlock_fromBorders(blockStart, blockSize, workspace, wkspSize); ++ /* level >= 1*/ ++ return ZSTD_splitBlock_byChunks(blockStart, blockSize, level-1, workspace, wkspSize); ++} +diff --git a/lib/zstd/compress/zstd_preSplit.h b/lib/zstd/compress/zstd_preSplit.h +new file mode 100644 +index 000000000000..f98f797fe191 +--- /dev/null ++++ b/lib/zstd/compress/zstd_preSplit.h +@@ -0,0 +1,34 @@ ++/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ ++/* ++ * Copyright (c) Meta Platforms, Inc. and affiliates. ++ * All rights reserved. ++ * ++ * This source code is licensed under both the BSD-style license (found in the ++ * LICENSE file in the root directory of this source tree) and the GPLv2 (found ++ * in the COPYING file in the root directory of this source tree). ++ * You may select, at your option, one of the above-listed licenses. ++ */ ++ ++#ifndef ZSTD_PRESPLIT_H ++#define ZSTD_PRESPLIT_H ++ ++#include /* size_t */ ++ ++#define ZSTD_SLIPBLOCK_WORKSPACESIZE 8208 ++ ++/* ZSTD_splitBlock(): ++ * @level must be a value between 0 and 4. ++ * higher levels spend more energy to detect block boundaries. ++ * @workspace must be aligned for size_t. ++ * @wkspSize must be at least >= ZSTD_SLIPBLOCK_WORKSPACESIZE ++ * note: ++ * For the time being, this function only accepts full 128 KB blocks. ++ * Therefore, @blockSize must be == 128 KB. ++ * While this could be extended to smaller sizes in the future, ++ * it is not yet clear if this would be useful. TBD. 
++ */ ++size_t ZSTD_splitBlock(const void* blockStart, size_t blockSize, ++ int level, ++ void* workspace, size_t wkspSize); ++ ++#endif /* ZSTD_PRESPLIT_H */ diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c index 60958afebc41..ac8b87f48f84 100644 --- a/lib/zstd/decompress/huf_decompress.c @@ -46339,7 +52356,7 @@ index 8c1a79d666f8..de459a0dacd1 100644 * * This source code is licensed under both the BSD-style license (found in the diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c -index 6b3177c94711..c9cbc45f6ed9 100644 +index 6b3177c94711..da8b4cf116e3 100644 --- a/lib/zstd/decompress/zstd_decompress.c +++ b/lib/zstd/decompress/zstd_decompress.c @@ -1,5 +1,6 @@ @@ -46407,8 +52424,9 @@ index 6b3177c94711..c9cbc45f6ed9 100644 * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, - * or an error code, which can be tested using ZSTD_isError() */ +-size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) +** or an error code, which can be tested using ZSTD_isError() */ - size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) ++size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); @@ -46447,7 +52465,37 @@ index 6b3177c94711..c9cbc45f6ed9 100644 if ( (format != ZSTD_f_zstd1_magicless) && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { -@@ -540,61 +570,62 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize) +@@ -438,8 +468,10 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s + if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) + return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ + ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); +- zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); + zfhPtr->frameType = ZSTD_skippableFrame; ++ zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START; ++ zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE; ++ zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); + return 0; + } + RETURN_ERROR(prefix_unknown, ""); +@@ -508,7 +540,7 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +-size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize) ++size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize) + { + return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); + } +@@ -520,7 +552,7 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src + * - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. 
invalid magic number, srcSize too small) */ + unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) + { +- { ZSTD_frameHeader zfh; ++ { ZSTD_FrameHeader zfh; + if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) + return ZSTD_CONTENTSIZE_ERROR; + if (zfh.frameType == ZSTD_skippableFrame) { +@@ -540,61 +572,62 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize) sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, frameParameter_unsupported, ""); @@ -46536,7 +52584,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; -@@ -602,17 +633,17 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) +@@ -602,17 +635,17 @@ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) continue; } @@ -46562,7 +52610,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; -@@ -676,13 +707,13 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) +@@ -676,13 +709,13 @@ static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) return frameSizeInfo; } @@ -46578,8 +52626,12 @@ index 6b3177c94711..c9cbc45f6ed9 100644 && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); assert(ZSTD_isError(frameSizeInfo.compressedSize) || -@@ -696,7 +727,7 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize - ZSTD_frameHeader zfh; +@@ -693,10 +726,10 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize + const BYTE* const ipstart = ip; + size_t remainingSize = srcSize; + size_t nbBlocks = 0; +- ZSTD_frameHeader zfh; ++ ZSTD_FrameHeader zfh; /* Extract Frame Header */ - { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); @@ -46587,7 +52639,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 if (ZSTD_isError(ret)) return ZSTD_errorFrameSizeInfo(ret); if (ret > 0) -@@ -730,23 +761,26 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize +@@ -730,28 +763,31 @@ static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize ip += 4; } @@ -46621,7 +52673,13 @@ index 6b3177c94711..c9cbc45f6ed9 100644 } /* ZSTD_decompressBound() : -@@ -760,7 +794,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) + * compatible with legacy mode +- * `src` must point to the start of a ZSTD frame or a skippeable frame ++ * `src` must point to the start of a ZSTD frame or a skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the maximum decompressed size of the compressed source + */ +@@ -760,7 +796,7 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) unsigned long long bound = 0; /* Iterate over each frame */ while (srcSize > 0) { @@ -46630,7 +52688,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 size_t const compressedSize = frameSizeInfo.compressedSize; unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) -@@ -773,6 +807,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) +@@ -773,6 +809,48 @@ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) return bound; } @@ -46644,7 +52702,7 @@ index 
6b3177c94711..c9cbc45f6ed9 100644 + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); + size_t const compressedSize = frameSizeInfo.compressedSize; + unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; -+ ZSTD_frameHeader zfh; ++ ZSTD_FrameHeader zfh; + + FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); + if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) @@ -46679,7 +52737,16 @@ index 6b3177c94711..c9cbc45f6ed9 100644 /*-************************************************************* * Frame decoding -@@ -856,6 +932,10 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, +@@ -815,7 +893,7 @@ static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, + return regenSize; + } + +-static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming) ++static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming) + { + (void)dctx; + (void)uncompressedSize; +@@ -856,6 +934,10 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; } @@ -46690,7 +52757,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 /* Loop on each block */ while (1) { BYTE* oBlockEnd = oend; -@@ -888,7 +968,8 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, +@@ -888,7 +970,8 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, switch(blockProperties.blockType) { case bt_compressed: @@ -46700,7 +52767,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 break; case bt_raw : /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */ -@@ -901,12 +982,14 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, +@@ -901,12 +984,14 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, default: RETURN_ERROR(corruption_detected, "invalid block type"); } @@ -46719,11 +52786,11 @@ index 6b3177c94711..c9cbc45f6ed9 100644 assert(ip != NULL); ip += cBlockSize; remainingSrcSize -= cBlockSize; -@@ -930,12 +1013,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, +@@ -930,12 +1015,15 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, } ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ -+ DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %zi, consuming %zi bytes of input", op-ostart, ip - (const BYTE*)*srcPtr); ++ DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr)); *srcPtr = ip; *srcSizePtr = remainingSrcSize; return (size_t)(op-ostart); @@ -46736,7 +52803,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, -@@ -955,17 +1041,18 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, +@@ -955,17 +1043,18 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, while (srcSize >= ZSTD_startingInputLength(dctx->format)) { @@ -46760,7 +52827,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 } } if (ddict) { -@@ -1061,8 +1148,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr +@@ -1061,8 +1150,8 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } /* @@ -46771,7 +52838,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 * be streamed. 
* * For blocks that can be streamed, this allows us to reduce the latency until we produce -@@ -1181,7 +1268,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c +@@ -1181,7 +1270,8 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); @@ -46781,7 +52848,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : -@@ -1250,6 +1338,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c +@@ -1250,6 +1340,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c case ZSTDds_decodeSkippableHeader: assert(src != NULL); assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); @@ -46789,7 +52856,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ dctx->stage = ZSTDds_skipFrame; -@@ -1262,7 +1351,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c +@@ -1262,7 +1353,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c default: assert(0); /* impossible */ @@ -46798,7 +52865,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 } } -@@ -1303,11 +1392,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, +@@ -1303,11 +1394,11 @@ ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, @@ -46812,7 +52879,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 #endif RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; -@@ -1403,10 +1492,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) +@@ -1403,10 +1494,11 @@ size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; @@ -46825,7 +52892,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ dctx->LLTptr = dctx->entropy.LLTable; -@@ -1465,7 +1555,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) +@@ -1465,7 +1557,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. @@ -46834,16 +52901,16 @@ index 6b3177c94711..c9cbc45f6ed9 100644 * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. -@@ -1474,7 +1564,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) +@@ -1474,7 +1566,7 @@ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) * ZSTD_getFrameHeader(), which will provide a more precise error code. 
*/ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { - ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; -+ ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; ++ ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; -@@ -1581,7 +1671,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di +@@ -1581,7 +1673,9 @@ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t di size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); @@ -46854,7 +52921,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 } /* ZSTD_initDStream_usingDDict() : -@@ -1589,6 +1681,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds) +@@ -1589,6 +1683,7 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds) * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { @@ -46862,7 +52929,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); -@@ -1599,6 +1692,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) +@@ -1599,6 +1694,7 @@ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { @@ -46870,7 +52937,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } -@@ -1670,6 +1764,15 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) +@@ -1670,6 +1766,15 @@ ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; return bounds; @@ -46886,7 +52953,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 default:; } bounds.error = ERROR(parameter_unsupported); -@@ -1710,6 +1813,12 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value +@@ -1710,6 +1815,12 @@ size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value case ZSTD_d_refMultipleDDicts: *value = (int)dctx->refMultipleDDicts; return 0; @@ -46899,7 +52966,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 default:; } RETURN_ERROR(parameter_unsupported, ""); -@@ -1743,6 +1852,14 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value +@@ -1743,6 +1854,14 @@ size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; @@ -46914,7 +52981,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 default:; } RETURN_ERROR(parameter_unsupported, ""); -@@ -1754,6 +1871,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) +@@ -1754,6 +1873,7 @@ size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) || (reset == ZSTD_reset_session_and_parameters) ) { dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; @@ -46922,7 +52989,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { -@@ -1770,11 +1888,17 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) +@@ -1770,11 +1890,17 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) return 
ZSTD_sizeof_DCtx(dctx); } @@ -46944,7 +53011,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, -@@ -1782,6 +1906,11 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long +@@ -1782,6 +1908,11 @@ size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long return minRBSize; } @@ -46956,7 +53023,24 @@ index 6b3177c94711..c9cbc45f6ed9 100644 size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); -@@ -1918,7 +2047,6 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -1793,7 +1924,7 @@ size_t ZSTD_estimateDStreamSize(size_t windowSize) + size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) + { + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ +- ZSTD_frameHeader zfh; ++ ZSTD_FrameHeader zfh; + size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ZSTD_isError(err)) return err; + RETURN_ERROR_IF(err>0, srcSize_wrong, ""); +@@ -1888,6 +2019,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB + U32 someMoreWork = 1; + + DEBUGLOG(5, "ZSTD_decompressStream"); ++ assert(zds != NULL); + RETURN_ERROR_IF( + input->pos > input->size, + srcSize_wrong, +@@ -1918,7 +2050,6 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if (zds->refMultipleDDicts && zds->ddictSet) { ZSTD_DCtx_selectFrameDDict(zds); } @@ -46964,7 +53048,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 if (ZSTD_isError(hSize)) { return hSize; /* error */ } -@@ -1932,6 +2060,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -1932,6 +2063,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB zds->lhSize += remainingInput; } input->pos = input->size; @@ -46976,7 +53060,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); -@@ -1943,14 +2076,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -1943,14 +2079,15 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && zds->fParams.frameType != ZSTD_skippableFrame && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { @@ -46995,7 +53079,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; -@@ -1969,7 +2103,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -1969,7 +2106,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB DEBUGLOG(4, "Consume header"); FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); @@ -47005,7 +53089,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); zds->stage = ZSTDds_skipFrame; } else { -@@ -1985,11 +2120,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -1985,11 +2123,13 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* 
output, ZSTD_inB zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, frameParameter_windowTooLarge, ""); @@ -47020,7 +53104,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 : 0; ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); -@@ -2034,6 +2171,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -2034,6 +2174,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); @@ -47028,7 +53112,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 ip += neededInSize; /* Function modifies the stage so we must break */ break; -@@ -2048,7 +2186,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -2048,7 +2189,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we shouldn't be decompressing a block that we can stream. */ @@ -47037,7 +53121,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { -@@ -2057,8 +2195,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -2057,8 +2198,11 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); } @@ -47051,7 +53135,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ -@@ -2068,14 +2209,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -2068,14 +2212,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB break; } case zdss_flush: @@ -47072,7 +53156,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); -@@ -2089,7 +2233,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -2089,7 +2236,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB default: assert(0); /* impossible */ @@ -47081,7 +53165,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 } } /* result */ -@@ -2102,8 +2246,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB +@@ -2102,8 +2249,8 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { @@ -47092,7 +53176,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 assert(0); } } else { -@@ -2140,11 +2284,17 @@ size_t ZSTD_decompressStream_simpleArgs ( +@@ -2140,11 +2287,17 @@ size_t ZSTD_decompressStream_simpleArgs ( void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { @@ -47118,7 +53202,7 @@ index 6b3177c94711..c9cbc45f6ed9 100644 + } } diff --git a/lib/zstd/decompress/zstd_decompress_block.c b/lib/zstd/decompress/zstd_decompress_block.c -index c1913b8e7c89..9fe9a12c8a2c 100644 +index 
c1913b8e7c89..710eb0ffd5a3 100644 --- a/lib/zstd/decompress/zstd_decompress_block.c +++ b/lib/zstd/decompress/zstd_decompress_block.c @@ -1,5 +1,6 @@ @@ -47238,10 +53322,12 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ void* dst, size_t dstCapacity, const streaming_operation streaming) { -@@ -125,6 +141,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, +@@ -124,7 +140,8 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); { const BYTE* const istart = (const BYTE*) src; - symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); +- symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); ++ SymbolEncodingType_e const litEncType = (SymbolEncodingType_e)(istart[0] & 3); + size_t const blockSizeMax = ZSTD_blockSizeMax(dctx); switch(litEncType) @@ -47402,6 +53488,15 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 /* Default FSE distribution tables. * These are pre-calculated FSE decoding tables using default distributions as defined in specification : * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions +@@ -317,7 +359,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + * - start from default distributions, present in /lib/common/zstd_internal.h + * - generate tables normally, using ZSTD_buildFSETable() + * - printout the content of tables +- * - pretify output, report below, test with fuzzer to ensure it's correct */ ++ * - prettify output, report below, test with fuzzer to ensure it's correct */ + + /* Default FSE distribution table for Literal Lengths */ + static const ZSTD_seqSymbol LL_defaultDTable[(1< 0x7F) { if (nbSeq == 0xFF) { RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); -@@ -681,8 +719,16 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, +@@ -681,11 +719,19 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, } *nbSeqPtr = nbSeq; @@ -47464,10 +53568,16 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 + /* FSE table descriptors */ RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ +- { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); +- symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); +- symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + RETURN_ERROR_IF(*ip & 3, corruption_detected, ""); /* The last field, Reserved, must be all-zeroes. 
*/ - { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); - symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); - symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); ++ { SymbolEncodingType_e const LLtype = (SymbolEncodingType_e)(*ip >> 6); ++ SymbolEncodingType_e const OFtype = (SymbolEncodingType_e)((*ip >> 4) & 3); ++ SymbolEncodingType_e const MLtype = (SymbolEncodingType_e)((*ip >> 2) & 3); + ip++; + + /* Build DTables */ @@ -829,7 +875,7 @@ static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, pt /* ZSTD_safecopyDstBeforeSrc(): * This version allows overlap with dst before src, or handles the non-overlap case with dst after src @@ -48217,7 +54327,7 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ -@@ -1873,10 +1928,9 @@ static BMI2_TARGET_ATTRIBUTE size_t +@@ -1873,50 +1928,40 @@ static BMI2_TARGET_ATTRIBUTE size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, @@ -48230,14 +54340,15 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ -@@ -1886,37 +1940,34 @@ typedef size_t (*ZSTD_decompressSequences_t)( - ZSTD_DCtx* dctx, - void* dst, size_t maxDstSize, - const void* seqStart, size_t seqSize, int nbSeq, + #endif /* DYNAMIC_BMI2 */ + +-typedef size_t (*ZSTD_decompressSequences_t)( +- ZSTD_DCtx* dctx, +- void* dst, size_t maxDstSize, +- const void* seqStart, size_t seqSize, int nbSeq, - const ZSTD_longOffset_e isLongOffset, - const int frame); -+ const ZSTD_longOffset_e isLongOffset); - +- #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, @@ -48275,7 +54386,7 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ -@@ -1931,69 +1982,114 @@ static size_t +@@ -1931,69 +1976,114 @@ static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, @@ -48426,7 +54537,7 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; -@@ -2001,6 +2097,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, +@@ -2001,6 +2091,23 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, /* Build Decoding Tables */ { @@ -48450,7 +54561,7 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 /* These macros control at build-time which decompressor implementation * we use. If neither is defined, we do some inspection and dispatch at * runtime. 
-@@ -2008,6 +2121,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, +@@ -2008,6 +2115,11 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, #if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) int usePrefetchDecoder = dctx->ddictIsCold; @@ -48462,7 +54573,7 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 #endif int nbSeq; size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); -@@ -2015,40 +2133,55 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, +@@ -2015,40 +2127,55 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, ip += seqHSize; srcSize -= seqHSize; @@ -48532,7 +54643,7 @@ index c1913b8e7c89..9fe9a12c8a2c 100644 void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) { if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ -@@ -2060,13 +2193,24 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) +@@ -2060,13 +2187,24 @@ void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) } @@ -48594,7 +54705,7 @@ index 3d2d57a5d25a..becffbd89364 100644 #endif /* ZSTD_DEC_BLOCK_H */ diff --git a/lib/zstd/decompress/zstd_decompress_internal.h b/lib/zstd/decompress/zstd_decompress_internal.h -index 98102edb6a83..0f02526be774 100644 +index 98102edb6a83..2a225d1811c4 100644 --- a/lib/zstd/decompress/zstd_decompress_internal.h +++ b/lib/zstd/decompress/zstd_decompress_internal.h @@ -1,5 +1,6 @@ @@ -48620,14 +54731,25 @@ index 98102edb6a83..0f02526be774 100644 U32 rep[ZSTD_REP_NUM]; U32 workspace[ZSTD_BUILD_FSE_TABLE_WKSP_SIZE_U32]; } ZSTD_entropyDTables_t; -@@ -152,6 +154,7 @@ struct ZSTD_DCtx_s +@@ -135,7 +137,7 @@ struct ZSTD_DCtx_s + const void* virtualStart; /* virtual start of previous segment if it was just before current one */ + const void* dictEnd; /* end of previous segment */ + size_t expected; +- ZSTD_frameHeader fParams; ++ ZSTD_FrameHeader fParams; + U64 processedCSize; + U64 decodedSize; + blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ +@@ -152,7 +154,8 @@ struct ZSTD_DCtx_s size_t litSize; size_t rleSize; size_t staticSize; +-#if DYNAMIC_BMI2 != 0 + int isFrameDecompression; - #if DYNAMIC_BMI2 != 0 ++#if DYNAMIC_BMI2 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. 
*/ #endif + @@ -164,6 +167,8 @@ struct ZSTD_DCtx_s ZSTD_dictUses_e dictUses; ZSTD_DDictHashSet* ddictSet; /* Hash set for multiple ddicts */ @@ -48637,6 +54759,21 @@ index 98102edb6a83..0f02526be774 100644 /* streaming */ ZSTD_dStreamStage streamStage; +@@ -199,11 +204,11 @@ struct ZSTD_DCtx_s + }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ + + MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) { +-#if DYNAMIC_BMI2 != 0 +- return dctx->bmi2; ++#if DYNAMIC_BMI2 ++ return dctx->bmi2; + #else + (void)dctx; +- return 0; ++ return 0; + #endif + } + diff --git a/lib/zstd/decompress_sources.h b/lib/zstd/decompress_sources.h index a06ca187aab5..8a47eb2a4514 100644 --- a/lib/zstd/decompress_sources.h @@ -48672,7 +54809,7 @@ index 22686e367e6f..466828e35752 100644 MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Zstd Common"); diff --git a/lib/zstd/zstd_compress_module.c b/lib/zstd/zstd_compress_module.c -index bd8784449b31..ceaf352d03e2 100644 +index bd8784449b31..a788ebfcb111 100644 --- a/lib/zstd/zstd_compress_module.c +++ b/lib/zstd/zstd_compress_module.c @@ -1,6 +1,6 @@ @@ -48683,6 +54820,106 @@ index bd8784449b31..ceaf352d03e2 100644 * All rights reserved. * * This source code is licensed under both the BSD-style license (found in the +@@ -16,6 +16,7 @@ + + #include "common/zstd_deps.h" + #include "common/zstd_internal.h" ++#include "compress/zstd_compress_internal.h" + + #define ZSTD_FORWARD_IF_ERR(ret) \ + do { \ +@@ -85,6 +86,12 @@ zstd_parameters zstd_get_params(int level, + } + EXPORT_SYMBOL(zstd_get_params); + ++size_t zstd_cctx_set_param(zstd_cctx *cctx, ZSTD_cParameter param, int value) ++{ ++ return ZSTD_CCtx_setParameter(cctx, param, value); ++} ++EXPORT_SYMBOL(zstd_cctx_set_param); ++ + zstd_compression_parameters zstd_get_cparams(int level, + unsigned long long estimated_src_size, size_t dict_size) + { +@@ -98,6 +105,52 @@ size_t zstd_cctx_workspace_bound(const zstd_compression_parameters *cparams) + } + EXPORT_SYMBOL(zstd_cctx_workspace_bound); + ++// Used by zstd_cctx_workspace_bound_with_ext_seq_prod() ++static size_t dummy_external_sequence_producer( ++ void *sequenceProducerState, ++ ZSTD_Sequence *outSeqs, size_t outSeqsCapacity, ++ const void *src, size_t srcSize, ++ const void *dict, size_t dictSize, ++ int compressionLevel, ++ size_t windowSize) ++{ ++ (void)sequenceProducerState; ++ (void)outSeqs; (void)outSeqsCapacity; ++ (void)src; (void)srcSize; ++ (void)dict; (void)dictSize; ++ (void)compressionLevel; ++ (void)windowSize; ++ return ZSTD_SEQUENCE_PRODUCER_ERROR; ++} ++ ++static void init_cctx_params_from_compress_params( ++ ZSTD_CCtx_params *cctx_params, ++ const zstd_compression_parameters *compress_params) ++{ ++ ZSTD_parameters zstd_params; ++ memset(&zstd_params, 0, sizeof(zstd_params)); ++ zstd_params.cParams = *compress_params; ++ ZSTD_CCtxParams_init_advanced(cctx_params, zstd_params); ++} ++ ++size_t zstd_cctx_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params) ++{ ++ ZSTD_CCtx_params cctx_params; ++ init_cctx_params_from_compress_params(&cctx_params, compress_params); ++ ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer); ++ return ZSTD_estimateCCtxSize_usingCCtxParams(&cctx_params); ++} ++EXPORT_SYMBOL(zstd_cctx_workspace_bound_with_ext_seq_prod); ++ ++size_t zstd_cstream_workspace_bound_with_ext_seq_prod(const zstd_compression_parameters *compress_params) ++{ ++ ZSTD_CCtx_params cctx_params; ++ init_cctx_params_from_compress_params(&cctx_params, 
compress_params); ++ ZSTD_CCtxParams_registerSequenceProducer(&cctx_params, NULL, dummy_external_sequence_producer); ++ return ZSTD_estimateCStreamSize_usingCCtxParams(&cctx_params); ++} ++EXPORT_SYMBOL(zstd_cstream_workspace_bound_with_ext_seq_prod); ++ + zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size) + { + if (workspace == NULL) +@@ -209,5 +262,25 @@ size_t zstd_end_stream(zstd_cstream *cstream, zstd_out_buffer *output) + } + EXPORT_SYMBOL(zstd_end_stream); + ++void zstd_register_sequence_producer( ++ zstd_cctx *cctx, ++ void* sequence_producer_state, ++ zstd_sequence_producer_f sequence_producer ++) { ++ ZSTD_registerSequenceProducer(cctx, sequence_producer_state, sequence_producer); ++} ++EXPORT_SYMBOL(zstd_register_sequence_producer); ++ ++size_t zstd_compress_sequences_and_literals(zstd_cctx *cctx, void* dst, size_t dst_capacity, ++ const zstd_sequence *in_seqs, size_t in_seqs_size, ++ const void* literals, size_t lit_size, size_t lit_capacity, ++ size_t decompressed_size) ++{ ++ return ZSTD_compressSequencesAndLiterals(cctx, dst, dst_capacity, in_seqs, ++ in_seqs_size, literals, lit_size, ++ lit_capacity, decompressed_size); ++} ++EXPORT_SYMBOL(zstd_compress_sequences_and_literals); ++ + MODULE_LICENSE("Dual BSD/GPL"); + MODULE_DESCRIPTION("Zstd Compressor"); diff --git a/lib/zstd/zstd_decompress_module.c b/lib/zstd/zstd_decompress_module.c index 469fc3059be0..0ae819f0c927 100644 --- a/lib/zstd/zstd_decompress_module.c