blob: e2173afb0a8b0c8c070033ac404f830157c27c6a [file] [log] [blame]
Nick Terrellac58c8d2020-03-26 15:19:05 -07001/* ******************************************************************
2 * Common functions of New Generation Entropy library
W. Felix Handte5d693cc2022-12-20 12:49:47 -05003 * Copyright (c) Meta Platforms, Inc. and affiliates.
Nick Terrellac58c8d2020-03-26 15:19:05 -07004 *
5 * You can contact the author at :
6 * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
7 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
8 *
9 * This source code is licensed under both the BSD-style license (found in the
10 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
11 * in the COPYING file in the root directory of this source tree).
12 * You may select, at your option, one of the above-listed licenses.
13****************************************************************** */
inikep63ecd742016-05-13 11:27:56 +020014
15/* *************************************
16* Dependencies
17***************************************/
inikep63ecd742016-05-13 11:27:56 +020018#include "mem.h"
Yann Colleta91ca622016-06-05 01:33:55 +020019#include "error_private.h" /* ERR_*, ERROR */
Yann Colletd0e2cd12016-06-05 00:58:01 +020020#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
Yann Collet38b75dd2016-07-24 15:35:59 +020021#include "fse.h"
Yann Collet38b75dd2016-07-24 15:35:59 +020022#include "huf.h"
#include "bits.h"       /* ZSTD_highbit32, ZSTD_countTrailingZeros32 */
inikep63ecd742016-05-13 11:27:56 +020024
25
Yann Collet1f2c95c2017-03-05 21:07:20 -080026/*=== Version ===*/
Yann Collet45960372017-02-15 12:00:03 -080027unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
28
29
Yann Collet1f2c95c2017-03-05 21:07:20 -080030/*=== Error Management ===*/
inikep63ecd742016-05-13 11:27:56 +020031unsigned FSE_isError(size_t code) { return ERR_isError(code); }
inikep63ecd742016-05-13 11:27:56 +020032const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
33
inikep63ecd742016-05-13 11:27:56 +020034unsigned HUF_isError(size_t code) { return ERR_isError(code); }
inikep63ecd742016-05-13 11:27:56 +020035const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
36
37
38/*-**************************************************************
39* FSE NCount encoding-decoding
40****************************************************************/
/*! FSE_readNCount_body() :
 *  Decodes an FSE "NCount" header : a compactly bit-packed table of
 *  normalized symbol frequencies, as produced by FSE_writeNCount().
 * @normalizedCounter : output table, filled with one (short) count per symbol;
 *                      symbols absent from the header are left at 0 (memset below).
 * @maxSVPtr : in : max symbol value the output table can hold;
 *             out : the largest symbol value actually decoded.
 * @tableLogPtr : out : the decoded table log.
 * @headerBuffer / @hbSize : the serialized header.
 * @return : number of bytes consumed from headerBuffer, or an FSE error code.
 *  Note : the fast path below reads 4 bytes at a time and therefore requires
 *  hbSize >= 8; smaller inputs are copied into a zero-padded local buffer first.
 */
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                           const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;   /* probability mass still to be distributed; reaches exactly 1 on a valid header */
    int threshold;
    U32 bitStream;
    int bitCount;    /* number of bits of bitStream already consumed */
    unsigned charnum = 0;   /* next symbol to receive a count */
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;      /* set when the previous count was 0 => a repeat-of-zeros field follows */

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        /* Recurse on the zero-padded copy; reject if the decode claims to have
         * consumed more bytes than the caller actually provided. */
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    /* Safe to advance: a full 4-byte reload is still in bounds. */
                    ip += 3;
                } else {
                    /* Near the end of the buffer : clamp ip to the last readable
                     * 4-byte position and account for the skipped bytes in bitCount. */
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11. */
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3); /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
        }
        {
            /* Decode one count, using a variable-length code :
             * small values fit in (nbBits-1) bits, larger ones need nbBits. */
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy */
            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                /* count == -1 denotes a "less than 1" probability; it still
                 * consumes one slot of the remaining mass. */
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = ZSTD_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            /* Refill bitStream, with the same end-of-buffer clamping as above. */
            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;   /* round consumed bits up to a whole byte */
    return ip-istart;
}
Yann Colleta91ca622016-06-05 01:33:55 +0200188
Nick Terrell6d2f7502020-08-24 14:44:33 -0700189/* Avoids the FORCE_INLINE of the _body() function. */
Nick Terrell612e9472020-08-17 13:44:49 -0700190static size_t FSE_readNCount_body_default(
191 short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
192 const void* headerBuffer, size_t hbSize)
193{
194 return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
195}
196
#if DYNAMIC_BMI2
/* BMI2-targeted instantiation of the NCount decoder; compiled only
 * when runtime BMI2 dispatch is enabled. */
BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
        short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
        const void* headerBuffer, size_t hbSize)
{
    size_t const result = FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr,
                                              headerBuffer, hbSize);
    return result;
}
#endif
205
206size_t FSE_readNCount_bmi2(
207 short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
208 const void* headerBuffer, size_t hbSize, int bmi2)
209{
210#if DYNAMIC_BMI2
211 if (bmi2) {
212 return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
213 }
214#endif
215 (void)bmi2;
216 return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
217}
218
219size_t FSE_readNCount(
220 short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
221 const void* headerBuffer, size_t hbSize)
222{
223 return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
224}
225
226
Yann Colleta91ca622016-06-05 01:33:55 +0200227/*! HUF_readStats() :
228 Read compact Huffman tree, saved by HUF_writeCTable().
229 `huffWeight` is destination buffer.
Yann Colletb89af202016-12-01 18:24:59 -0800230 `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
Yann Colleta91ca622016-06-05 01:33:55 +0200231 @return : size read from `src` , or an error Code .
Yann Collet38b75dd2016-07-24 15:35:59 +0200232 Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
Yann Colleta91ca622016-06-05 01:33:55 +0200233*/
234size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
235 U32* nbSymbolsPtr, U32* tableLogPtr,
236 const void* src, size_t srcSize)
237{
Nick Terrellba1fd172020-08-16 22:22:33 -0700238 U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
Nick Terrell8957fef2023-01-13 16:34:52 -0800239 return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
Nick Terrellba1fd172020-08-16 22:22:33 -0700240}
241
/*! HUF_readStats_body() :
 *  Parses the serialized Huffman weight table that precedes a HUF-compressed
 *  stream, filling `huffWeight` (one weight per symbol) and `rankStats`
 *  (histogram of weights, indexed 0..HUF_TABLELOG_MAX).
 *  Two encodings are handled : a raw 4-bit-packed list (first byte >= 128),
 *  or an FSE-compressed list (normal case), decoded via the provided workspace.
 *  The last symbol's weight is not stored; it is reconstructed from the
 *  requirement that weights sum to a clean power of 2.
 * @nbSymbolsPtr : out : number of symbols (decoded weights + the implied one).
 * @tableLogPtr : out : reconstructed table log.
 * @bmi2 : selects the BMI2 path of the inner FSE decoder.
 * @return : number of bytes read from `src`, or an error code.
 */
FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;   /* bytes consumed from src (excluding the leading header byte) */
    size_t oSize;   /* number of weights decoded (excluding the implied last one) */

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize); */   /* is not necessary, even though some analyzer complain ... */

    if (iSize >= 128) {  /* special header : weights are stored raw, 2 per byte */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                /* high nibble first, then low nibble */
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  {   /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            /* weight w contributes 2^(w-1); weight 0 contributes nothing */
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = ZSTD_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << ZSTD_highbit32(rest);
            U32 const lastWeight = ZSTD_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);  /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;   /* +1 for the leading header byte */
}
Nick Terrell612e9472020-08-17 13:44:49 -0700307
Nick Terrell6d2f7502020-08-24 14:44:33 -0700308/* Avoids the FORCE_INLINE of the _body() function. */
Nick Terrell612e9472020-08-17 13:44:49 -0700309static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
310 U32* nbSymbolsPtr, U32* tableLogPtr,
311 const void* src, size_t srcSize,
312 void* workSpace, size_t wkspSize)
313{
314 return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
315}
316
#if DYNAMIC_BMI2
/* BMI2-targeted instantiation of the weight-table parser; compiled only
 * when runtime BMI2 dispatch is enabled. */
static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                                                            U32* nbSymbolsPtr, U32* tableLogPtr,
                                                            const void* src, size_t srcSize,
                                                            void* workSpace, size_t wkspSize)
{
    size_t const result = HUF_readStats_body(huffWeight, hwSize, rankStats,
                                             nbSymbolsPtr, tableLogPtr,
                                             src, srcSize,
                                             workSpace, wkspSize, /* bmi2 */ 1);
    return result;
}
#endif
326
327size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
328 U32* nbSymbolsPtr, U32* tableLogPtr,
329 const void* src, size_t srcSize,
330 void* workSpace, size_t wkspSize,
Nick Terrell8957fef2023-01-13 16:34:52 -0800331 int flags)
Nick Terrell612e9472020-08-17 13:44:49 -0700332{
333#if DYNAMIC_BMI2
Nick Terrell8957fef2023-01-13 16:34:52 -0800334 if (flags & HUF_flags_bmi2) {
Nick Terrell612e9472020-08-17 13:44:49 -0700335 return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
336 }
337#endif
Nick Terrell8957fef2023-01-13 16:34:52 -0800338 (void)flags;
Nick Terrell612e9472020-08-17 13:44:49 -0700339 return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
340}