/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_LEGACY_H
#define ZSTD_LEGACY_H
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include "../common/mem.h" /* MEM_STATIC */
#include "../common/error_private.h" /* ERROR */
#include "../common/zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */
#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
# undef ZSTD_LEGACY_SUPPORT
# define ZSTD_LEGACY_SUPPORT 8
#endif
#if (ZSTD_LEGACY_SUPPORT <= 1)
# include "zstd_v01.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
# include "zstd_v02.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
# include "zstd_v03.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
# include "zstd_v04.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
# include "zstd_v05.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
# include "zstd_v06.h"
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
# include "zstd_v07.h"
#endif
/** ZSTD_isLegacy() :
@return : > 0 if supported by a legacy decoder, 0 otherwise.
When > 0, the return value is the legacy version number.
*/
MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
{
U32 magicNumberLE;
if (srcSize<4) return 0;
magicNumberLE = MEM_readLE32(src);
switch(magicNumberLE)
{
#if (ZSTD_LEGACY_SUPPORT <= 1)
case ZSTDv01_magicNumberLE : return 1;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
case ZSTDv02_magicNumber : return 2;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
case ZSTDv03_magicNumber : return 3;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
case ZSTDv04_magicNumber : return 4;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case ZSTDv05_MAGICNUMBER : return 5;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case ZSTDv06_MAGICNUMBER : return 6;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case ZSTDv07_MAGICNUMBER : return 7;
#endif
default : return 0;
}
}
MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize)
{
U32 const version = ZSTD_isLegacy(src, srcSize);
if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */
#if (ZSTD_LEGACY_SUPPORT <= 5)
if (version==5) {
ZSTDv05_parameters fParams;
size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
if (frResult != 0) return 0;
return fParams.srcSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
if (version==6) {
ZSTDv06_frameParams fParams;
size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
if (frResult != 0) return 0;
return fParams.frameContentSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
if (version==7) {
ZSTDv07_frameParams fParams;
size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
if (frResult != 0) return 0;
return fParams.frameContentSize;
}
#endif
return 0; /* should not be possible */
}
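/* Illustrative usage sketch (not part of the upstream API) :
 * size a destination buffer before invoking the legacy decoder.
 * `cBuf`, `cSize` and `fallbackBound` are hypothetical caller-side variables.
 *
 *     unsigned long long const contentSize = ZSTD_getDecompressedSize_legacy(cBuf, cSize);
 *     size_t const dstCapacity = (contentSize != 0) ? (size_t)contentSize : fallbackBound;
 *
 * Note : versions <= v0.4 do not expose the decompressed size here,
 * so a return value of 0 does not necessarily mean "empty frame". */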
MEM_STATIC size_t ZSTD_decompressLegacy(
void* dst, size_t dstCapacity,
const void* src, size_t compressedSize,
const void* dict,size_t dictSize)
{
U32 const version = ZSTD_isLegacy(src, compressedSize);
(void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
switch(version)
{
#if (ZSTD_LEGACY_SUPPORT <= 1)
case 1 :
return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
case 2 :
return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
case 3 :
return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{ size_t result;
ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
if (zd==NULL) return ERROR(memory_allocation);
result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
ZSTDv05_freeDCtx(zd);
return result;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{ size_t result;
ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
if (zd==NULL) return ERROR(memory_allocation);
result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
ZSTDv06_freeDCtx(zd);
return result;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{ size_t result;
ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
if (zd==NULL) return ERROR(memory_allocation);
result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize);
ZSTDv07_freeDCtx(zd);
return result;
}
#endif
default :
return ERROR(prefix_unknown);
}
}
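/* Illustrative sketch, not part of the upstream API : decompress a single
 * legacy frame without a dictionary, after checking its magic number.
 * The function name below is hypothetical. */
MEM_STATIC size_t ZSTD_decompressLegacyNoDict_example(void* dst, size_t dstCapacity,
                                                      const void* src, size_t srcSize)
{
    if (ZSTD_isLegacy(src, srcSize) == 0)
        return ERROR(prefix_unknown);  /* not a legacy frame */
    return ZSTD_decompressLegacy(dst, dstCapacity, src, srcSize, NULL, 0);
}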
MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize)
{
ZSTD_frameSizeInfo frameSizeInfo;
U32 const version = ZSTD_isLegacy(src, srcSize);
switch(version)
{
#if (ZSTD_LEGACY_SUPPORT <= 1)
case 1 :
ZSTDv01_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 2)
case 2 :
ZSTDv02_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 3)
case 3 :
ZSTDv03_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
ZSTDv04_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
ZSTDv05_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
ZSTDv06_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
ZSTDv07_findFrameSizeInfoLegacy(src, srcSize,
&frameSizeInfo.compressedSize,
&frameSizeInfo.decompressedBound);
break;
#endif
default :
frameSizeInfo.compressedSize = ERROR(prefix_unknown);
frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
break;
}
if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) {
frameSizeInfo.compressedSize = ERROR(srcSize_wrong);
frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR;
}
return frameSizeInfo;
}
MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize)
{
ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize);
return frameSizeInfo.compressedSize;
}
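/* Illustrative sketch (not part of the upstream API) : skip over one legacy
 * frame inside a buffer of concatenated frames, using the compressed size
 * reported above. `ptr` and `remaining` are hypothetical caller-side variables.
 *
 *     size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(ptr, remaining);
 *     if (ZSTD_isError(frameSize)) return frameSize;
 *     ptr = (const char*)ptr + frameSize;
 *     remaining -= frameSize;
 */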
MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
{
switch(version)
{
default :
case 1 :
case 2 :
case 3 :
(void)legacyContext;
return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
#endif
}
}
MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
const void* dict, size_t dictSize)
{
DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
switch(newVersion)
{
default :
case 1 :
case 2 :
case 3 :
(void)dict; (void)dictSize;
return 0;
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
{
ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
if (dctx==NULL) return ERROR(memory_allocation);
ZBUFFv04_decompressInit(dctx);
ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize);
*legacyContext = dctx;
return 0;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{
ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
if (dctx==NULL) return ERROR(memory_allocation);
ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize);
*legacyContext = dctx;
return 0;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{
ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
if (dctx==NULL) return ERROR(memory_allocation);
ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize);
*legacyContext = dctx;
return 0;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{
ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
if (dctx==NULL) return ERROR(memory_allocation);
ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize);
*legacyContext = dctx;
return 0;
}
#endif
}
}
MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
switch(version)
{
default :
case 1 :
case 2 :
case 3 :
(void)legacyContext; (void)output; (void)input;
return ERROR(version_unsupported);
#if (ZSTD_LEGACY_SUPPORT <= 4)
case 4 :
{
ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
const void* src = (const char*)input->src + input->pos;
size_t readSize = input->size - input->pos;
void* dst = (char*)output->dst + output->pos;
size_t decodedSize = output->size - output->pos;
size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
output->pos += decodedSize;
input->pos += readSize;
return hintSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 5)
case 5 :
{
ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
const void* src = (const char*)input->src + input->pos;
size_t readSize = input->size - input->pos;
void* dst = (char*)output->dst + output->pos;
size_t decodedSize = output->size - output->pos;
size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
output->pos += decodedSize;
input->pos += readSize;
return hintSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 6)
case 6 :
{
ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
const void* src = (const char*)input->src + input->pos;
size_t readSize = input->size - input->pos;
void* dst = (char*)output->dst + output->pos;
size_t decodedSize = output->size - output->pos;
size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
output->pos += decodedSize;
input->pos += readSize;
return hintSize;
}
#endif
#if (ZSTD_LEGACY_SUPPORT <= 7)
case 7 :
{
ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
const void* src = (const char*)input->src + input->pos;
size_t readSize = input->size - input->pos;
void* dst = (char*)output->dst + output->pos;
size_t decodedSize = output->size - output->pos;
size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize);
output->pos += decodedSize;
input->pos += readSize;
return hintSize;
}
#endif
}
}
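/* Illustrative streaming sketch (not part of the upstream API) : decompress a
 * legacy frame chunk by chunk. `version` would typically come from
 * ZSTD_isLegacy(), and `input` / `output` are caller-provided ZSTD_inBuffer /
 * ZSTD_outBuffer structures. In the ZBUFF convention used here, the return
 * value is an error code, 0 once the frame is fully decoded, or a suggested
 * size for the next input chunk.
 *
 *     void* legacyCtx = NULL;
 *     size_t const initErr = ZSTD_initLegacyStream(&legacyCtx, 0, version, NULL, 0);
 *     if (ZSTD_isError(initErr)) return initErr;
 *     {   size_t hint = 1;
 *         while (hint != 0 && !ZSTD_isError(hint) && input.pos < input.size) {
 *             hint = ZSTD_decompressLegacyStream(legacyCtx, version, &output, &input);
 *         }
 *     }
 *     ZSTD_freeLegacyStreamContext(legacyCtx, version);
 */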
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_LEGACY_H */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/******************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
#include "zstd_v01.h"
#include "../common/error_private.h"
/******************************************
* Static allocation
******************************************/
/* You can statically allocate an FSE DTable as a table of unsigned, using the macro below */
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
/* You can statically allocate a Huff0 DTable as a table of unsigned short, using the macros below */
#define HUF_DTABLE_SIZE_U16(maxTableLog) (1 + (1<<maxTableLog))
#define HUF_CREATE_STATIC_DTABLE(DTable, maxTableLog) \
unsigned short DTable[HUF_DTABLE_SIZE_U16(maxTableLog)] = { maxTableLog }
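/* Example : with HUF_MAX_TABLELOG == 12 (defined further below), the declaration
 *     HUF_CREATE_STATIC_DTABLE(hufTable, HUF_MAX_TABLELOG);
 * expands to an on-stack array of (1<<12)+1 unsigned short whose first cell holds
 * the table log, which is exactly how HUF_decompress() uses it further down. */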
/******************************************
* Error Management
******************************************/
#define FSE_LIST_ERRORS(ITEM) \
ITEM(FSE_OK_NoError) ITEM(FSE_ERROR_GENERIC) \
ITEM(FSE_ERROR_tableLog_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooLarge) ITEM(FSE_ERROR_maxSymbolValue_tooSmall) \
ITEM(FSE_ERROR_dstSize_tooSmall) ITEM(FSE_ERROR_srcSize_wrong)\
ITEM(FSE_ERROR_corruptionDetected) \
ITEM(FSE_ERROR_maxCode)
#define FSE_GENERATE_ENUM(ENUM) ENUM,
typedef enum { FSE_LIST_ERRORS(FSE_GENERATE_ENUM) } FSE_errorCodes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
/******************************************
* FSE symbol compression API
******************************************/
/*
This API consists of small unitary functions, which highly benefit from being inlined.
You will want to enable link-time-optimization to ensure these functions are properly inlined in your binary.
Visual Studio seems to do it automatically.
For gcc or clang, you'll need to add the -flto flag at both the compilation and linking stages.
If none of these solutions is applicable, include "fse.c" directly.
*/
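/* For example (illustrative command lines ; file names are hypothetical) :
 *     gcc -O3 -flto -c fse.c myapp.c
 *     gcc -O3 -flto fse.o myapp.o -o myapp
 * clang accepts the same -flto flag at both stages. */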
typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
typedef struct
{
size_t bitContainer;
int bitPos;
char* startPtr;
char* ptr;
char* endPtr;
} FSE_CStream_t;
typedef struct
{
ptrdiff_t value;
const void* stateTable;
const void* symbolTT;
unsigned stateLog;
} FSE_CState_t;
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} FSE_DStream_t;
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSE_DState_t;
typedef enum { FSE_DStream_unfinished = 0,
FSE_DStream_endOfBuffer = 1,
FSE_DStream_completed = 2,
FSE_DStream_tooFar = 3 } FSE_DStream_status; /* result of FSE_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... ?! */
/****************************************************************
* Tuning parameters
****************************************************************/
/* MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSE_MAX_MEMORY_USAGE 14
#define FSE_DEFAULT_MEMORY_USAGE 13
/* FSE_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSE_MAX_SYMBOL_VALUE 255
/****************************************************************
* template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
/****************************************************************
* Byte symbol type
****************************************************************/
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSE_decode_t; /* size == U32 */
/****************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/****************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
#ifndef MEM_ACCESS_MODULE
#define MEM_ACCESS_MODULE
/****************************************************************
* Basic Types
*****************************************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
#endif /* MEM_ACCESS_MODULE */
/****************************************************************
* Memory I/O
*****************************************************************/
/* FSE_FORCE_MEMORY_ACCESS
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method, for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method doesn't depend on a compiler extension, but it violates the C standard.
* It can generate buggy code on targets whose generated assembly depends on alignment.
* But in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define FSE_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define FSE_FORCE_MEMORY_ACCESS 1
# endif
#endif
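/* For example (illustrative build line ; the exact invocation depends on your toolchain) :
 *     cc -DFSE_FORCE_MEMORY_ACCESS=1 -c zstd_v01.c
 * forces Method 1 regardless of the auto-detection above. */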
static unsigned FSE_32bits(void)
{
return sizeof(void*)==4;
}
static unsigned FSE_isLittleEndian(void)
{
const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2)
static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; }
#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
#else
static U16 FSE_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
static U32 FSE_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
static U64 FSE_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
#endif /* FSE_FORCE_MEMORY_ACCESS */
static U16 FSE_readLE16(const void* memPtr)
{
if (FSE_isLittleEndian())
return FSE_read16(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
static U32 FSE_readLE32(const void* memPtr)
{
if (FSE_isLittleEndian())
return FSE_read32(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
}
}
static U64 FSE_readLE64(const void* memPtr)
{
if (FSE_isLittleEndian())
return FSE_read64(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
}
}
static size_t FSE_readLEST(const void* memPtr)
{
if (FSE_32bits())
return (size_t)FSE_readLE32(memPtr);
else
return (size_t)FSE_readLE64(memPtr);
}
/****************************************************************
* Constants
*****************************************************************/
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5
#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
/****************************************************************
* Error Management
****************************************************************/
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/****************************************************************
* Complex types
****************************************************************/
typedef struct
{
int deltaFindState;
U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */
typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
/****************************************************************
* Internal functions
****************************************************************/
FORCE_INLINE unsigned FSE_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
unsigned r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
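/* Worked example : FSE_highbit32() returns the position of the highest set bit,
 * e.g. FSE_highbit32(1) == 0, FSE_highbit32(0x8000) == 15, FSE_highbit32(0x80000000) == 31.
 * The input must be non-zero ; behaviour is unspecified for val == 0. */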
/****************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
#define FSE_DECODE_TYPE FSE_decode_t
typedef struct {
U16 tableLog;
U16 fastMode;
} FSE_DTableHeader; /* sizeof U32 */
static size_t FSE_buildDTable
(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 1; /* because dt is unsigned, 32-bits aligned on 32-bits */
const U32 tableSize = 1 << tableLog;
const U32 tableMask = tableSize-1;
const U32 step = FSE_tableStep(tableSize);
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
U32 position = 0;
U32 highThreshold = tableSize-1;
const S16 largeLimit= (S16)(1 << (tableLog-1));
U32 noLarge = 1;
U32 s;
/* Sanity Checks */
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge;
if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge;
/* Init, lay down lowprob symbols */
DTableH[0].tableLog = (U16)tableLog;
for (s=0; s<=maxSymbolValue; s++)
{
if (normalizedCounter[s]==-1)
{
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
symbolNext[s] = 1;
}
else
{
if (normalizedCounter[s] >= largeLimit) noLarge=0;
symbolNext[s] = normalizedCounter[s];
}
}
/* Spread symbols */
for (s=0; s<=maxSymbolValue; s++)
{
int i;
for (i=0; i<normalizedCounter[s]; i++)
{
tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
}
}
if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */
/* Build Decoding table */
{
U32 i;
for (i=0; i<tableSize; i++)
{
FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[i].nbBits = (BYTE) (tableLog - FSE_highbit32 ((U32)nextState) );
tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
}
}
DTableH->fastMode = (U16)noLarge;
return 0;
}
/******************************************
* FSE byte symbol
******************************************/
#ifndef FSE_COMMONDEFS_ONLY
static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); }
static short FSE_abs(short a)
{
return a<0? -a : a;
}
/****************************************************************
* Header bitstream management
****************************************************************/
static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong;
bitStream = FSE_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge;
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr))
{
if (previous0)
{
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF)
{
n0+=24;
if (ip < iend-5)
{
ip+=2;
bitStream = FSE_readLE32(ip) >> bitCount;
}
else
{
bitStream >>= 16;
bitCount+=16;
}
}
while ((bitStream & 3) == 3)
{
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall;
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
bitStream = FSE_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{
const short max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max)
{
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
}
else
{
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSE_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold)
{
nbBits--;
threshold >>= 1;
}
{
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
}
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = FSE_readLE32(ip) >> (bitCount & 31);
}
}
}
if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC;
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong;
return ip-istart;
}
/*********************************************************
* Decompression (Byte symbols)
*********************************************************/
static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<=maxSymbolValue; s++)
{
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
/* FSE_initDStream
* Initialize a FSE_DStream_t.
* srcBuffer must point at the beginning of an FSE block.
* The function result is the size of the FSE_block (== srcSize).
* If srcSize is too small, the function will return an errorCode;
*/
static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong;
if (srcSize >= sizeof(size_t))
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
bitD->bitContainer = FSE_readLEST(bitD->ptr);
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */
bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
}
else
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
/* fallthrough */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
/* fallthrough */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
/* fallthrough */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
/* fallthrough */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
/* fallthrough */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
/* fallthrough */
default:;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */
bitD->bitsConsumed = 8 - FSE_highbit32(contain32);
bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
}
return srcSize;
}
/*!FSE_lookBits
* Provides next n bits from the bitContainer.
* bitContainer is not modified (bits are still present for next read/look)
* On 32-bits, maxNbBits==25
* On 64-bits, maxNbBits==57
* return : value extracted.
*/
static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
/*!FSE_readBits
* Read next n bits from the bitContainer.
* On 32-bits, don't read more than maxNbBits==25
* On 64-bits, don't read more than maxNbBits==57
* Use the fast variant *only* if n >= 1.
* return : value extracted.
*/
static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits)
{
size_t value = FSE_lookBits(bitD, nbBits);
FSE_skipBits(bitD, nbBits);
return value;
}
static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */
{
size_t value = FSE_lookBitsFast(bitD, nbBits);
FSE_skipBits(bitD, nbBits);
return value;
}
static unsigned FSE_reloadDStream(FSE_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
return FSE_DStream_tooFar;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = FSE_readLEST(bitD->ptr);
return FSE_DStream_unfinished;
}
if (bitD->ptr == bitD->start)
{
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer;
return FSE_DStream_completed;
}
{
U32 nbBytes = bitD->bitsConsumed >> 3;
U32 result = FSE_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start)
{
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = FSE_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
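/* Illustrative read-loop sketch (assumptions : `src`/`srcSize` describe a complete
 * FSE bitstream, and each field is `nb` bits wide ; both names are hypothetical) :
 *
 *     FSE_DStream_t bitD;
 *     size_t const initResult = FSE_initDStream(&bitD, src, srcSize);
 *     if (FSE_isError(initResult)) return initResult;
 *     while (FSE_reloadDStream(&bitD) < FSE_DStream_completed) {
 *         size_t const field = FSE_readBits(&bitD, nb);
 *         ... process field ...
 *     }
 *
 * Bits are consumed from the last byte of the buffer towards its beginning,
 * which is why FSE_initDStream() positions ptr at srcBuffer + srcSize - sizeof(size_t). */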
static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt)
{
const void* ptr = dt;
const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog);
FSE_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = FSE_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = FSE_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
/* FSE_endOfDStream
Tells if bitD has reached end of bitStream or not */
static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD)
{
return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8));
}
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
return DStatePtr->state == 0;
}
FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
FSE_DStream_t bitD;
FSE_DState_t state1;
FSE_DState_t state2;
size_t errorCode;
/* Init */
errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressedSize */
if (FSE_isError(errorCode)) return errorCode;
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op<olimit) ; op+=4)
{
op[0] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
FSE_reloadDStream(&bitD);
op[1] = FSE_GETSYMBOL(&state2);
if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } }
op[2] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
FSE_reloadDStream(&bitD);
op[3] = FSE_GETSYMBOL(&state2);
}
/* tail */
/* note : FSE_reloadDStream(&bitD) >= FSE_DStream_endOfBuffer ; ends at exactly FSE_DStream_completed */
while (1)
{
if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
break;
*op++ = FSE_GETSYMBOL(&state1);
if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
break;
*op++ = FSE_GETSYMBOL(&state2);
}
/* end ? */
if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
return op-ostart;
if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */
return (size_t)-FSE_ERROR_corruptionDetected;
}
static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */
/* select fast mode (static) */
if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSE_MAX_SYMBOL_VALUE+1];
DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
size_t errorCode;
if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */
/* normal FSE decoding mode */
errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSE_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */
ip += errorCode;
cSrcSize -= errorCode;
errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSE_isError(errorCode)) return errorCode;
/* always return, even if it is an error code */
return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}
/* *******************************************************
* Huff0 : Huffman block compression
*********************************************************/
#define HUF_MAX_SYMBOL_VALUE 255
#define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */
#define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */
#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
# error "HUF_MAX_TABLELOG is too large !"
#endif
typedef struct HUF_CElt_s {
U16 val;
BYTE nbBits;
} HUF_CElt ;
typedef struct nodeElt_s {
U32 count;
U16 parent;
BYTE byte;
BYTE nbBits;
} nodeElt;
/* *******************************************************
* Huff0 : Huffman block decompression
*********************************************************/
typedef struct {
BYTE byte;
BYTE nbBits;
} HUF_DElt;
static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
U32 weightTotal;
U32 maxBits;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
U32 n;
U32 nextRankStart;
void* ptr = DTable+1;
HUF_DElt* const dt = (HUF_DElt*)ptr;
if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
iSize = ip[0];
FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */
//memset(huffWeight, 0, sizeof(huffWeight)); /* should not be necessary, but some analyzer complain ... */
if (iSize >= 128) /* special header */
{
if (iSize >= (242)) /* RLE */
{
static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, sizeof(huffWeight));
iSize = 0;
}
else /* Incompressible */
{
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
ip += 1;
for (n=0; n<oSize; n+=2)
{
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
}
}
}
else /* header compressed with FSE (normal case) */
{
if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankVal, 0, sizeof(rankVal));
weightTotal = 0;
for (n=0; n<oSize; n++)
{
if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected;
rankVal[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
}
if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected;
/* get last non-null symbol weight (implied, total must be 2^n) */
maxBits = FSE_highbit32(weightTotal) + 1;
if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */
DTable[0] = (U16)maxBits;
{
U32 total = 1 << maxBits;
U32 rest = total - weightTotal;
U32 verif = 1 << FSE_highbit32(rest);
U32 lastWeight = FSE_highbit32(rest) + 1;
if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankVal[lastWeight]++;
}
/* check tree construction validity */
if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */
/* Prepare ranks */
nextRankStart = 0;
for (n=1; n<=maxBits; n++)
{
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
}
/* fill DTable */
for (n=0; n<=oSize; n++)
{
const U32 w = huffWeight[n];
const U32 length = (1 << w) >> 1;
U32 i;
HUF_DElt D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
}
return iSize+1;
}
static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog)
{
const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
const BYTE c = dt[val].byte;
FSE_skipBits(Dstream, dt[val].nbBits);
return c;
}
static size_t HUF_decompress_usingDTable( /* -3% slower when non static */
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong;
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = maxDstSize < 15 ? op : omax-15;
const void* ptr = DTable;
const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1;
const U32 dtLog = DTable[0];
size_t errorCode;
U32 reloadStatus;
/* Init */
const U16* jumpTable = (const U16*)cSrc;
const size_t length1 = FSE_readLE16(jumpTable);
const size_t length2 = FSE_readLE16(jumpTable+1);
const size_t length3 = FSE_readLE16(jumpTable+2);
const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; /* check coherency !! */
const char* const start1 = (const char*)(cSrc) + 6;
const char* const start2 = start1 + length1;
const char* const start3 = start2 + length2;
const char* const start4 = start3 + length3;
FSE_DStream_t bitD1, bitD2, bitD3, bitD4;
if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
errorCode = FSE_initDStream(&bitD1, start1, length1);
if (FSE_isError(errorCode)) return errorCode;
errorCode = FSE_initDStream(&bitD2, start2, length2);
if (FSE_isError(errorCode)) return errorCode;
errorCode = FSE_initDStream(&bitD3, start3, length3);
if (FSE_isError(errorCode)) return errorCode;
errorCode = FSE_initDStream(&bitD4, start4, length4);
if (FSE_isError(errorCode)) return errorCode;
reloadStatus=FSE_reloadDStream(&bitD2);
/* 16 symbols per loop */
for ( ; (reloadStatus<FSE_DStream_completed) && (op<olimit); /* D2-3-4 are supposed to be synchronized and finish together */
op+=16, reloadStatus = FSE_reloadDStream(&bitD2) | FSE_reloadDStream(&bitD3) | FSE_reloadDStream(&bitD4), FSE_reloadDStream(&bitD1))
{
#define HUF_DECODE_SYMBOL_0(n, Dstream) \
op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog);
#define HUF_DECODE_SYMBOL_1(n, Dstream) \
op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
if (FSE_32bits() && (HUF_MAX_TABLELOG>12)) FSE_reloadDStream(&Dstream)
#define HUF_DECODE_SYMBOL_2(n, Dstream) \
op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \
if (FSE_32bits()) FSE_reloadDStream(&Dstream)
HUF_DECODE_SYMBOL_1( 0, bitD1);
HUF_DECODE_SYMBOL_1( 1, bitD2);
HUF_DECODE_SYMBOL_1( 2, bitD3);
HUF_DECODE_SYMBOL_1( 3, bitD4);
HUF_DECODE_SYMBOL_2( 4, bitD1);
HUF_DECODE_SYMBOL_2( 5, bitD2);
HUF_DECODE_SYMBOL_2( 6, bitD3);
HUF_DECODE_SYMBOL_2( 7, bitD4);
HUF_DECODE_SYMBOL_1( 8, bitD1);
HUF_DECODE_SYMBOL_1( 9, bitD2);
HUF_DECODE_SYMBOL_1(10, bitD3);
HUF_DECODE_SYMBOL_1(11, bitD4);
HUF_DECODE_SYMBOL_0(12, bitD1);
HUF_DECODE_SYMBOL_0(13, bitD2);
HUF_DECODE_SYMBOL_0(14, bitD3);
HUF_DECODE_SYMBOL_0(15, bitD4);
}
if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */
return (size_t)-FSE_ERROR_corruptionDetected;
/* tail */
{
/* bitTail = bitD1; */ /* *much* slower : -20% !??! */
FSE_DStream_t bitTail;
bitTail.ptr = bitD1.ptr;
bitTail.bitsConsumed = bitD1.bitsConsumed;
bitTail.bitContainer = bitD1.bitContainer; /* required in case of FSE_DStream_endOfBuffer */
bitTail.start = start1;
for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op<omax) ; op++)
{
HUF_DECODE_SYMBOL_0(0, bitTail);
}
if (FSE_endOfDStream(&bitTail))
return op-ostart;
}
if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */
return (size_t)-FSE_ERROR_corruptionDetected;
}
}
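/* Layout note (worked example) : a compressed Huffman block starts with a 6-byte
 * jump table holding the sizes of the first three sub-streams ; the fourth size is
 * implied. E.g. with cSrcSize == 100 and lengths {20, 25, 22}, the fourth stream
 * occupies 100 - 6 - 20 - 25 - 22 == 27 bytes. */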
static size_t HUF_decompress (void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLE(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t errorCode;
errorCode = HUF_readDTable (DTable, cSrc, cSrcSize);
if (FSE_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong;
ip += errorCode;
cSrcSize -= errorCode;
return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable);
}
#endif /* FSE_COMMONDEFS_ONLY */
/*
zstd - standard compression library
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/****************************************************************
* Tuning parameters
*****************************************************************/
/* MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect */
#define ZSTD_MEMORY_USAGE 17
/**************************************
CPU Feature Detection
**************************************/
/*
* Automated efficient unaligned memory access detection
* Based on known hardware architectures
* This list will be updated thanks to feedback
*/
#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \
|| defined(__ARM_FEATURE_UNALIGNED) \
|| defined(__i386__) || defined(__x86_64__) \
|| defined(_M_IX86) || defined(_M_X64) \
|| defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \
|| (defined(_M_ARM) && (_M_ARM >= 7))
# define ZSTD_UNALIGNED_ACCESS 1
#else
# define ZSTD_UNALIGNED_ACCESS 0
#endif
/********************************************************
* Includes
*********************************************************/
#include <stdlib.h> /* calloc */
#include <string.h> /* memcpy, memmove */
#include <stdio.h> /* debug : printf */
/********************************************************
* Compiler specifics
*********************************************************/
#ifdef __AVX2__
# include <immintrin.h> /* AVX2 intrinsics */
#endif
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
#ifndef MEM_ACCESS_MODULE
#define MEM_ACCESS_MODULE
/********************************************************
* Basic Types
*********************************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
#endif /* MEM_ACCESS_MODULE */
/********************************************************
* Constants
*********************************************************/
static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */
#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
#define HASH_TABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASH_TABLESIZE - 1)
#define KNUTH 2654435761
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BLOCKSIZE (128 KB) /* define, for static allocation */
#define WORKPLACESIZE (BLOCKSIZE*3)
#define MINMATCH 4
#define MLbits 7
#define LLbits 6
#define Offbits 5
#define MaxML ((1<<MLbits )-1)
#define MaxLL ((1<<LLbits )-1)
#define MaxOff ((1<<Offbits)-1)
#define LitFSELog 11
#define MLFSELog 10
#define LLFSELog 10
#define OffFSELog 9
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MaxSeq MAX(MaxLL, MaxML)
#define LITERAL_NOENTROPY 63
#define COMMAND_NOENTROPY 7 /* to remove */
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
static const size_t ZSTD_blockHeaderSize = 3;
static const size_t ZSTD_frameHeaderSize = 4;
/********************************************************
* Memory operations
*********************************************************/
static unsigned ZSTD_32bits(void) { return sizeof(void*)==4; }
static unsigned ZSTD_isLittleEndian(void)
{
const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
static U16 ZSTD_read16(const void* p) { U16 r; memcpy(&r, p, sizeof(r)); return r; }
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
while (op < oend) COPY8(op, ip);
}
static U16 ZSTD_readLE16(const void* memPtr)
{
if (ZSTD_isLittleEndian()) return ZSTD_read16(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U16)((U16)p[0] + ((U16)p[1]<<8));
}
}
static U32 ZSTD_readLE24(const void* memPtr)
{
return ZSTD_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}
static U32 ZSTD_readBE32(const void* memPtr)
{
const BYTE* p = (const BYTE*)memPtr;
return (U32)(((U32)p[0]<<24) + ((U32)p[1]<<16) + ((U32)p[2]<<8) + ((U32)p[3]<<0));
}
/**************************************
* Local structures
***************************************/
typedef struct ZSTD_Cctx_s ZSTD_Cctx;
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
typedef struct {
void* buffer;
U32* offsetStart;
U32* offset;
BYTE* offCodeStart;
BYTE* offCode;
BYTE* litStart;
BYTE* lit;
BYTE* litLengthStart;
BYTE* litLength;
BYTE* matchLengthStart;
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
} seqStore_t;
typedef struct ZSTD_Cctx_s
{
const BYTE* base;
U32 current;
U32 nextUpdate;
seqStore_t seqStore;
#ifdef __AVX2__
__m256i hashTable[HASH_TABLESIZE>>3];
#else
U32 hashTable[HASH_TABLESIZE];
#endif
BYTE buffer[WORKPLACESIZE];
} cctxi_t;
/**************************************
* Error Management
**************************************/
/* published entry point */
unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); }
/**************************************
* Tool functions
**************************************/
#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
#define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */
#define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
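/* e.g. with the values above : ZSTD_VERSION_NUMBER == 0*10000 + 1*100 + 3 == 103, i.e. legacy format v0.1.3 */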
/**************************************************************
* Decompression code
**************************************************************/
static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE* const)src;
BYTE headerFlags;
U32 cSize;
if (srcSize < 3) return ERROR(srcSize_wrong);
headerFlags = *in;
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->blockType = (blockType_t)(headerFlags >> 6);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
if (srcSize > 0) {
memcpy(dst, src, srcSize);
}
return srcSize;
}
static size_t ZSTD_decompressLiterals(void* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + maxDstSize;
const BYTE* ip = (const BYTE*)src;
size_t errorCode;
size_t litSize;
/* check : minimum 2, for litSize, +1, for content */
if (srcSize <= 3) return ERROR(corruption_detected);
litSize = ip[1] + (ip[0]<<8);
litSize += ((ip[-3] >> 3) & 7) << 16; /* mmmmh.... */
op = oend - litSize;
(void)ctx;
if (litSize > maxDstSize) return ERROR(dstSize_tooSmall);
errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2);
if (FSE_isError(errorCode)) return ERROR(GENERIC);
return litSize;
}
static size_t ZSTDv01_decodeLiteralsBlock(void* ctx,
void* dst, size_t maxDstSize,
const BYTE** litStart, size_t* litSize,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE* const)src;
const BYTE* ip = istart;
BYTE* const ostart = (BYTE* const)dst;
BYTE* const oend = ostart + maxDstSize;
blockProperties_t litbp;
size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp);
if (ZSTDv01_isError(litcSize)) return litcSize;
if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
ip += ZSTD_blockHeaderSize;
switch(litbp.blockType)
{
case bt_raw:
*litStart = ip;
ip += litcSize;
*litSize = litcSize;
break;
case bt_rle:
{
size_t rleSize = litbp.origSize;
if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall);
if (!srcSize) return ERROR(srcSize_wrong);
if (rleSize > 0) {
memset(oend - rleSize, *ip, rleSize);
}
*litStart = oend - rleSize;
*litSize = rleSize;
ip++;
break;
}
case bt_compressed:
{
size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize);
if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize;
*litStart = oend - decodedLitSize;
*litSize = decodedLitSize;
ip += litcSize;
break;
}
case bt_end:
default:
return ERROR(GENERIC);
}
return ip-istart;
}
static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE* const)src;
const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype;
U32 LLlog, Offlog, MLlog;
size_t dumpsLength;
/* check */
if (srcSize < 5) return ERROR(srcSize_wrong);
/* SeqHead */
*nbSeq = ZSTD_readLE16(ip); ip+=2;
LLtype = *ip >> 6;
Offtype = (*ip >> 4) & 3;
MLtype = (*ip >> 2) & 3;
if (*ip & 2)
{
dumpsLength = ip[2];
dumpsLength += ip[1] << 8;
ip += 3;
}
else
{
dumpsLength = ip[1];
dumpsLength += (ip[0] & 1) << 8;
ip += 2;
}
*dumpsPtr = ip;
ip += dumpsLength;
*dumpsLengthPtr = dumpsLength;
/* check */
if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
/* sequences */
{
S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
size_t headerSize;
/* Build DTables */
switch(LLtype)
{
case bt_rle :
LLlog = 0;
FSE_buildDTable_rle(DTableLL, *ip++); break;
case bt_raw :
LLlog = LLbits;
FSE_buildDTable_raw(DTableLL, LLbits); break;
default :
{ U32 max = MaxLL;
headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableLL, norm, max, LLlog);
} }
switch(Offtype)
{
case bt_rle :
Offlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableOffb, *ip++); break;
case bt_raw :
Offlog = Offbits;
FSE_buildDTable_raw(DTableOffb, Offbits); break;
default :
{ U32 max = MaxOff;
headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableOffb, norm, max, Offlog);
} }
switch(MLtype)
{
case bt_rle :
MLlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableML, *ip++); break;
case bt_raw :
MLlog = MLbits;
FSE_buildDTable_raw(DTableML, MLbits); break;
default :
{ U32 max = MaxML;
headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableML, norm, max, MLlog);
} } }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t offset;
size_t matchLength;
} seq_t;
typedef struct {
FSE_DStream_t DStream;
FSE_DState_t stateLL;
FSE_DState_t stateOffb;
FSE_DState_t stateML;
size_t prevOffset;
const BYTE* dumps;
const BYTE* dumpsEnd;
} seqState_t;
static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
size_t litLength;
size_t prevOffset;
size_t offset;
size_t matchLength;
const BYTE* dumps = seqState->dumps;
const BYTE* const de = seqState->dumpsEnd;
/* Literal length */
litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
prevOffset = litLength ? seq->offset : seqState->prevOffset;
seqState->prevOffset = seq->offset;
if (litLength == MaxLL)
{
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) litLength += add;
else
{
if (dumps<=(de-3))
{
litLength = ZSTD_readLE24(dumps);
dumps += 3;
}
}
}
/* Offset */
{
U32 offsetCode, nbBits;
offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));
if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
nbBits = offsetCode - 1;
if (offsetCode==0) nbBits = 0; /* cmove */
offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits);
if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream));
if (offsetCode==0) offset = prevOffset;
}
/* MatchLength */
matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
if (matchLength == MaxML)
{
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) matchLength += add;
else
{
if (dumps<=(de-3))
{
matchLength = ZSTD_readLE24(dumps);
dumps += 3;
}
}
}
matchLength += MINMATCH;
/* save result */
seq->litLength = litLength;
seq->offset = offset;
seq->matchLength = matchLength;
seqState->dumps = dumps;
}
static size_t ZSTD_execSequence(BYTE* op,
seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
BYTE* const base, BYTE* const oend)
{
static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
const BYTE* const ostart = op;
const size_t litLength = sequence.litLength;
BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
const BYTE* const litEnd = *litPtr + litLength;
/* check */
if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
if (litEnd > litLimit) return ERROR(corruption_detected);
if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */
/* copy Literals */
if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8))
memmove(op, *litPtr, litLength); /* overwrite risk */
else
ZSTD_wildcopy(op, *litPtr, litLength);
op += litLength;
*litPtr = litEnd; /* update for next sequence */
/* check : last match must be at a minimum distance of 8 from end of dest buffer */
if (oend-op < 8) return ERROR(dstSize_tooSmall);
/* copy Match */
{
const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12);
const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ? */
size_t qutt = 12;
U64 saved[2];
/* check */
if (match < base) return ERROR(corruption_detected);
if (sequence.offset > (size_t)base) return ERROR(corruption_detected);
/* save beginning of literal sequence, in case of write overlap */
if (overlapRisk)
{
if ((endMatch + qutt) > oend) qutt = oend-endMatch;
memcpy(saved, endMatch, qutt);
}
if (sequence.offset < 8)
{
const int dec64 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= dec64;
} else { ZSTD_copy8(op, match); }
op += 8; match += 8;
if (endMatch > oend-(16-MINMATCH))
{
if (op < oend-8)
{
ZSTD_wildcopy(op, match, (oend-8) - op);
match += (oend-8) - op;
op = oend-8;
}
while (op<endMatch) *op++ = *match++;
}
else
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
/* restore, in case of overlap */
if (overlapRisk) memcpy(endMatch, saved, qutt);
}
return endMatch-ostart;
}
typedef struct ZSTDv01_Dctx_s
{
U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
void* previousDstEnd;
void* base;
size_t expected;
blockType_t bType;
U32 phase;
} dctx_t;
static size_t ZSTD_decompressSequences(
void* ctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize,
const BYTE* litStart, size_t litSize)
{
dctx_t* dctx = (dctx_t*)ctx;
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t errorCode, dumpsLength;
const BYTE* litPtr = litStart;
const BYTE* const litEnd = litStart + litSize;
int nbSeq;
const BYTE* dumps;
U32* DTableLL = dctx->LLTable;
U32* DTableML = dctx->MLTable;
U32* DTableOffb = dctx->OffTable;
BYTE* const base = (BYTE*) (dctx->base);
/* Build Decoding Tables */
errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
DTableLL, DTableML, DTableOffb,
ip, iend-ip);
if (ZSTDv01_isError(errorCode)) return errorCode;
ip += errorCode;
/* Regen sequences */
{
seq_t sequence;
seqState_t seqState;
memset(&sequence, 0, sizeof(sequence));
seqState.dumps = dumps;
seqState.dumpsEnd = dumps + dumpsLength;
seqState.prevOffset = 1;
errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip);
if (FSE_isError(errorCode)) return ERROR(corruption_detected);
FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= FSE_DStream_completed) && (nbSeq>0) ; )
{
size_t oneSeqSize;
nbSeq--;
ZSTD_decodeSequence(&sequence, &seqState);
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
/* check if reached exact end */
if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
/* last literal segment */
{
size_t lastLLSize = litEnd - litPtr;
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
if (op != litPtr) memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
}
return op-ostart;
}
static size_t ZSTD_decompressBlock(
void* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
/* blockType == blockCompressed, srcSize is trusted */
const BYTE* ip = (const BYTE*)src;
const BYTE* litPtr = NULL;
size_t litSize = 0;
size_t errorCode;
/* Decode literals sub-block */
errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize);
if (ZSTDv01_isError(errorCode)) return errorCode;
ip += errorCode;
srcSize -= errorCode;
return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize);
}
size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* iend = ip + srcSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t remainingSize = srcSize;
U32 magicNumber;
size_t errorCode=0;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
magicNumber = ZSTD_readBE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
/* Loop on each block */
while (1)
{
size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTDv01_isError(blockSize)) return blockSize;
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (blockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize);
break;
case bt_raw :
errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet supported */
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
break;
default:
return ERROR(GENERIC);
}
if (blockSize == 0) break; /* bt_end */
if (ZSTDv01_isError(errorCode)) return errorCode;
op += errorCode;
ip += blockSize;
remainingSize -= blockSize;
}
return op-ostart;
}
size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
dctx_t ctx;
ctx.base = dst;
return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
U32 magicNumber;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
magicNumber = ZSTD_readBE32(src);
if (magicNumber != ZSTD_magicNumber) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
/* Loop on each block */
while (1)
{
size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv01_isError(blockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize);
return;
}
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (blockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (blockSize == 0) break; /* bt_end */
ip += blockSize;
remainingSize -= blockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
}
/*******************************
* Streaming Decompression API
*******************************/
size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx)
{
dctx->expected = ZSTD_frameHeaderSize;
dctx->phase = 0;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
return 0;
}
ZSTDv01_Dctx* ZSTDv01_createDCtx(void)
{
ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx));
if (dctx==NULL) return NULL;
ZSTDv01_resetDCtx(dctx);
return dctx;
}
size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx)
{
free(dctx);
return 0;
}
size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx)
{
return ((dctx_t*)dctx)->expected;
}
size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
dctx_t* ctx = (dctx_t*)dctx;
/* Sanity check */
if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
if (dst != ctx->previousDstEnd) /* not contiguous */
ctx->base = dst;
/* Decompress : frame header */
if (ctx->phase == 0)
{
/* Check frame magic header */
U32 magicNumber = ZSTD_readBE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize;
return 0;
}
/* Decompress : block header */
if (ctx->phase == 1)
{
blockProperties_t bp;
size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
if (ZSTDv01_isError(blockSize)) return blockSize;
if (bp.blockType == bt_end)
{
ctx->expected = 0;
ctx->phase = 0;
}
else
{
ctx->expected = blockSize;
ctx->bType = bp.blockType;
ctx->phase = 2;
}
return 0;
}
/* Decompress : block content */
{
size_t rSize;
switch(ctx->bType)
{
case bt_compressed:
rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
break;
case bt_raw :
rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC);
}
ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize;
ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
return rSize;
}
}
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V01_H_28739879432
#define ZSTD_V01_H_28739879432
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include <stddef.h> /* size_t */
/* *************************************
* Simple one-step function
***************************************/
/**
ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format
compressedSize : the exact source size
maxOriginalSize : the size of the 'dst' buffer, which must already be allocated.
It must be equal to or larger than originalSize, otherwise decompression will fail.
return : the number of bytes decompressed into destination buffer (originalSize)
or an errorCode if it fails (which can be tested using ZSTDv01_isError())
*/
size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
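/* Illustrative usage sketch (editorial addition, not part of the original documentation).
maxOriginalSize must be large enough for the regenerated data; an upper bound can be
obtained from ZSTDv01_findFrameSizeInfoLegacy(), declared below.
size_t const r = ZSTDv01_decompress(dstBuffer, dstCapacity, srcBuffer, srcSize);
if (ZSTDv01_isError(r)) { handle the error }
else { r bytes were written into dstBuffer }
*/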
/**
ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv01_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
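/* Illustrative usage sketch (editorial addition) : sizing a destination buffer for one frame.
size_t cSize; unsigned long long dBound;
ZSTDv01_findFrameSizeInfoLegacy(srcBuffer, srcSize, &cSize, &dBound);
if (ZSTDv01_isError(cSize) || dBound == ZSTD_CONTENTSIZE_ERROR) { handle the error }
else { the frame occupies cSize source bytes and regenerates at most dBound bytes }
*/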
/**
ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error
*/
unsigned ZSTDv01_isError(size_t code);
/* *************************************
* Advanced functions
***************************************/
typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx;
ZSTDv01_Dctx* ZSTDv01_createDCtx(void);
size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx);
size_t ZSTDv01_decompressDCtx(void* ctx,
void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
/* *************************************
* Streaming functions
***************************************/
size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx);
size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx);
size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
Use the above functions alternately.
ZSTDv01_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv01_decompressContinue().
ZSTDv01_decompressContinue() will use previously decoded blocks as history if they are located immediately prior to the current block in memory.
Result is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/
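/* Illustrative streaming loop (editorial sketch; `readFromSource` is a hypothetical helper,
and output must stay contiguous so previous blocks remain usable as history) :
ZSTDv01_Dctx* const dctx = ZSTDv01_createDCtx();
while (1) {
size_t const toRead = ZSTDv01_nextSrcSizeToDecompress(dctx);
if (toRead == 0) break; (end of frame)
readFromSource(inBuff, toRead); (provide exactly toRead bytes)
{ size_t const produced = ZSTDv01_decompressContinue(dctx, op, (size_t)(oend-op), inBuff, toRead);
if (ZSTDv01_isError(produced)) { handle the error }
op += produced; }
}
ZSTDv01_freeDCtx(dctx);
*/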
/* *************************************
* Prefix - version detection
***************************************/
#define ZSTDv01_magicNumber 0xFD2FB51E /* Big Endian version */
#define ZSTDv01_magicNumberLE 0x1EB52FFD /* Little Endian version */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_V01_H_28739879432 */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include <stddef.h> /* size_t, ptrdiff_t */
#include "zstd_v02.h"
#include "../common/error_private.h"
/******************************************
* Compiler-specific
******************************************/
#if defined(_MSC_VER) /* Visual Studio */
# include <stdlib.h> /* _byteswap_ulong */
# include <intrin.h> /* _byteswap_* */
#endif
/* ******************************************************************
mem.h
low-level memory access routines
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */
/******************************************
* Compiler-specific
******************************************/
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/****************************************************************
* Basic Types
*****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
/****************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method doesn't rely on a compiler extension, but it violates the C standard.
* It can generate buggy code on targets whose generated assembly depends on alignment.
* In some circumstances, however, it's the only known way to get the best performance (e.g. GCC + ARMv6).
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
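/* Example (editorial) : MEM_FORCE_MEMORY_ACCESS can also be set from the build command line,
e.g. `cc -DMEM_FORCE_MEMORY_ACCESS=1 ...` to force the __packed method, or `=2` for direct access;
leaving it undefined keeps the default memcpy()-based method on targets not matched above. */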
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
/* violates C standard on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
#else
/* default method, safe and standard.
can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read16(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian())
{
MEM_write16(memPtr, val);
}
else
{
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read32(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
}
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read64(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
}
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
#if defined (__cplusplus)
}
#endif
#endif /* MEM_H_MODULE */
/* ******************************************************************
bitstream
Part of NewGen Entropy library
header file (to include)
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which highly benefit from being inlined.
* Since link-time-optimization is not available for all compilers,
* these functions are defined in a .h file, to be included.
*/
/**********************************************
* bitStream decompression API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BIT_DStream_t;
typedef enum { BIT_DStream_unfinished = 0,
BIT_DStream_endOfBuffer = 1,
BIT_DStream_completed = 2,
BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/******************************************
* unsafe API
******************************************/
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
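/* Typical decoding pattern (editorial sketch; mirrors the reload loop used by the decoders in this file) :
BIT_DStream_t bitD;
size_t const initResult = BIT_initDStream(&bitD, srcBuffer, srcSize);
if (ERR_isError(initResult)) { handle the error }
while ( (BIT_reloadDStream(&bitD) <= BIT_DStream_completed) && moreSymbolsExpected ) {
size_t const bits = BIT_readBits(&bitD, nbBits);
(consume bits)
}
if (!BIT_endOfDStream(&bitD)) { the bitstream is corrupted }
*/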
/****************************************************************
* Helper functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
unsigned r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
/**********************************************************
* bitStream decoding
**********************************************************/
/*!BIT_initDStream
* Initialize a BIT_DStream_t.
* @bitD : a pointer to an already allocated BIT_DStream_t structure
* @srcBuffer must point at the beginning of a bitStream
* @srcSize must be the exact size of the bitStream
* @result : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(size_t)) /* normal case */
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
}
else
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
/* fallthrough */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
/* fallthrough */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
/* fallthrough */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
/* fallthrough */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
/* fallthrough */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
/* fallthrough */
default:;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BIT_lookBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
size_t value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*!BIT_readBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
size_t value = BIT_lookBitsFast(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_unfinished;
}
if (bitD->ptr == bitD->start)
{
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
{
U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start)
{
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BIT_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BIT_endOfDStream
* @return Tells if DStream has reached its exact end
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#if defined (__cplusplus)
}
#endif
#endif /* BITSTREAM_H_MODULE */
/* ******************************************************************
Error codes and messages
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Compiler-specific
******************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define ERR_STATIC static inline
#elif defined(_MSC_VER)
# define ERR_STATIC static __inline
#elif defined(__GNUC__)
# define ERR_STATIC static __attribute__((unused))
#else
# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/******************************************
* Error Management
******************************************/
#define PREFIX(name) ZSTD_error_##name
#define ERROR(name) (size_t)-PREFIX(name)
#define ERROR_LIST(ITEM) \
ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
ITEM(PREFIX(maxCode))
#define ERROR_GENERATE_ENUM(ENUM) ENUM,
typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
#define ERROR_CONVERTTOSTRING(STRING) #STRING,
#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
ERR_STATIC const char* ERR_getErrorName(size_t code)
{
static const char* codeError = "Unspecified error code";
if (ERR_isError(code)) return ERR_strings[-(int)(code)];
return codeError;
}
#if defined (__cplusplus)
}
#endif
#endif /* ERROR_H_MODULE */
/*
Constructor and Destructor of type FSE_CTable
Note that its size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
/* ******************************************************************
FSE : Finite State Entropy coder
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Static allocation
******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
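/* Example (editorial) : static allocation of an FSE decoding table for tableLog == 12 :
FSE_DTable dtable[FSE_DTABLE_SIZE_U32(12)]; (1 header cell + 4096 decode cells)
*/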
/******************************************
* FSE advanced API
******************************************/
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/* build a fake FSE_DTable, designed to always generate the same symbolValue */
/******************************************
* FSE symbol decompression API
******************************************/
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSE_DState_t;
static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
/******************************************
* FSE unsafe API
******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
/******************************************
* Implementation of inline functions
******************************************/
/* decompression */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSE_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSE_decode_t; /* size == U32 */
MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
memcpy(&DTableH, dt, sizeof(DTableH));
DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
BIT_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BIT_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
return DStatePtr->state == 0;
}
#if defined (__cplusplus)
}
#endif
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Static allocation macros
******************************************/
/* Huff0 buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* static allocation of Huff0's DTable */
#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
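/* Example (editorial) : static allocation of a single-symbol Huff0 decoding table, table log 12 :
HUF_CREATE_STATIC_DTABLEX2(hufTable, 12); (expands to: unsigned short hufTable[4097] = { 12 };)
*/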
/******************************************
* Advanced functions
******************************************/
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* quad-symbols decoder */
#if defined (__cplusplus)
}
#endif
/*
zstd - standard compression library
Header File
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include <stddef.h> /* size_t */
/* *************************************
* Version
***************************************/
#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */
#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/* *************************************
* Advanced functions
***************************************/
typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */
#if defined (__cplusplus)
}
#endif
/*
zstd - standard compression library
Header File for static linking only
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* The objects defined in this file should be considered experimental.
* They are not labelled stable, as their prototypes may change in the future.
* You can use them for tests, to provide feedback, or if you can accept the risk of future changes.
*/
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Streaming functions
***************************************/
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
/*
Use the above functions alternately.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() will use previously decoded blocks as history if they are located immediately prior to the current block in memory.
Result is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/
/* *************************************
* Prefix - version detection
***************************************/
#define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/
#if defined (__cplusplus)
}
#endif
/* ******************************************************************
FSE : Finite State Entropy coder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSE_COMMONDEFS_ONLY
/****************************************************************
* Tuning parameters
****************************************************************/
/* MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSE_MAX_MEMORY_USAGE 14
#define FSE_DEFAULT_MEMORY_USAGE 13
/* FSE_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSE_MAX_SYMBOL_VALUE 255
/****************************************************************
* template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
/****************************************************************
* Byte symbol type
****************************************************************/
#endif /* !FSE_COMMONDEFS_ONLY */
/****************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/****************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/****************************************************************
* Constants
*****************************************************************/
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5
#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
/****************************************************************
* Error Management
****************************************************************/
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/****************************************************************
* Complex types
****************************************************************/
typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
/****************************************************************
* Templates
****************************************************************/
/*
Designed to be included, to generate type-specific functions (template emulation in C).
The objective is to write these functions only once, for improved maintenance.
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
/* Function templates */
#define FSE_DECODE_TYPE FSE_decode_t
static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
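/* FSE_tableStep() : stride used to spread symbols over the table.
   For the table sizes used here it is always odd (the +3 term), hence coprime with the
   power-of-two table size, so repeatedly adding it modulo tableSize visits every cell
   exactly once. Example : tableSize = 1024 -> step = 512 + 128 + 3 = 643. */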
static size_t FSE_buildDTable
(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
void* ptr = dt+1;
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
FSE_DTableHeader DTableH;
const U32 tableSize = 1 << tableLog;
const U32 tableMask = tableSize-1;
const U32 step = FSE_tableStep(tableSize);
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
U32 position = 0;
U32 highThreshold = tableSize-1;
const S16 largeLimit= (S16)(1 << (tableLog-1));
U32 noLarge = 1;
U32 s;
/* Sanity Checks */
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
DTableH.tableLog = (U16)tableLog;
for (s=0; s<=maxSymbolValue; s++)
{
if (normalizedCounter[s]==-1)
{
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
symbolNext[s] = 1;
}
else
{
if (normalizedCounter[s] >= largeLimit) noLarge=0;
symbolNext[s] = normalizedCounter[s];
}
}
/* Spread symbols */
for (s=0; s<=maxSymbolValue; s++)
{
int i;
for (i=0; i<normalizedCounter[s]; i++)
{
tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
}
}
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
/* Build Decoding table */
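/* symbolNext[symbol] counts, per symbol, how many of its cells have been filled so far
   (starting from its normalized count). For each cell : nbBits = tableLog - highbit(count)
   is the number of fresh bits to read, and newState = (count << nbBits) - tableSize is the
   base of the destination range, so that newState + readBits(nbBits) always lands back
   in [0, tableSize). */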
{
U32 i;
for (i=0; i<tableSize; i++)
{
FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
}
}
DTableH.fastMode = (U16)noLarge;
memcpy(dt, &DTableH, sizeof(DTableH)); /* memcpy(), to avoid strict aliasing warnings */
return 0;
}
#ifndef FSE_COMMONDEFS_ONLY
/******************************************
* FSE helper functions
******************************************/
static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
/****************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static short FSE_abs(short a)
{
return (short)(a<0 ? -a : a);
}
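/* FSE_readNCount() : decodes the normalized counter header written by the compressor.
   The low 4 bits of the first little-endian 32-bit read give tableLog - FSE_MIN_TABLELOG.
   Each count is then stored on just enough bits for what remains to be distributed
   (threshold halves as `remaining` shrinks). Stored values are offset by one : a stored 0
   decodes to -1, the "low probability" marker handled by FSE_buildDTable() above.
   After a symbol with count 0, runs of further zero-count symbols are coded in 2-bit
   groups (value 3 = "three more, keep reading"), with 0xFFFF markers adding 24 at once. */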
static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr))
{
if (previous0)
{
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF)
{
n0+=24;
if (ip < iend-5)
{
ip+=2;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
{
bitStream >>= 16;
bitCount+=16;
}
}
while ((bitStream & 3) == 3)
{
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{
const short max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max)
{
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
}
else
{
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSE_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold)
{
nbBits--;
threshold >>= 1;
}
{
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
}
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
}
}
}
if (remaining != 1) return ERROR(GENERIC);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
return ip-istart;
}
/*********************************************************
* Decompression (Byte symbols)
*********************************************************/
static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<=maxSymbolValue; s++)
{
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BIT_DStream_t bitD;
FSE_DState_t state1;
FSE_DState_t state2;
size_t errorCode;
/* Init */
errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressedSize */
if (FSE_isError(errorCode)) return errorCode;
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
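/* fast==1 is only legal when no symbol has a normalized count >= 2^(tableLog-1)
   (the `fastMode` flag computed in FSE_buildDTable) : in that case every state consumes
   at least 1 bit, so FSE_decodeSymbolFast() can skip the nbBits==0 case. */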
/* 4 symbols per loop */
for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
{
op[0] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[1] = FSE_GETSYMBOL(&state2);
if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
op[2] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[3] = FSE_GETSYMBOL(&state2);
}
/* tail */
/* note : in this tail section, BIT_reloadDStream(&bitD) can no longer fully refill the container ; a valid stream ends exactly at BIT_DStream_completed */
while (1)
{
if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
break;
*op++ = FSE_GETSYMBOL(&state1);
if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
break;
*op++ = FSE_GETSYMBOL(&state2);
}
/* end ? */
if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
return op-ostart;
if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
return ERROR(corruption_detected);
}
static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
memcpy(&DTableH, dt, sizeof(DTableH));
/* select fast mode (static) */
if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSE_MAX_SYMBOL_VALUE+1];
DTable_max_t dt; /* static analyzers may not see that this table is fully initialized below */
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
size_t errorCode;
if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
/* normal FSE decoding mode */
errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSE_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
ip += errorCode;
cSrcSize -= errorCode;
errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSE_isError(errorCode)) return errorCode;
/* always return, even if it is an error code */
return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}
#endif /* FSE_COMMONDEFS_ONLY */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/****************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
# define inline __inline
#else
# define inline /* disable inline */
#endif
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
/****************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/****************************************************************
* Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/******************************************
* Helper functions
******************************************/
static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
#define HUF_MAX_SYMBOL_VALUE 255
#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
# error "HUF_MAX_TABLELOG is too large !"
#endif
/*********************************************************
* Huff0 : Huffman block decompression
*********************************************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
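/* A HUF_DEltX4 cell describes up to two decoded bytes at once : `sequence` stores them
   in little-endian order, `length` (1 or 2) tells how many are valid, and `nbBits` is the
   total number of bits consumed for the pair. HUF_DEltX2 is the one-symbol-per-lookup
   variant. */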
/*! HUF_readStats
Read compact Huffman tree, saved by HUF_writeCTable
@huffWeight : destination buffer
@return : size read from `src`
*/
static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
U32 tableLog;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
U32 n;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
//memset(huffWeight, 0, hwSize); /* not necessary, even though some static analyzers complain */
if (iSize >= 128) /* special header */
{
if (iSize >= (242)) /* RLE */
{
static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, hwSize);
iSize = 0;
}
else /* Incompressible */
{
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
for (n=0; n<oSize; n+=2)
{
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
}
}
}
else /* header compressed with FSE (normal case) */
{
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
weightTotal = 0;
for (n=0; n<oSize; n++)
{
if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
}
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
tableLog = BIT_highbit32(weightTotal) + 1;
if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
{
U32 total = 1 << tableLog;
U32 rest = total - weightTotal;
U32 verif = 1 << BIT_highbit32(rest);
U32 lastWeight = BIT_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
}
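/* Example : if the decoded weights sum to 992 (in 2^(w-1) units), then
   tableLog = highbit(992)+1 = 10, total = 1024, rest = 32, and the implied last symbol
   gets weight highbit(32)+1 = 6. A `rest` that is not a clean power of 2 means the
   header is corrupted. */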
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
*tableLogPtr = tableLog;
return iSize+1;
}
/**************************/
/* single-symbol decoding */
/**************************/
static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
const BYTE* ip = (const BYTE*) src;
size_t iSize = ip[0];
U32 nbSymbols = 0;
U32 n;
U32 nextRankStart;
void* ptr = DTable+1;
HUF_DEltX2* const dt = (HUF_DEltX2*)ptr;
HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
//memset(huffWeight, 0, sizeof(huffWeight)); /* not necessary, even though some static analyzers complain */
iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
DTable[0] = (U16)tableLog; /* note : the allocated size of DTable could be tracked separately from its used size, to support DTable re-use */
/* Prepare ranks */
nextRankStart = 0;
for (n=1; n<=tableLog; n++)
{
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
}
/* fill DTable */
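/* A symbol of weight w occupies (1<<w)>>1 consecutive cells and costs tableLog+1-w bits
   per lookup. Example : with tableLog==11, a weight-8 symbol fills 128 cells and is
   decoded by reading 4 bits. */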
for (n=0; n<nbSymbols; n++)
{
const U32 w = huffWeight[n];
const U32 length = (1 << w) >> 1;
U32 i;
HUF_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
}
return iSize;
}
static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
const BYTE c = dt[val].byte;
BIT_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
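/* Reload strategy : after a reload, a 64-bit bit-container holds enough bits for
   4 symbols of up to HUF_MAX_TABLELOG==12 bits, so _1 and _2 decode unconditionally
   on 64-bit targets. On 32-bit targets the _2 steps are skipped, leaving at most
   2 decodes (<= 24 bits) between reloads. */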
static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
{
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
static size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* ptr = DTable;
const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
{
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t errorCode;
errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUF_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/***************************/
/* double-symbols decoding */
/***************************/
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUF_DEltX4 DElt;
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
U32 s;
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1)
{
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
{
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
}
}
typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++)
{
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
{
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
}
else
{
U32 i;
const U32 end = start + length;
HUF_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
for (i = start; i < end; i++)
DTable[i] = DElt;
}
rankVal[weight] += length;
}
}
static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
const U32 memLog = DTable[0];
const BYTE* ip = (const BYTE*) src;
size_t iSize = ip[0];
void* ptr = DTable;
HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
//memset(weightList, 0, sizeof(weightList)); /* not necessary, even though some static analyzers complain */
iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--)
{if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
/* Get start index of each weight */
{
U32 w, nextRankStart = 0;
for (w=1; w<=maxW; w++)
{
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{
U32 s;
for (s=0; s<nbSymbols; s++)
{
U32 w = weightList[s];
U32 r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
U32 w, consumed;
const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
U32* rankVal0 = rankVal[0];
for (w=1; w<=maxW; w++)
{
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
}
for (consumed = minBits; consumed <= memLog - minBits; consumed++)
{
U32* rankValPtr = rankVal[consumed];
for (w = 1; w <= maxW; w++)
{
rankValPtr[w] = rankVal0[w] >> consumed;
}
}
}
HUF_fillDTableX4(dt, memLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
return iSize;
}
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
else
{
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
{
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
}
}
return 1;
}
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
{
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
static size_t HUF_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U32* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* ptr = DTable;
const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
{
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/**********************************/
/* quad-symbol decoding */
/**********************************/
typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;
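/* Quad-symbol variant : each table cell can emit up to 4 bytes per lookup.
   HUF_DDescX6 carries the number of bits consumed and the number of valid bytes,
   HUF_DSeqX6 carries the bytes themselves. */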
/* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */
static void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog,
const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart,
const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc)
{
const int scaleLog = nbBitsBaseline - sizeLog; /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */
const int minBits = nbBitsBaseline - maxWeight;
const U32 level = DDesc.nbBytes;
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
U32 symbolStartPos, s;
/* local rankVal, will be modified */
memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal));
/* fill skipped values */
if (minWeight>1)
{
U32 i;
const U32 skipSize = rankVal[minWeight];
for (i = 0; i < skipSize; i++)
{
DSequence[i] = baseSeq;
DDescription[i] = DDesc;
}
}
/* fill DTable */
DDesc.nbBytes++;
symbolStartPos = rankStart[minWeight];
for (s=symbolStartPos; s<sortedListSize; s++)
{
const BYTE symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight; /* >= 1 (sorted) */
const int nbBits = nbBitsBaseline - weight; /* >= 1 (by construction) */
const int totalBits = consumed+nbBits;
const U32 start = rankVal[weight];
const U32 length = 1 << (sizeLog-nbBits);
baseSeq.byte[level] = symbol;
DDesc.nbBits = (BYTE)totalBits;
if ((level<3) && (sizeLog-totalBits >= minBits)) /* enough room for another symbol */
{
int nextMinWeight = totalBits + scaleLog;
if (nextMinWeight < 1) nextMinWeight = 1;
HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits,
rankValOrigin, totalBits, nextMinWeight, maxWeight,
sortedSymbols, sortedListSize, rankStart,
nbBitsBaseline, baseSeq, DDesc); /* recursive (max : level 3) */
}
else
{
U32 i;
const U32 end = start + length;
for (i = start; i < end; i++)
{
DDescription[i] = DDesc;
DSequence[i] = baseSeq;
}
}
rankVal[weight] += length;
}
}
/* note : same preparation as X4 */
static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
U32* const rankStart = rankStart0+1;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
rankVal_t rankVal;
const U32 memLog = DTable[0];
const BYTE* ip = (const BYTE*) src;
size_t iSize = ip[0];
if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
//memset(weightList, 0, sizeof(weightList)); /* not necessary, even though some static analyzers complain */
iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable is too small */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--)
{ if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
/* Get start index of each weight */
{
U32 w, nextRankStart = 0;
for (w=1; w<=maxW; w++)
{
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{
U32 s;
for (s=0; s<nbSymbols; s++)
{
U32 w = weightList[s];
U32 r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
U32 w, consumed;
const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
U32* rankVal0 = rankVal[0];
for (w=1; w<=maxW; w++)
{
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
}
for (consumed = minBits; consumed <= memLog - minBits; consumed++)
{
U32* rankValPtr = rankVal[consumed];
for (w = 1; w <= maxW; w++)
{
rankValPtr[w] = rankVal0[w] >> consumed;
}
}
}
/* fill tables */
{
void* ptr = DTable+1;
HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr);
void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1));
HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart);
HUF_DSeqX6 DSeq;
HUF_DDescX6 DDesc;
DSeq.sequence = 0;
DDesc.nbBits = 0;
DDesc.nbBytes = 0;
HUF_fillDTableX6LevelN(DDescription, DSequence, memLog,
(const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW,
sortedSymbol, sizeOfSort, rankStart0,
tableLog+1, DSeq, DDesc);
}
return iSize;
}
static U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, ds+val, sizeof(HUF_DSeqX6));
BIT_skipBits(DStream, dd[val].nbBits);
return dd[val].nbBytes;
}
static U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream,
const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
U32 length = dd[val].nbBytes;
if (length <= maxL)
{
memcpy(op, ds+val, length);
BIT_skipBits(DStream, dd[val].nbBits);
return length;
}
memcpy(op, ds+val, maxL);
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
{
BIT_skipBits(DStream, dd[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
}
return maxL;
}
#define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \
ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog)
#define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
#define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
static inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog)
{
const void* ddPtr = DTable+1;
const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
BYTE* const pStart = p;
/* up to 16 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16))
{
HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
HUF_DECODE_SYMBOLX6_1(p, bitDPtr);
HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
}
/* closer to the end, up to 4 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
while (p <= pEnd-4)
HUF_DECODE_SYMBOLX6_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
while (p < pEnd)
p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog);
return p-pStart;
}
static size_t HUF_decompress4X6_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U32* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const U32 dtLog = DTable[0];
const void* ddPtr = DTable+1;
const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-64 symbols per loop (4-16 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; )
{
HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
HUF_DECODE_SYMBOLX6_1(op1, &bitD1);
HUF_DECODE_SYMBOLX6_1(op2, &bitD2);
HUF_DECODE_SYMBOLX6_1(op3, &bitD3);
HUF_DECODE_SYMBOLX6_1(op4, &bitD4);
HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
HUF_DECODE_SYMBOLX6_0(op1, &bitD1);
HUF_DECODE_SYMBOLX6_0(op2, &bitD2);
HUF_DECODE_SYMBOLX6_0(op3, &bitD3);
HUF_DECODE_SYMBOLX6_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog);
HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog);
HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog);
HUF_decodeStreamX6(op4, &bitD4, oend, DTable, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/**********************************/
/* Generic decompression selector */
/**********************************/
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
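/* algoTime[Q][algo] is a rough cost model, in arbitrary units : `tableTime` is the fixed
   cost of building the decoding table, `decode256Time` the cost per 256 decoded bytes.
   Q quantizes the compression ratio (Q = cSrcSize*16/dstSize below) and the three columns
   correspond to the single-, double- and quad-symbol decoders. */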
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 };
/* estimate decompression time */
U32 Q;
const U32 D256 = (U32)(dstSize >> 8);
U32 Dtime[3];
U32 algoNb = 0;
int n;
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
/* decoder timing evaluation */
Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
for (n=0; n<3; n++)
Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
if (Dtime[1] < Dtime[0]) algoNb = 1;
if (Dtime[2] < Dtime[algoNb]) algoNb = 2;
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
//return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
//return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
//return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
}
/*
zstd - standard compression library
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
*/
#define ZSTD_MEMORY_USAGE 17
/*!
* HEAPMODE :
* Select how default compression functions allocate memory for their hash table :
* on the stack (0, fastest), or on the heap (1, requires malloc()).
* Note that the compression context is fairly large, so heap memory is recommended.
*/
#ifndef ZSTD_HEAPMODE
# define ZSTD_HEAPMODE 1
#endif /* ZSTD_HEAPMODE */
/*!
* LEGACY_SUPPORT :
* the decompressor can decode older formats (zstd 0.1 and later)
*/
#ifndef ZSTD_LEGACY_SUPPORT
# define ZSTD_LEGACY_SUPPORT 1
#endif
/* *******************************************************
* Includes
*********************************************************/
#include <stdlib.h> /* calloc */
#include <string.h> /* memcpy, memmove */
#include <stdio.h> /* debug : printf */
/* *******************************************************
* Compiler specifics
*********************************************************/
#ifdef __AVX2__
# include <immintrin.h> /* AVX2 intrinsics */
#endif
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
/* *******************************************************
* Constants
*********************************************************/
#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
#define HASH_TABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASH_TABLESIZE - 1)
#define KNUTH 2654435761
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BLOCKSIZE (128 KB) /* define, for static allocation */
#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
#define IS_RAW BIT0
#define IS_RLE BIT1
#define WORKPLACESIZE (BLOCKSIZE*3)
#define MINMATCH 4
#define MLbits 7
#define LLbits 6
#define Offbits 5
#define MaxML ((1<<MLbits )-1)
#define MaxLL ((1<<LLbits )-1)
#define MaxOff 31
#define LitFSELog 11
#define MLFSELog 10
#define LLFSELog 10
#define OffFSELog 9
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MaxSeq MAX(MaxLL, MaxML)
#define LITERAL_NOENTROPY 63
#define COMMAND_NOENTROPY 7 /* to remove */
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
static const size_t ZSTD_blockHeaderSize = 3;
static const size_t ZSTD_frameHeaderSize = 4;
/* *******************************************************
* Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do COPY8(op, ip) while (op < oend);
}
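/* Because ZSTD_wildcopy() may read and write up to 8 bytes past the requested length,
   the surrounding buffers must provide that margin : see the `+ 8` on litBuffer in
   ZSTD_DCtx_s, and the buffered fallback taken in ZSTD_decodeLiteralsBlock() when a raw
   literals segment sits too close to the end of the source. */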
/* **************************************
* Local structures
****************************************/
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
typedef struct {
void* buffer;
U32* offsetStart;
U32* offset;
BYTE* offCodeStart;
BYTE* offCode;
BYTE* litStart;
BYTE* lit;
BYTE* litLengthStart;
BYTE* litLength;
BYTE* matchLengthStart;
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
} seqStore_t;
/* *************************************
* Error Management
***************************************/
/*! ZSTD_isError
* tells if a return value is an error code */
static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
/* *************************************************************
* Decompression section
***************************************************************/
struct ZSTD_DCtx_s
{
U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
void* previousDstEnd;
void* base;
size_t expected;
blockType_t bType;
U32 phase;
const BYTE* litPtr;
size_t litSize;
BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
}; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE* const)src;
BYTE headerFlags;
U32 cSize;
if (srcSize < 3) return ERROR(srcSize_wrong);
headerFlags = *in;
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->blockType = (blockType_t)(headerFlags >> 6);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
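/* Block header layout (3 bytes) : bits 7-6 of in[0] give the block type ;
   ((in[0] & 7) << 16) + (in[1] << 8) + in[2] gives the compressed size.
   The return value is the number of input bytes the block body occupies :
   cSize for compressed/raw blocks, 1 for an RLE block (the repeated byte),
   0 for the end marker. */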
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
if (srcSize > 0) {
memcpy(dst, src, srcSize);
}
return srcSize;
}
/** ZSTD_decompressLiterals
@return : nb of bytes read from src, or an error code*/
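/* As decoded by the masks below, the 5-byte literals header packs : 2 bits of literals
   block type, 19 bits of regenerated size (litSize), then 19 bits of compressed size
   (litCSize). The Huffman-compressed payload starts at ip+5. */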
static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
*maxDstSizePtr = litSize;
return litCSize + 5;
}
/** ZSTD_decodeLiteralsBlock
@return : nb of bytes read from src (< srcSize )*/
static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const void* src, size_t srcSize)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
const BYTE* const istart = (const BYTE* const)src;
/* any compressed block with literals segment must be at least this size */
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
switch(*istart & 3)
{
default:
case 0:
{
size_t litSize = BLOCKSIZE;
const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, 8);
return readSize; /* works if it's an error too */
}
case IS_RAW:
{
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, 8);
return litSize+3;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+3;
dctx->litSize = litSize;
return litSize+3;
}
case IS_RLE:
{
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[3], litSize + 8);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return 4;
}
}
}
static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE* const)src;
const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype;
U32 LLlog, Offlog, MLlog;
size_t dumpsLength;
/* check */
if (srcSize < 5) return ERROR(srcSize_wrong);
/* SeqHead */
*nbSeq = MEM_readLE16(ip); ip+=2;
LLtype = *ip >> 6;
Offtype = (*ip >> 4) & 3;
MLtype = (*ip >> 2) & 3;
if (*ip & 2)
{
dumpsLength = ip[2];
dumpsLength += ip[1] << 8;
ip += 3;
}
else
{
dumpsLength = ip[1];
dumpsLength += (ip[0] & 1) << 8;
ip += 2;
}
*dumpsPtr = ip;
ip += dumpsLength;
*dumpsLengthPtr = dumpsLength;
/* check */
if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
/* sequences */
{
S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
size_t headerSize;
/* Build DTables */
switch(LLtype)
{
case bt_rle :
LLlog = 0;
FSE_buildDTable_rle(DTableLL, *ip++); break;
case bt_raw :
LLlog = LLbits;
FSE_buildDTable_raw(DTableLL, LLbits); break;
default :
{ U32 max = MaxLL;
headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableLL, norm, max, LLlog);
} }
switch(Offtype)
{
case bt_rle :
Offlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
break;
case bt_raw :
Offlog = Offbits;
FSE_buildDTable_raw(DTableOffb, Offbits); break;
default :
{ U32 max = MaxOff;
headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableOffb, norm, max, Offlog);
} }
switch(MLtype)
{
case bt_rle :
MLlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableML, *ip++); break;
case bt_raw :
MLlog = MLbits;
FSE_buildDTable_raw(DTableML, MLbits); break;
default :
{ U32 max = MaxML;
headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableML, norm, max, MLlog);
} } }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t offset;
size_t matchLength;
} seq_t;
typedef struct {
BIT_DStream_t DStream;
FSE_DState_t stateLL;
FSE_DState_t stateOffb;
FSE_DState_t stateML;
size_t prevOffset;
const BYTE* dumps;
const BYTE* dumpsEnd;
} seqState_t;
static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
size_t litLength;
size_t prevOffset;
size_t offset;
size_t matchLength;
const BYTE* dumps = seqState->dumps;
const BYTE* const de = seqState->dumpsEnd;
/* Literal length */
litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
prevOffset = litLength ? seq->offset : seqState->prevOffset;
seqState->prevOffset = seq->offset;
if (litLength == MaxLL)
{
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) litLength += add;
else if (dumps + 3 <= de)
{
litLength = MEM_readLE24(dumps);
dumps += 3;
}
if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
/* Offset */
{
static const size_t offsetPrefix[MaxOff+1] = { /* note : size_t faster than U32 */
1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
U32 offsetCode, nbBits;
offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
nbBits = offsetCode - 1;
if (offsetCode==0) nbBits = 0; /* cmove */
offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
if (offsetCode==0) offset = prevOffset; /* cmove */
}
/* MatchLength */
matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
if (matchLength == MaxML)
{
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) matchLength += add;
else if (dumps + 3 <= de)
{
matchLength = MEM_readLE24(dumps);
dumps += 3;
}
if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
matchLength += MINMATCH;
/* save result */
seq->litLength = litLength;
seq->offset = offset;
seq->matchLength = matchLength;
seqState->dumps = dumps;
}
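/** ZSTD_execSequence() :
    Regenerate one sequence at `op` : first copy `litLength` literals from *litPtr,
    then copy `matchLength` bytes from the already-decoded output at distance `offset`.
    Close-range matches (offset < 8) are expanded with the dec32table/dec64table fix-ups,
    and copies near the end of the output buffer fall back to byte-by-byte copy,
    since ZSTD_wildcopy() may write a few bytes beyond the requested length.
    @return : number of bytes written at `op`, or an error code */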
static size_t ZSTD_execSequence(BYTE* op,
seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
BYTE* const base, BYTE* const oend)
{
static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
const BYTE* const ostart = op;
BYTE* const oLitEnd = op + sequence.litLength;
BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_8 = oend-8;
const BYTE* const litEnd = *litPtr + sequence.litLength;
/* checks */
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */
/* copy Literals */
ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = litEnd; /* update for next sequence */
/* copy Match */
{
const BYTE* match = op - sequence.offset;
/* check */
if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */
//if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) */
if (match < base) return ERROR(corruption_detected);
/* close range match, overlap */
if (sequence.offset < 8)
{
const int dec64 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= dec64;
}
else
{
ZSTD_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH))
{
if (op < oend_8)
{
ZSTD_wildcopy(op, match, oend_8 - op);
match += oend_8 - op;
op = oend_8;
}
while (op < oMatchEnd) *op++ = *match++;
}
else
{
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
}
return oMatchEnd - ostart;
}
static size_t ZSTD_decompressSequences(
void* ctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t errorCode, dumpsLength;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
int nbSeq;
const BYTE* dumps;
U32* DTableLL = dctx->LLTable;
U32* DTableML = dctx->MLTable;
U32* DTableOffb = dctx->OffTable;
BYTE* const base = (BYTE*) (dctx->base);
/* Build Decoding Tables */
errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
DTableLL, DTableML, DTableOffb,
ip, iend-ip);
if (ZSTD_isError(errorCode)) return errorCode;
ip += errorCode;
/* Regen sequences */
{
seq_t sequence;
seqState_t seqState;
memset(&sequence, 0, sizeof(sequence));
seqState.dumps = dumps;
seqState.dumpsEnd = dumps + dumpsLength;
seqState.prevOffset = 1;
errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
if (ERR_isError(errorCode)) return ERROR(corruption_detected);
FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
{
size_t oneSeqSize;
nbSeq--;
ZSTD_decodeSequence(&sequence, &seqState);
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
/* check if reached exact end */
if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
/* last literal segment */
{
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected);
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
if (op != litPtr) memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
}
return op-ostart;
}
static size_t ZSTD_decompressBlock(
void* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
/* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
/* Decode literals sub-block */
size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
}
static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* iend = ip + srcSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t remainingSize = srcSize;
U32 magicNumber;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
/* Loop on each block */
while (1)
{
size_t decodedSize=0;
size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
break;
case bt_raw :
decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet supported */
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
break;
default:
return ERROR(GENERIC); /* impossible */
}
if (cBlockSize == 0) break; /* bt_end */
if (ZSTD_isError(decodedSize)) return decodedSize;
op += decodedSize;
ip += cBlockSize;
remainingSize -= cBlockSize;
}
return op-ostart;
}
static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
ZSTD_DCtx ctx;
ctx.base = dst;
return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
U32 magicNumber;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (cBlockSize == 0) break; /* bt_end */
ip += cBlockSize;
remainingSize -= cBlockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
}
/*******************************
* Streaming Decompression API
*******************************/
static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
{
dctx->expected = ZSTD_frameHeaderSize;
dctx->phase = 0;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
return 0;
}
static ZSTD_DCtx* ZSTD_createDCtx(void)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
if (dctx==NULL) return NULL;
ZSTD_resetDCtx(dctx);
return dctx;
}
static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
free(dctx);
return 0;
}
static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
{
return dctx->expected;
}
static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
/* Sanity check */
if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
if (dst != ctx->previousDstEnd) /* not contiguous */
ctx->base = dst;
/* Decompress : frame header */
if (ctx->phase == 0)
{
/* Check frame magic header */
U32 magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize;
return 0;
}
/* Decompress : block header */
if (ctx->phase == 1)
{
blockProperties_t bp;
size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
if (ZSTD_isError(blockSize)) return blockSize;
if (bp.blockType == bt_end)
{
ctx->expected = 0;
ctx->phase = 0;
}
else
{
ctx->expected = blockSize;
ctx->bType = bp.blockType;
ctx->phase = 2;
}
return 0;
}
/* Decompress : block content */
{
size_t rSize;
switch(ctx->bType)
{
case bt_compressed:
rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
break;
case bt_raw :
rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC);
}
ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize;
ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
return rSize;
}
}
/* wrapper layer */
unsigned ZSTDv02_isError(size_t code)
{
return ZSTD_isError(code);
}
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize)
{
return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}
ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
{
return (ZSTDv02_Dctx*)ZSTD_createDCtx();
}
size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
{
return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
{
return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
{
return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
}
size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
}
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V02_H_4174539423
#define ZSTD_V02_H_4174539423
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include <stddef.h> /* size_t */
/* *************************************
* Simple one-step function
***************************************/
/**
ZSTDv02_decompress() : decompress ZSTD frames compliant with v0.2.x format
compressedSize : is the exact source size
maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
It must be equal to or larger than originalSize, otherwise decompression will fail.
return : the number of bytes decompressed into destination buffer (originalSize)
or an errorCode if it fails (which can be tested using ZSTDv02_isError())
*/
size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
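/* Usage sketch (illustrative, not part of the original API documentation) :
   one-shot decompression of a complete v0.2 frame held in `src`/`srcSize`,
   into a caller-allocated buffer `dst` of `dstCapacity` bytes :

     size_t const r = ZSTDv02_decompress(dst, dstCapacity, src, srcSize);
     if (ZSTDv02_isError(r)) return 0;   // r is an error code
     // on success, r is the number of decompressed bytes written into dst
*/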
/**
ZSTDv02_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.2.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv02_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
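/* Usage sketch (illustrative, not part of the original API documentation) :
   probing a buffer to learn how many bytes the frame occupies and an upper bound
   on its decompressed size, e.g. to allocate a destination buffer :

     size_t cSize;
     unsigned long long dBound;
     ZSTDv02_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
     if (ZSTDv02_isError(cSize)) return 0;   // invalid or truncated frame
     // otherwise : the frame spans cSize bytes, and decompresses to at most dBound bytes
*/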
/**
ZSTDv02_isError() : tells if the result of ZSTDv02_decompress() is an error
*/
unsigned ZSTDv02_isError(size_t code);
/* *************************************
* Advanced functions
***************************************/
typedef struct ZSTDv02_Dctx_s ZSTDv02_Dctx;
ZSTDv02_Dctx* ZSTDv02_createDCtx(void);
size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx);
size_t ZSTDv02_decompressDCtx(void* ctx,
void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
/* *************************************
* Streaming functions
***************************************/
size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx);
size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx);
size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
Use the above functions alternately.
ZSTDv02_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv02_decompressContinue().
ZSTDv02_decompressContinue() will use previous data blocks to improve decompression if they are located prior to the current block.
Result is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTDv02_decompressContinue() has decoded some header.
*/
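/* Streaming usage sketch (illustrative, not part of the original API documentation) :
   the decoder dictates how many input bytes to feed at each step. Assumes `input`
   holds a complete frame and `out`/`outCapacity` can receive the whole regenerated
   content contiguously :

     ZSTDv02_Dctx* const dctx = ZSTDv02_createDCtx();
     const char* ip = (const char*)input;
     char* op = (char*)out;
     if (dctx==NULL) return 0;
     ZSTDv02_resetDCtx(dctx);
     while (1) {
         size_t const toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
         size_t regenerated;
         if (toRead == 0) break;   // frame fully decoded
         regenerated = ZSTDv02_decompressContinue(dctx, op, outCapacity - (size_t)(op - (char*)out), ip, toRead);
         if (ZSTDv02_isError(regenerated)) break;   // corrupted or truncated input
         ip += toRead;
         op += regenerated;   // can be 0 when only a header was decoded
     }
     ZSTDv02_freeDCtx(dctx);
*/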
/* *************************************
* Prefix - version detection
***************************************/
#define ZSTDv02_magicNumber 0xFD2FB522 /* v0.2 */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_V02_H_4174539423 */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#include <stddef.h> /* size_t, ptrdiff_t */
#include "zstd_v03.h"
#include "../common/error_private.h"
/******************************************
* Compiler-specific
******************************************/
#if defined(_MSC_VER) /* Visual Studio */
# include <stdlib.h> /* _byteswap_ulong */
# include <intrin.h> /* _byteswap_* */
#endif
/* ******************************************************************
mem.h
low-level memory access routines
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */
/******************************************
* Compiler-specific
******************************************/
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/****************************************************************
* Basic Types
*****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
/****************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violates the C standard.
* It can generate buggy code on targets generating assembly depending on alignment.
* But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
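/* For example (illustrative), the method can be forced from the compiler command line :
     gcc -DMEM_FORCE_MEMORY_ACCESS=1 -c zstd_v03.c
*/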
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
/* violates C standard on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
#else
/* default method, safe and standard.
can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read16(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian())
{
MEM_write16(memPtr, val);
}
else
{
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read32(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
}
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read64(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
}
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
#if defined (__cplusplus)
}
#endif
#endif /* MEM_H_MODULE */
/* ******************************************************************
bitstream
Part of NewGen Entropy library
header file (to include)
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which benefit greatly from being inlined.
* Since link-time optimization is not available for all compilers,
* these functions are defined in a .h, to be included where needed.
*/
/**********************************************
* bitStream decompression API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BIT_DStream_t;
typedef enum { BIT_DStream_unfinished = 0,
BIT_DStream_endOfBuffer = 1,
BIT_DStream_completed = 2,
BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/******************************************
* unsafe API
******************************************/
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
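/* Typical decoding loop (illustrative sketch, mirroring the sequence-decoding loop in this file) :
   the stream is read backward; values are extracted with BIT_readBits() and the bit
   container is refilled with BIT_reloadDStream() until the expected number of values
   (known from the block headers) has been decoded :

     BIT_DStream_t bitD;
     int nbValues = ...;   // number of values expected, known from headers
     if (ERR_isError(BIT_initDStream(&bitD, srcBuffer, srcSize))) return ERROR(corruption_detected);
     while ((BIT_reloadDStream(&bitD) <= BIT_DStream_completed) && (nbValues-- > 0)) {
         size_t const value = BIT_readBits(&bitD, nbBits);   // nbBits chosen by the caller
         // ... consume value ...
     }
     if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);   // stream not fully consumed
*/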
/****************************************************************
* Helper functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
unsigned r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
/**********************************************************
* bitStream decoding
**********************************************************/
/*!BIT_initDStream
* Initialize a BIT_DStream_t.
* @bitD : a pointer to an already allocated BIT_DStream_t structure
* @srcBuffer must point at the beginning of a bitStream
* @srcSize must be the exact size of the bitStream
* @result : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(size_t)) /* normal case */
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
}
else
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
/* fallthrough */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
/* fallthrough */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
/* fallthrough */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
/* fallthrough */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
/* fallthrough */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
/* fallthrough */
default:;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BIT_lookBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
size_t value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*!BIT_readBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
size_t value = BIT_lookBitsFast(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_unfinished;
}
if (bitD->ptr == bitD->start)
{
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
{
U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start)
{
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BIT_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BIT_endOfDStream
* @return Tells if DStream has reached its exact end
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#if defined (__cplusplus)
}
#endif
#endif /* BITSTREAM_H_MODULE */
/* ******************************************************************
Error codes and messages
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Compiler-specific
******************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define ERR_STATIC static inline
#elif defined(_MSC_VER)
# define ERR_STATIC static __inline
#elif defined(__GNUC__)
# define ERR_STATIC static __attribute__((unused))
#else
# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/******************************************
* Error Management
******************************************/
#define PREFIX(name) ZSTD_error_##name
#define ERROR(name) (size_t)-PREFIX(name)
#define ERROR_LIST(ITEM) \
ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
ITEM(PREFIX(maxCode))
#define ERROR_GENERATE_ENUM(ENUM) ENUM,
typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
#define ERROR_CONVERTTOSTRING(STRING) #STRING,
#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
ERR_STATIC const char* ERR_getErrorName(size_t code)
{
static const char* codeError = "Unspecified error code";
if (ERR_isError(code)) return ERR_strings[-(int)(code)];
return codeError;
}
#if defined (__cplusplus)
}
#endif
#endif /* ERROR_H_MODULE */
/*
Constructor and Destructor of type FSE_CTable
Note that its size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
/* ******************************************************************
FSE : Finite State Entropy coder
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Static allocation
******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
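/* Example (illustrative) : statically allocating a DTable able to describe tables of up to 2^10 cells,
   as done for the LL/ML/Offset tables of ZSTD_DCtx_s earlier in this file :
     U32 myDTable[FSE_DTABLE_SIZE_U32(10)];   // 1 + (1<<10) = 1025 unsigned cells
*/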
/******************************************
* FSE advanced API
******************************************/
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/* build a fake FSE_DTable, designed to always generate the same symbolValue */
/******************************************
* FSE symbol decompression API
******************************************/
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSE_DState_t;
static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
/******************************************
* FSE unsafe API
******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
/******************************************
* Implementation of inline functions
******************************************/
/* decompression */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSE_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSE_decode_t; /* size == U32 */
MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
memcpy(&DTableH, dt, sizeof(DTableH));
DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
BIT_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BIT_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
return DStatePtr->state == 0;
}
#if defined (__cplusplus)
}
#endif
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Static allocation macros
******************************************/
/* Huff0 buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* static allocation of Huff0's DTable */
#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
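/* Example (illustrative) : a single-symbol (X2) table sized for tableLog 12 :
     HUF_CREATE_STATIC_DTABLEX2(hufTable, 12);   // expands to : unsigned short hufTable[1 + (1<<12)] = { 12 };
*/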
/******************************************
* Advanced functions
******************************************/
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
#if defined (__cplusplus)
}
#endif
/*
zstd - standard compression library
Header File
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include <stddef.h> /* size_t */
/* *************************************
* Version
***************************************/
#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */
#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/* *************************************
* Advanced functions
***************************************/
typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */
#if defined (__cplusplus)
}
#endif
/*
zstd - standard compression library
Header File for static linking only
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* The objects defined in this file should be considered experimental.
* They are not labelled stable, as their prototypes may change in the future.
* You can use them for tests, to provide feedback, or if you can endure the risk of future changes.
*/
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Streaming functions
***************************************/
typedef struct ZSTD_DCtx_s ZSTD_DCtx;
/*
Use the above functions alternately.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() will use previous data blocks to improve decompression if they are located prior to the current block.
Result is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
*/
/* *************************************
* Prefix - version detection
***************************************/
#define ZSTD_magicNumber 0xFD2FB523 /* v0.3 */
#if defined (__cplusplus)
}
#endif
/* ******************************************************************
FSE : Finite State Entropy coder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSE_COMMONDEFS_ONLY
/****************************************************************
* Tuning parameters
****************************************************************/
/* MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSE_MAX_MEMORY_USAGE 14
#define FSE_DEFAULT_MEMORY_USAGE 13
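/* With the values above : FSE_MAX_MEMORY_USAGE = 14 -> 2^14 = 16 KB per table,
   and FSE_DEFAULT_MEMORY_USAGE = 13 -> 2^13 = 8 KB (using the N -> 2^N formula above). */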
/* FSE_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSE_MAX_SYMBOL_VALUE 255
/****************************************************************
* template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
/****************************************************************
* Byte symbol type
****************************************************************/
#endif /* !FSE_COMMONDEFS_ONLY */
/****************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/****************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/****************************************************************
* Constants
*****************************************************************/
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5
#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
/****************************************************************
* Error Management
****************************************************************/
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/****************************************************************
* Complex types
****************************************************************/
typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
/****************************************************************
* Templates
****************************************************************/
/*
Designed to be included,
for type-specific functions (template emulation in C).
The objective is to write these functions only once, for easier maintenance.
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
/* Function templates */
#define FSE_DECODE_TYPE FSE_decode_t
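/* FSE_tableStep() :
   step used to spread symbols over the decoding table. For the power-of-2 table sizes used here,
   the result is odd, hence coprime with the table size, so repeatedly adding it modulo tableSize
   visits every cell exactly once. */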
static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
static size_t FSE_buildDTable
(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
void* ptr = dt+1;
FSE_DTableHeader DTableH;
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
const U32 tableSize = 1 << tableLog;
const U32 tableMask = tableSize-1;
const U32 step = FSE_tableStep(tableSize);
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
U32 position = 0;
U32 highThreshold = tableSize-1;
const S16 largeLimit= (S16)(1 << (tableLog-1));
U32 noLarge = 1;
U32 s;
/* Sanity Checks */
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
DTableH.tableLog = (U16)tableLog;
for (s=0; s<=maxSymbolValue; s++)
{
if (normalizedCounter[s]==-1)
{
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
symbolNext[s] = 1;
}
else
{
if (normalizedCounter[s] >= largeLimit) noLarge=0;
symbolNext[s] = normalizedCounter[s];
}
}
/* Spread symbols */
for (s=0; s<=maxSymbolValue; s++)
{
int i;
for (i=0; i<normalizedCounter[s]; i++)
{
tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
}
}
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
/* Build Decoding table */
{
U32 i;
for (i=0; i<tableSize; i++)
{
FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
}
}
DTableH.fastMode = (U16)noLarge;
memcpy(dt, &DTableH, sizeof(DTableH));
return 0;
}
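/* Note on the constructed table : decoding a symbol reads tableDecode[state].symbol, then draws
   'nbBits' fresh bits from the stream and moves to state = newState + bits (see FSE_decodeSymbol()).
   'fastMode' records that no symbol has a probability >= 2^(tableLog-1), which lets
   FSE_decompress_usingDTable() select the faster FSE_decodeSymbolFast() path. */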
#ifndef FSE_COMMONDEFS_ONLY
/******************************************
* FSE helper functions
******************************************/
static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
/****************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static short FSE_abs(short a)
{
return a<0 ? -a : a;
}
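/* FSE_readNCount() :
   reads a compact normalized-count header : the first 4 bits give tableLog - FSE_MIN_TABLELOG,
   then one count per symbol is read with a variable number of bits, with a dedicated repeat mode
   ('previous0') to compactly skip runs of zero-probability symbols.
   A decoded count of -1 denotes a low-probability ("less than 1") symbol. */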
static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr))
{
if (previous0)
{
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF)
{
n0+=24;
if (ip < iend-5)
{
ip+=2;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
{
bitStream >>= 16;
bitCount+=16;
}
}
while ((bitStream & 3) == 3)
{
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{
const short max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max)
{
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
}
else
{
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSE_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold)
{
nbBits--;
threshold >>= 1;
}
{
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
}
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
}
}
}
if (remaining != 1) return ERROR(GENERIC);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
return ip-istart;
}
/*********************************************************
* Decompression (Byte symbols)
*********************************************************/
static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<=maxSymbolValue; s++)
{
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
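/* FSE_decompress_usingDTable_generic() :
   decodes with two interleaved FSE states sharing a single bitstream; the two states are
   independent, giving the CPU two decode chains to work on between reloads. The 'fast'
   parameter acts as a constant selecting the FSE_decodeSymbolFast() path. */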
FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BIT_DStream_t bitD;
FSE_DState_t state1;
FSE_DState_t state2;
size_t errorCode;
/* Init */
errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
if (FSE_isError(errorCode)) return errorCode;
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
{
op[0] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[1] = FSE_GETSYMBOL(&state2);
if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
op[2] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[3] = FSE_GETSYMBOL(&state2);
}
/* tail */
/* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
while (1)
{
if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
break;
*op++ = FSE_GETSYMBOL(&state1);
if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
break;
*op++ = FSE_GETSYMBOL(&state2);
}
/* end ? */
if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
return op-ostart;
if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
return ERROR(corruption_detected);
}
static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
memcpy(&DTableH, dt, sizeof(DTableH));
/* select fast mode (static) */
if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSE_MAX_SYMBOL_VALUE+1];
DTable_max_t dt;   /* the static analyzer seems unable to understand that this table will be properly initialized later */
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
size_t errorCode;
if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
/* normal FSE decoding mode */
errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSE_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
ip += errorCode;
cSrcSize -= errorCode;
errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSE_isError(errorCode)) return errorCode;
/* always return, even if it is an error code */
return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}
#endif /* FSE_COMMONDEFS_ONLY */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/****************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# define inline __inline
#else
# define inline /* disable inline */
#endif
/****************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/****************************************************************
* Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/******************************************
* Helper functions
******************************************/
static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
#define HUF_MAX_SYMBOL_VALUE 255
#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
# error "HUF_MAX_TABLELOG is too large !"
#endif
/*********************************************************
* Huff0 : Huffman block decompression
*********************************************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
/*! HUF_readStats
Read compact Huffman tree, saved by HUF_writeCTable
@huffWeight : destination buffer
@return : size read from `src`
*/
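/* weight encoding summary (as implemented below) :
   - first byte < 128  : weights are FSE-compressed over the next 'iSize' bytes
   - first byte >= 128 : weights are stored directly, 4 bits each ((first byte - 127) weights)
   - first byte >= 242 : RLE shortcut, all weights equal 1, count given by a small lookup table
   The last weight is not stored : it is inferred from the property that the sum of 2^(weight-1)
   over all symbols must be a power of 2 (= 2^tableLog). */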
static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
U32 tableLog;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
U32 n;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
//memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) /* special header */
{
if (iSize >= (242)) /* RLE */
{
static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, hwSize);
iSize = 0;
}
else /* Incompressible */
{
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
for (n=0; n<oSize; n+=2)
{
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
}
}
}
else /* header compressed with FSE (normal case) */
{
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
weightTotal = 0;
for (n=0; n<oSize; n++)
{
if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
}
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
tableLog = BIT_highbit32(weightTotal) + 1;
if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
{
U32 total = 1 << tableLog;
U32 rest = total - weightTotal;
U32 verif = 1 << BIT_highbit32(rest);
U32 lastWeight = BIT_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
}
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
*tableLogPtr = tableLog;
return iSize+1;
}
/**************************/
/* single-symbol decoding */
/**************************/
static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
const BYTE* ip = (const BYTE*) src;
size_t iSize = ip[0];
U32 nbSymbols = 0;
U32 n;
U32 nextRankStart;
void* ptr = DTable+1;
HUF_DEltX2* const dt = (HUF_DEltX2*)(ptr);
HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
//memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
/* Prepare ranks */
nextRankStart = 0;
for (n=1; n<=tableLog; n++)
{
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
}
/* fill DTable */
for (n=0; n<nbSymbols; n++)
{
const U32 w = huffWeight[n];
const U32 length = (1 << w) >> 1;
U32 i;
HUF_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
}
return iSize;
}
static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
const BYTE c = dt[val].byte;
BIT_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
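/* The _1 and _2 macro variants conditionally skip a decode on 32-bit targets : between two
   reloads the 32-bit bit-container may not hold enough bits for 4 table lookups, so the
   intermediate symbols are only decoded when the container width (or a small enough
   HUF_MAX_TABLELOG) guarantees that enough bits remain. */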
static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
{
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
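/* 4-streams format (shared by the X2 and X4 decoders below) :
   the compressed block starts with a 6-byte jump table holding three little-endian 16-bit
   lengths; the 4th stream length is implied by the total size. The regenerated output is split
   into 4 segments of (dstSize+3)/4 bytes, one per stream, which lets the hot loop decode the
   four bitstreams in an interleaved fashion. */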
static size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* ptr = DTable;
const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
{
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t errorCode;
errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUF_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/***************************/
/* double-symbols decoding */
/***************************/
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUF_DEltX4 DElt;
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
U32 s;
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1)
{
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
{
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
}
}
typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++)
{
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
{
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
}
else
{
U32 i;
const U32 end = start + length;
HUF_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
for (i = start; i < end; i++)
DTable[i] = DElt;
}
rankVal[weight] += length;
}
}
static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
const U32 memLog = DTable[0];
const BYTE* ip = (const BYTE*) src;
size_t iSize = ip[0];
void* ptr = DTable;
HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
//memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--)
{ if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
/* Get start index of each weight */
{
U32 w, nextRankStart = 0;
for (w=1; w<=maxW; w++)
{
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{
U32 s;
for (s=0; s<nbSymbols; s++)
{
U32 w = weightList[s];
U32 r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
U32 w, consumed;
const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
U32* rankVal0 = rankVal[0];
for (w=1; w<=maxW; w++)
{
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
}
for (consumed = minBits; consumed <= memLog - minBits; consumed++)
{
U32* rankValPtr = rankVal[consumed];
for (w = 1; w <= maxW; w++)
{
rankValPtr[w] = rankVal0[w] >> consumed;
}
}
}
HUF_fillDTableX4(dt, memLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
return iSize;
}
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
else
{
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
{
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
}
}
return 1;
}
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
{
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
static size_t HUF_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U32* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* ptr = DTable;
const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
{
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/**********************************/
/* Generic decompression selector */
/**********************************/
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
/* estimate decompression time */
U32 Q;
const U32 D256 = (U32)(dstSize >> 8);
U32 Dtime[3];
U32 algoNb = 0;
int n;
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
/* decoder timing evaluation */
Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
for (n=0; n<3; n++)
Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
if (Dtime[1] < Dtime[0]) algoNb = 1;
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
//return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
//return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
//return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
}
/*
zstd - standard compression library
Copyright (C) 2014-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
*/
#define ZSTD_MEMORY_USAGE 17
/*!
* HEAPMODE :
* Select how default compression functions will allocate memory for their hash table,
* on the stack (0, fastest), or on the heap (1, requires malloc()).
* Note that the compression context is fairly large; as a consequence, heap memory is recommended.
*/
#ifndef ZSTD_HEAPMODE
# define ZSTD_HEAPMODE 1
#endif /* ZSTD_HEAPMODE */
/*!
* LEGACY_SUPPORT :
* decompressor can decode older formats (starting from Zstd 0.1+)
*/
#ifndef ZSTD_LEGACY_SUPPORT
# define ZSTD_LEGACY_SUPPORT 1
#endif
/* *******************************************************
* Includes
*********************************************************/
#include <stdlib.h> /* calloc */
#include <string.h> /* memcpy, memmove */
#include <stdio.h> /* debug : printf */
/* *******************************************************
* Compiler specifics
*********************************************************/
#ifdef __AVX2__
# include <immintrin.h> /* AVX2 intrinsics */
#endif
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#else
# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#endif
/* *******************************************************
* Constants
*********************************************************/
#define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
#define HASH_TABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASH_TABLESIZE - 1)
#define KNUTH 2654435761
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BLOCKSIZE (128 KB) /* define, for static allocation */
#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
#define IS_RAW BIT0
#define IS_RLE BIT1
#define WORKPLACESIZE (BLOCKSIZE*3)
#define MINMATCH 4
#define MLbits 7
#define LLbits 6
#define Offbits 5
#define MaxML ((1<<MLbits )-1)
#define MaxLL ((1<<LLbits )-1)
#define MaxOff 31
#define LitFSELog 11
#define MLFSELog 10
#define LLFSELog 10
#define OffFSELog 9
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MaxSeq MAX(MaxLL, MaxML)
#define LITERAL_NOENTROPY 63
#define COMMAND_NOENTROPY 7 /* to remove */
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
static const size_t ZSTD_blockHeaderSize = 3;
static const size_t ZSTD_frameHeaderSize = 4;
/* *******************************************************
* Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do COPY8(op, ip) while (op < oend);
}
/* **************************************
* Local structures
****************************************/
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
typedef struct {
void* buffer;
U32* offsetStart;
U32* offset;
BYTE* offCodeStart;
BYTE* offCode;
BYTE* litStart;
BYTE* lit;
BYTE* litLengthStart;
BYTE* litLength;
BYTE* matchLengthStart;
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
} seqStore_t;
/* *************************************
* Error Management
***************************************/
/*! ZSTD_isError
* tells if a return value is an error code */
static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
/* *************************************************************
* Decompression section
***************************************************************/
struct ZSTD_DCtx_s
{
U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
void* previousDstEnd;
void* base;
size_t expected;
blockType_t bType;
U32 phase;
const BYTE* litPtr;
size_t litSize;
BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
}; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
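/* ZSTD_getcBlockSize() :
   decodes the 3-byte block header : the top 2 bits of the first byte give the block type
   (bt_compressed, bt_raw, bt_rle, bt_end); the low 3 bits of the first byte plus the next
   two bytes give the compressed block size (high bits stored first). */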
static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE* const)src;
BYTE headerFlags;
U32 cSize;
if (srcSize < 3) return ERROR(srcSize_wrong);
headerFlags = *in;
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->blockType = (blockType_t)(headerFlags >> 6);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
if (srcSize > 0) {
memcpy(dst, src, srcSize);
}
return srcSize;
}
/** ZSTD_decompressLiterals
@return : nb of bytes read from src, or an error code*/
static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
*maxDstSizePtr = litSize;
return litCSize + 5;
}
/** ZSTD_decodeLiteralsBlock
@return : nb of bytes read from src (< srcSize )*/
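/* The low 2 bits of the first byte select the literals mode :
   0      -> Huffman-compressed literals, decoded through ZSTD_decompressLiterals()
   IS_RAW -> stored literals, referenced in place when safely possible, copied otherwise
   IS_RLE -> a single byte, repeated litSize times */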
static size_t ZSTD_decodeLiteralsBlock(void* ctx,
const void* src, size_t srcSize)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
const BYTE* const istart = (const BYTE* const)src;
/* any compressed block with literals segment must be at least this size */
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
switch(*istart & 3)
{
default:
case 0:
{
size_t litSize = BLOCKSIZE;
const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, 8);
return readSize; /* works if it's an error too */
}
case IS_RAW:
{
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, 8);
return litSize+3;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+3;
dctx->litSize = litSize;
return litSize+3;
}
case IS_RLE:
{
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[3], litSize + 8);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return 4;
}
}
}
static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE* const)src;
const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype;
U32 LLlog, Offlog, MLlog;
size_t dumpsLength;
/* check */
if (srcSize < 5) return ERROR(srcSize_wrong);
/* SeqHead */
*nbSeq = MEM_readLE16(ip); ip+=2;
LLtype = *ip >> 6;
Offtype = (*ip >> 4) & 3;
MLtype = (*ip >> 2) & 3;
if (*ip & 2)
{
dumpsLength = ip[2];
dumpsLength += ip[1] << 8;
ip += 3;
}
else
{
dumpsLength = ip[1];
dumpsLength += (ip[0] & 1) << 8;
ip += 2;
}
*dumpsPtr = ip;
ip += dumpsLength;
*dumpsLengthPtr = dumpsLength;
/* check */
if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
/* sequences */
{
S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
size_t headerSize;
/* Build DTables */
switch(LLtype)
{
case bt_rle :
LLlog = 0;
FSE_buildDTable_rle(DTableLL, *ip++); break;
case bt_raw :
LLlog = LLbits;
FSE_buildDTable_raw(DTableLL, LLbits); break;
default :
{ U32 max = MaxLL;
headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableLL, norm, max, LLlog);
} }
switch(Offtype)
{
case bt_rle :
Offlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
break;
case bt_raw :
Offlog = Offbits;
FSE_buildDTable_raw(DTableOffb, Offbits); break;
default :
{ U32 max = MaxOff;
headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableOffb, norm, max, Offlog);
} }
switch(MLtype)
{
case bt_rle :
MLlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableML, *ip++); break;
case bt_raw :
MLlog = MLbits;
FSE_buildDTable_raw(DTableML, MLbits); break;
default :
{ U32 max = MaxML;
headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableML, norm, max, MLlog);
} } }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t offset;
size_t matchLength;
} seq_t;
typedef struct {
BIT_DStream_t DStream;
FSE_DState_t stateLL;
FSE_DState_t stateOffb;
FSE_DState_t stateML;
size_t prevOffset;
const BYTE* dumps;
const BYTE* dumpsEnd;
} seqState_t;
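/* ZSTD_decodeSequence() :
   regenerates one (litLength, offset, matchLength) triplet. Literal and match lengths equal to
   their maximum FSE value (MaxLL / MaxML) escape into the 'dumps' area, where they are stored as
   an extra byte or a 3-byte little-endian value. An offset code of 0 means "repeat the previous
   offset". */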
static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
size_t litLength;
size_t prevOffset;
size_t offset;
size_t matchLength;
const BYTE* dumps = seqState->dumps;
const BYTE* const de = seqState->dumpsEnd;
/* Literal length */
litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
prevOffset = litLength ? seq->offset : seqState->prevOffset;
seqState->prevOffset = seq->offset;
if (litLength == MaxLL)
{
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) litLength += add;
else if (dumps + 3 <= de)
{
litLength = MEM_readLE24(dumps);
dumps += 3;
}
if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
/* Offset */
{
static const size_t offsetPrefix[MaxOff+1] = { /* note : size_t faster than U32 */
1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
U32 offsetCode, nbBits;
offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
nbBits = offsetCode - 1;
if (offsetCode==0) nbBits = 0; /* cmove */
offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
if (offsetCode==0) offset = prevOffset; /* cmove */
}
/* MatchLength */
matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
if (matchLength == MaxML)
{
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) matchLength += add;
else if (dumps + 3 <= de)
{
matchLength = MEM_readLE24(dumps);
dumps += 3;
}
if (dumps >= de) dumps = de-1; /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
matchLength += MINMATCH;
/* save result */
seq->litLength = litLength;
seq->offset = offset;
seq->matchLength = matchLength;
seqState->dumps = dumps;
}
static size_t ZSTD_execSequence(BYTE* op,
seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
BYTE* const base, BYTE* const oend)
{
static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */
static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */
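/* dec32table/dec64table handle overlapping matches (offset < 8) : the first 4 bytes are copied
   one by one, the source is then nudged forward by dec32table[offset] so the next 4-byte copy
   stays valid, and dec64table[offset] re-adjusts the source before the 8-byte wildcopy takes
   over. */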
const BYTE* const ostart = op;
BYTE* const oLitEnd = op + sequence.litLength;
BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_8 = oend-8;
const BYTE* const litEnd = *litPtr + sequence.litLength;
/* checks */
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
if (litEnd > litLimit) return ERROR(corruption_detected); /* overRead beyond lit buffer */
/* copy Literals */
ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = litEnd; /* update for next sequence */
/* copy Match */
{
const BYTE* match = op - sequence.offset;
/* check */
if (sequence.offset > (size_t)op) return ERROR(corruption_detected); /* address space overflow test (this test seems kept by clang optimizer) */
//if (match > op) return ERROR(corruption_detected); /* address space overflow test (is clang optimizer removing this test ?) */
if (match < base) return ERROR(corruption_detected);
/* close range match, overlap */
if (sequence.offset < 8)
{
const int dec64 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= dec64;
}
else
{
ZSTD_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH))
{
if (op < oend_8)
{
ZSTD_wildcopy(op, match, oend_8 - op);
match += oend_8 - op;
op = oend_8;
}
while (op < oMatchEnd) *op++ = *match++;
}
else
{
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
}
return oMatchEnd - ostart;
}
static size_t ZSTD_decompressSequences(
void* ctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t errorCode, dumpsLength;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
int nbSeq;
const BYTE* dumps;
U32* DTableLL = dctx->LLTable;
U32* DTableML = dctx->MLTable;
U32* DTableOffb = dctx->OffTable;
BYTE* const base = (BYTE*) (dctx->base);
/* Build Decoding Tables */
errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
DTableLL, DTableML, DTableOffb,
ip, iend-ip);
if (ZSTD_isError(errorCode)) return errorCode;
ip += errorCode;
/* Regen sequences */
{
seq_t sequence;
seqState_t seqState;
memset(&sequence, 0, sizeof(sequence));
seqState.dumps = dumps;
seqState.dumpsEnd = dumps + dumpsLength;
seqState.prevOffset = sequence.offset = 4;
errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
if (ERR_isError(errorCode)) return ERROR(corruption_detected);
FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
{
size_t oneSeqSize;
nbSeq--;
ZSTD_decodeSequence(&sequence, &seqState);
oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
/* check if reached exact end */
if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */
if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */
/* last literal segment */
{
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected);
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
if (op != litPtr) memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
}
return op-ostart;
}
static size_t ZSTD_decompressBlock(
void* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
/* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
/* Decode literals sub-block */
size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
}
static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* iend = ip + srcSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t remainingSize = srcSize;
U32 magicNumber;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
/* Loop on each block */
while (1)
{
size_t decodedSize=0;
size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
break;
case bt_raw :
decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet supported */
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
break;
default:
return ERROR(GENERIC); /* impossible */
}
if (cBlockSize == 0) break; /* bt_end */
if (ZSTD_isError(decodedSize)) return decodedSize;
op += decodedSize;
ip += cBlockSize;
remainingSize -= cBlockSize;
}
return op-ostart;
}
static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
ZSTD_DCtx ctx;
ctx.base = dst;
return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
MEM_STATIC void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
U32 magicNumber;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;
/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (cBlockSize == 0) break; /* bt_end */
ip += cBlockSize;
remainingSize -= cBlockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
}
/*******************************
* Streaming Decompression API
*******************************/
static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
{
dctx->expected = ZSTD_frameHeaderSize;
dctx->phase = 0;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
return 0;
}
static ZSTD_DCtx* ZSTD_createDCtx(void)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
if (dctx==NULL) return NULL;
ZSTD_resetDCtx(dctx);
return dctx;
}
static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
free(dctx);
return 0;
}
static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
{
return dctx->expected;
}
static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
/* Sanity check */
if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
if (dst != ctx->previousDstEnd) /* not contiguous */
ctx->base = dst;
/* Decompress : frame header */
if (ctx->phase == 0)
{
/* Check frame magic header */
U32 magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize;
return 0;
}
/* Decompress : block header */
if (ctx->phase == 1)
{
blockProperties_t bp;
size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
if (ZSTD_isError(blockSize)) return blockSize;
if (bp.blockType == bt_end)
{
ctx->expected = 0;
ctx->phase = 0;
}
else
{
ctx->expected = blockSize;
ctx->bType = bp.blockType;
ctx->phase = 2;
}
return 0;
}
/* Decompress : block content */
{
size_t rSize;
switch(ctx->bType)
{
case bt_compressed:
rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
break;
case bt_raw :
rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC);
}
ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize;
ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
return rSize;
}
}
/* wrapper layer */
unsigned ZSTDv03_isError(size_t code)
{
return ZSTD_isError(code);
}
size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize)
{
return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}
ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
{
return (ZSTDv03_Dctx*)ZSTD_createDCtx();
}
size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)
{
return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)
{
return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
}
size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)
{
return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
}
size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
}
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V03_H_298734209782
#define ZSTD_V03_H_298734209782
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include <stddef.h> /* size_t */
/* *************************************
* Simple one-step function
***************************************/
/**
ZSTDv03_decompress() : decompress ZSTD frames compliant with v0.3.x format
compressedSize : is the exact source size
maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
It must be equal to or larger than originalSize, otherwise decompression will fail.
return : the number of bytes decompressed into destination buffer (originalSize)
or an errorCode if it fails (which can be tested using ZSTDv03_isError())
*/
size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
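/* Usage sketch (illustrative only, not part of the original header) :
one-shot decoding of a complete v0.3.x frame.
`cBuf`, `cSize`, `dstCapacity` and `handle_error()` are hypothetical caller-side names. */
#if 0
{
    void* const dst = malloc(dstCapacity);              /* dstCapacity must be >= original size */
    size_t const dSize = ZSTDv03_decompress(dst, dstCapacity, cBuf, cSize);
    if (ZSTDv03_isError(dSize)) handle_error();          /* dst too small, or corrupted frame */
    /* on success, dSize bytes have been written into dst */
    free(dst);
}
#endif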
/**
ZSTDv03_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.3.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv03_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv03_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
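/* Usage sketch (illustrative only) : query how many input bytes a v0.3.x frame spans
and an upper bound on its decompressed size. `src`/`srcSize` are hypothetical caller-side names. */
#if 0
{
    size_t cSize;
    unsigned long long dBound;
    ZSTDv03_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
    if (ZSTDv03_isError(cSize)) { /* not a valid or complete v0.3.x frame */ }
    /* otherwise : the frame occupies cSize bytes, and regenerates at most dBound bytes */
}
#endif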
/**
ZSTDv03_isError() : tells if the result of ZSTDv03_decompress() is an error
*/
unsigned ZSTDv03_isError(size_t code);
/* *************************************
* Advanced functions
***************************************/
typedef struct ZSTDv03_Dctx_s ZSTDv03_Dctx;
ZSTDv03_Dctx* ZSTDv03_createDCtx(void);
size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx);
size_t ZSTDv03_decompressDCtx(void* ctx,
void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
/* *************************************
* Streaming functions
***************************************/
size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx);
size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx);
size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
Use the above functions alternately.
ZSTDv03_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv03_decompressContinue().
ZSTDv03_decompressContinue() will use previous data blocks during decompression if they are located prior to the current block.
Result is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTDv03_decompressContinue() has decoded some header.
*/
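/* Illustrative streaming loop for the comment above (sketch only, not part of the original header).
`readExact(buf, n)` is a hypothetical user callback filling `buf` with exactly `n` compressed bytes;
`dst`/`dstCapacity` must be able to hold the whole regenerated content. */
#if 0
{
    ZSTDv03_Dctx* const dctx = ZSTDv03_createDCtx();
    unsigned char inBuf[128*1024 + 8];                    /* large enough for one block plus headers (heap allocation preferable in real code) */
    char* op = (char*)dst;
    size_t toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);
    while (toRead != 0) {
        size_t regenerated;
        readExact(inBuf, toRead);                         /* provide exactly toRead bytes */
        regenerated = ZSTDv03_decompressContinue(dctx, op,
                            dstCapacity - (size_t)(op - (char*)dst), inBuf, toRead);
        if (ZSTDv03_isError(regenerated)) break;          /* corruption, or dst too small */
        op += regenerated;                                /* can be 0 after a header */
        toRead = ZSTDv03_nextSrcSizeToDecompress(dctx);
    }
    ZSTDv03_freeDCtx(dctx);
}
#endif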
/* *************************************
* Prefix - version detection
***************************************/
#define ZSTDv03_magicNumber 0xFD2FB523 /* v0.3 */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_V03_H_298734209782 */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/******************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */
#include "zstd_v04.h"
#include "../common/error_private.h"
/* ******************************************************************
* mem.h
*******************************************************************/
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/******************************************
* Compiler-specific
******************************************/
#if defined(_MSC_VER) /* Visual Studio */
# include <stdlib.h> /* _byteswap_ulong */
# include <intrin.h> /* _byteswap_* */
#endif
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/****************************************************************
* Basic Types
*****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
/*-*************************************
* Debug
***************************************/
#include "../common/debug.h"
#ifndef assert
# define assert(condition) ((void)0)
#endif
/****************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violates the C standard.
* It can generate buggy code on targets generating assembly depending on alignment.
* But in some circumstances, it's the only known way to get the most performance (i.e., GCC + ARMv6).
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
/* violates C standard on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
#else
/* default method, safe and standard.
can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read16(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian())
{
MEM_write16(memPtr, val);
}
else
{
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read32(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
}
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read64(memPtr);
else
{
const BYTE* p = (const BYTE*)memPtr;
return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
}
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
#if defined (__cplusplus)
}
#endif
#endif /* MEM_H_MODULE */
/*
zstd - standard compression library
Header File for static linking only
*/
#ifndef ZSTD_STATIC_H
#define ZSTD_STATIC_H
/* *************************************
* Types
***************************************/
#define ZSTD_WINDOWLOG_ABSOLUTEMIN 11
/** from faster to stronger */
typedef enum { ZSTD_fast, ZSTD_greedy, ZSTD_lazy, ZSTD_lazy2, ZSTD_btlazy2 } ZSTD_strategy;
typedef struct
{
U64 srcSize; /* optional : tells how many bytes are present in the frame. Use 0 if not known. */
U32 windowLog; /* largest match distance : larger == more compression, more memory needed during decompression */
U32 contentLog; /* full search segment : larger == more compression, slower, more memory (useless for fast) */
U32 hashLog; /* dispatch table : larger == more memory, faster */
U32 searchLog; /* nb of searches : larger == more compression, slower */
U32 searchLength; /* size of matches : larger == faster decompression, sometimes less compression */
ZSTD_strategy strategy;
} ZSTD_parameters;
typedef ZSTDv04_Dctx ZSTD_DCtx;
/* *************************************
* Advanced functions
***************************************/
/** ZSTD_decompress_usingDict
* Same as ZSTD_decompressDCtx, using a Dictionary content as prefix
* Note : dict can be NULL, in which case, it's equivalent to ZSTD_decompressDCtx() */
static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
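/* Usage sketch (illustrative only) : with dict==NULL, this is equivalent to decompressing without a prefix.
`dctx`, `dst`, `dstCapacity`, `src` and `srcSize` are hypothetical caller-side names; error checks omitted. */
#if 0
size_t const r = ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
#endif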
/* **************************************
* Streaming functions (direct mode)
****************************************/
static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx);
static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize);
static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* src, size_t srcSize);
static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
static size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
Streaming decompression, bufferless mode
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times. Use ZSTD_resetDCtx() to return to fresh status.
First operation is to retrieve frame parameters, using ZSTD_getFrameParams().
This function doesn't consume its input. It needs enough input data to properly decode the frame header.
Objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
Result : 0 when successful, it means the ZSTD_parameters structure has been filled.
>0 : means there is not enough data in src. Provides the expected size to successfully decode the header.
errorCode, which can be tested using ZSTD_isError() (for example, if it's not a ZSTD header)
Then, you can optionally insert a dictionary.
This operation must mimic the compressor behavior, otherwise decompression will fail or be corrupted.
Then it's possible to start decompression.
Use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this exact amount of bytes, or it will fail.
ZSTD_decompressContinue() needs previous data blocks during decompression, up to (1 << windowLog).
They should preferably be located contiguously, prior to the current block. Alternatively, a ring buffer is also possible.
@result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
*/
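/* Illustrative sketch of the bufferless flow described above (not part of the original header).
Assumes `dctx` is a freshly created ZSTD_DCtx (creation helpers are defined later in this file),
`input`/`inputSize` hold one complete frame, and `dst`/`dstCapacity` can hold the regenerated content. */
#if 0
{
    ZSTD_parameters params;
    const char* ip = (const char*)input;
    char* op = (char*)dst;
    size_t toRead;
    if (ZSTD_getFrameParams(&params, ip, inputSize) != 0) { /* error, or more input data needed */ }
    ZSTD_resetDCtx(dctx);
    /* optional : ZSTD_decompress_insertDictionary(dctx, dict, dictSize); */
    toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    while (toRead != 0) {
        size_t const regenerated = ZSTD_decompressContinue(dctx, op,
                                      dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
        /* error check on `regenerated` omitted for brevity */
        ip += toRead; op += regenerated;
        toRead = ZSTD_nextSrcSizeToDecompress(dctx);
    }
}
#endif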
#endif /* ZSTD_STATIC_H */
/*
zstd_internal - common functions to include
Header File for include
*/
#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE
/* *************************************
* Common macros
***************************************/
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/* *************************************
* Common constants
***************************************/
#define ZSTD_MAGICNUMBER 0xFD2FB524 /* v0.4 */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BLOCKSIZE (128 KB) /* define, for static allocation */
static const size_t ZSTD_blockHeaderSize = 3;
static const size_t ZSTD_frameHeaderSize_min = 5;
#define ZSTD_frameHeaderSize_max 5 /* define, for static allocation */
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1
#define IS_RAW BIT0
#define IS_RLE BIT1
#define MINMATCH 4
#define REPCODE_STARTVALUE 4
#define MLbits 7
#define LLbits 6
#define Offbits 5
#define MaxML ((1<<MLbits) - 1)
#define MaxLL ((1<<LLbits) - 1)
#define MaxOff ((1<<Offbits)- 1)
#define MLFSELog 10
#define LLFSELog 10
#define OffFSELog 9
#define MaxSeq MAX(MaxLL, MaxML)
#define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
#define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
/* ******************************************
* Shared functions to include for inlining
********************************************/
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
/*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do
COPY8(op, ip)
while (op < oend);
}
/* ******************************************************************
FSE : Finite State Entropy coder
header file
****************************************************************** */
#ifndef FSE_H
#define FSE_H
#if defined (__cplusplus)
extern "C" {
#endif
/* *****************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
/* *****************************************
* FSE simple functions
******************************************/
static size_t FSE_decompress(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize);
/*!
FSE_decompress():
Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'maxDstSize'.
return : size of regenerated data (<= maxDstSize)
or an error code, which can be tested using FSE_isError()
** Important ** : FSE_decompress() doesn't decompress non-compressible nor RLE data !!!
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
/* *****************************************
* Tool functions
******************************************/
/* Error Management */
static unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
/* *****************************************
* FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrence from source[] into table count[]
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'
FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/
/* *** DECOMPRESSION *** */
/*!
FSE_readNCount():
Read compactly saved 'normalizedCounter' from 'rBuffer'.
return : size read from 'rBuffer'
or an errorCode, which can be tested using FSE_isError()
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
/*!
Constructor and Destructor of type FSE_DTable
Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
/*!
FSE_buildDTable():
Builds 'dt', which must be already allocated, using FSE_createDTable()
return : 0,
or an errorCode, which can be tested using FSE_isError() */
static size_t FSE_buildDTable ( FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*!
FSE_decompress_usingDTable():
Decompress compressed source 'cSrc' of size 'cSrcSize' using 'dt'
into 'dst' which must be already allocated.
return : size of regenerated data (necessarily <= maxDstSize)
or an errorCode, which can be tested using FSE_isError() */
static size_t FSE_decompress_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
If block is uncompressed, use memcpy() instead
If block is a single repeated byte, use memset() instead )
The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().
The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().
'FSE_DTable' can then be used to decompress 'cSrc', with FSE_decompress_usingDTable().
'cSrcSize' must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=maxDstSize).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
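/* Illustrative sketch of the tutorial above (not part of the original source).
It mirrors the steps performed later in this file by FSE_decompress(); the sizing constants are
assumptions matching FSE_MAX_TABLELOG (12) defined further below, and caller-side names are hypothetical. */
#if 0
{
    short norm[256];                       /* enough for maxSymbolValue up to 255 */
    unsigned maxSymbolValue = 255;
    unsigned tableLog;
    FSE_DTable dt[1 + (1<<12)];            /* enough for tableLog <= 12 */
    size_t const hSize = FSE_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
    if (FSE_isError(hSize)) { /* handle error */ }
    if (FSE_isError(FSE_buildDTable(dt, norm, maxSymbolValue, tableLog))) { /* handle error */ }
    {   size_t const regenSize = FSE_decompress_usingDTable(dst, dstCapacity,
                                      (const char*)cSrc + hSize, cSrcSize - hSize, dt);
        if (FSE_isError(regenSize)) { /* handle error */ }
    }
}
#endif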
#if defined (__cplusplus)
}
#endif
#endif /* FSE_H */
/* ******************************************************************
bitstream
Part of NewGen Entropy library
header file (to include)
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which highly benefit from being inlined.
* Since link-time-optimization is not available for all compilers,
* these functions are defined into a .h to be included.
*/
/**********************************************
* bitStream decompression API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BIT_DStream_t;
typedef enum { BIT_DStream_unfinished = 0,
BIT_DStream_endOfBuffer = 1,
BIT_DStream_completed = 2,
BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/******************************************
* unsafe API
******************************************/
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
/****************************************************************
* Helper functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
unsigned r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
/**********************************************************
* bitStream decoding
**********************************************************/
/*!BIT_initDStream
* Initialize a BIT_DStream_t.
* @bitD : a pointer to an already allocated BIT_DStream_t structure
* @srcBuffer must point at the beginning of a bitStream
* @srcSize must be the exact size of the bitStream
* @result : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(size_t)) /* normal case */
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
}
else
{
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
default: break;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BIT_lookBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
size_t value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*!BIT_readBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
size_t value = BIT_lookBitsFast(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
{
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_unfinished;
}
if (bitD->ptr == bitD->start)
{
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
{
U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start)
{
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BIT_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BIT_endOfDStream
* @return Tells if DStream has reached its exact end
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#if defined (__cplusplus)
}
#endif
#endif /* BITSTREAM_H_MODULE */
/* ******************************************************************
FSE : Finite State Entropy coder
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSE_STATIC_H
#define FSE_STATIC_H
#if defined (__cplusplus)
extern "C" {
#endif
/* *****************************************
* Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
/* *****************************************
* FSE advanced API
*******************************************/
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/* build a fake FSE_DTable, designed to always generate the same symbolValue */
/* *****************************************
* FSE symbol decompression API
*******************************************/
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSE_DState_t;
static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
/* *****************************************
* FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
/* *****************************************
* Implementation of inlined functions
*******************************************/
/* decompression */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSE_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSE_decode_t; /* size == U32 */
MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
memcpy(&DTableH, dt, sizeof(DTableH));
DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
BIT_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BIT_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
return DStatePtr->state == 0;
}
#if defined (__cplusplus)
}
#endif
#endif /* FSE_STATIC_H */
/* ******************************************************************
FSE : Finite State Entropy coder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSE_COMMONDEFS_ONLY
/* **************************************************************
* Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSE_MAX_MEMORY_USAGE 14
#define FSE_DEFAULT_MEMORY_USAGE 13
/*!FSE_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSE_MAX_SYMBOL_VALUE 255
/* **************************************************************
* template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t
#endif /* !FSE_COMMONDEFS_ONLY */
/* **************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/* **************************************************************
* Dependencies
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/* ***************************************************************
* Constants
*****************************************************************/
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5
#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
* Complex types
****************************************************************/
typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
/*-**************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
static size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
FSE_DTableHeader DTableH;
void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
const U32 tableSize = 1 << tableLog;
const U32 tableMask = tableSize-1;
const U32 step = FSE_tableStep(tableSize);
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
U32 position = 0;
U32 highThreshold = tableSize-1;
const S16 largeLimit= (S16)(1 << (tableLog-1));
U32 noLarge = 1;
U32 s;
/* Sanity Checks */
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
memset(tableDecode, 0, sizeof(FSE_DECODE_TYPE) * (maxSymbolValue+1) ); /* useless init, but keep static analyzer happy, and we don't need to performance optimize legacy decoders */
DTableH.tableLog = (U16)tableLog;
for (s=0; s<=maxSymbolValue; s++)
{
if (normalizedCounter[s]==-1)
{
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
symbolNext[s] = 1;
}
else
{
if (normalizedCounter[s] >= largeLimit) noLarge=0;
symbolNext[s] = normalizedCounter[s];
}
}
/* Spread symbols */
for (s=0; s<=maxSymbolValue; s++)
{
int i;
for (i=0; i<normalizedCounter[s]; i++)
{
tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
}
}
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
/* Build Decoding table */
{
U32 i;
for (i=0; i<tableSize; i++)
{
FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
}
}
DTableH.fastMode = (U16)noLarge;
memcpy(dt, &DTableH, sizeof(DTableH));
return 0;
}
#ifndef FSE_COMMONDEFS_ONLY
/******************************************
* FSE helper functions
******************************************/
static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
/****************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static short FSE_abs(short a)
{
return a<0 ? -a : a;
}
static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr))
{
if (previous0)
{
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF)
{
n0+=24;
if (ip < iend-5)
{
ip+=2;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
{
bitStream >>= 16;
bitCount+=16;
}
}
while ((bitStream & 3) == 3)
{
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{
const short max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max)
{
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
}
else
{
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSE_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold)
{
nbBits--;
threshold >>= 1;
}
{
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
{
ip += bitCount>>3;
bitCount &= 7;
}
else
{
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
}
}
}
if (remaining != 1) return ERROR(GENERIC);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
return ip-istart;
}
/*********************************************************
* Decompression (Byte symbols)
*********************************************************/
static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<=maxSymbolValue; s++)
{
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BIT_DStream_t bitD;
FSE_DState_t state1;
FSE_DState_t state2;
size_t errorCode;
/* Init */
errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressedSize */
if (FSE_isError(errorCode)) return errorCode;
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
{
op[0] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[1] = FSE_GETSYMBOL(&state2);
if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
op[2] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[3] = FSE_GETSYMBOL(&state2);
}
/* tail */
/* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
while (1)
{
if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
break;
*op++ = FSE_GETSYMBOL(&state1);
if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
break;
*op++ = FSE_GETSYMBOL(&state2);
}
/* end ? */
if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
return op-ostart;
if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
return ERROR(corruption_detected);
}
static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
FSE_DTableHeader DTableH;
U32 fastMode;
memcpy(&DTableH, dt, sizeof(DTableH));
fastMode = DTableH.fastMode;
/* select fast mode (static) */
if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSE_MAX_SYMBOL_VALUE+1];
DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
size_t errorCode;
if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
/* normal FSE decoding mode */
errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSE_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
ip += errorCode;
cSrcSize -= errorCode;
errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSE_isError(errorCode)) return errorCode;
/* always return, even if it is an error code */
return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}
#endif /* FSE_COMMONDEFS_ONLY */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
header file
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef HUFF0_H
#define HUFF0_H
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************************
* Dependency
******************************************/
#include <stddef.h> /* size_t */
/* ****************************************
* Huff0 simple functions
******************************************/
static size_t HUF_decompress(void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize);
/*!
HUF_decompress():
Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'dstSize'.
'dstSize' must be the exact size of the original (uncompressed) data.
Note : in contrast with FSE, HUF_decompress can regenerate RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, because it knows the size to regenerate.
@return : size of regenerated data (== dstSize)
or an error code, which can be tested using HUF_isError()
*/
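/*! Example (sketch) :
    decompress one Huff0-compressed block when the exact regenerated size is known.
    `dst`, `originalSize`, `cSrc` and `cSrcSize` are assumed to be provided by the caller.
        size_t const dSize = HUF_decompress(dst, originalSize, cSrc, cSrcSize);
        if (HUF_isError(dSize)) return dSize;   (corrupted or truncated input)
    On success, dSize == originalSize.
*/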
/* ****************************************
* Tool functions
******************************************/
/* Error Management */
static unsigned HUF_isError(size_t code); /* tells if a return value is an error code */
#if defined (__cplusplus)
}
#endif
#endif /* HUFF0_H */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef HUFF0_STATIC_H
#define HUFF0_STATIC_H
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************************
* Static allocation macros
******************************************/
/* static allocation of Huff0's DTable */
#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
/* ****************************************
* Advanced decompression functions
******************************************/
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
/* ****************************************
* Huff0 detailed API
******************************************/
/*!
HUF_decompress() does the following:
1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
2. build the Huffman table from its saved, compact representation, using HUF_readDTableXn()
3. decode 1 or 4 segments in parallel using HUF_decompress4Xn_usingDTable()
*/
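/*! Example (sketch) :
    using the detailed API with a statically allocated single-symbol table.
    `dst`, `dstSize`, `cSrc` and `cSrcSize` are assumed to be provided by the caller.
        HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
        size_t const hSize = HUF_readDTableX2(DTable, cSrc, cSrcSize);
        if (HUF_isError(hSize)) return hSize;
        return HUF_decompress4X2_usingDTable(dst, dstSize,
                                             (const BYTE*)cSrc + hSize, cSrcSize - hSize,
                                             DTable);
    This is essentially what HUF_decompress4X2() does further below.
*/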
static size_t HUF_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
static size_t HUF_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
static size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
static size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
#if defined (__cplusplus)
}
#endif
#endif /* HUFF0_STATIC_H */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
# define inline __inline
#else
# define inline /* disable inline */
#endif
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
/* **************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/* **************************************************************
* Constants
****************************************************************/
#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
#define HUF_MAX_SYMBOL_VALUE 255
#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
# error "HUF_MAX_TABLELOG is too large !"
#endif
/* **************************************************************
* Error Management
****************************************************************/
static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/*-*******************************************************
* Huff0 : Huffman block decompression
*********************************************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
/*! HUF_readStats
Read compact Huffman tree, saved by HUF_writeCTable
@huffWeight : destination buffer
@return : size read from `src`
*/
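/* Header layout, as handled below :
   - first byte < 128   : the next `iSize` bytes are an FSE-compressed stream of weights (normal case)
   - first byte 128-241 : direct representation; (firstByte - 127) weights stored as 4-bit values, two per byte
   - first byte >= 242  : RLE; every symbol gets weight 1, the symbol count comes from a small lookup table
   The weight of the last symbol is never stored : it is deduced so that weights sum to a power of 2. */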
static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
U32 tableLog;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
U32 n;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
//memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) /* special header */
{
if (iSize >= (242)) /* RLE */
{
static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, hwSize);
iSize = 0;
}
else /* Incompressible */
{
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
for (n=0; n<oSize; n+=2)
{
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
}
}
}
else /* header compressed with FSE (normal case) */
{
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
weightTotal = 0;
for (n=0; n<oSize; n++)
{
if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
}
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
tableLog = BIT_highbit32(weightTotal) + 1;
if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
{
U32 total = 1 << tableLog;
U32 rest = total - weightTotal;
U32 verif = 1 << BIT_highbit32(rest);
U32 lastWeight = BIT_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
}
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
*tableLogPtr = tableLog;
return iSize+1;
}
/**************************/
/* single-symbol decoding */
/**************************/
static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
size_t iSize;
U32 nbSymbols = 0;
U32 n;
U32 nextRankStart;
void* const dtPtr = DTable + 1;
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
//memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
/* Prepare ranks */
nextRankStart = 0;
for (n=1; n<=tableLog; n++)
{
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
}
/* fill DTable */
for (n=0; n<nbSymbols; n++)
{
const U32 w = huffWeight[n];
const U32 length = (1 << w) >> 1;
U32 i;
HUF_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
}
return iSize;
}
static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
const BYTE c = dt[val].byte;
BIT_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
{
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
static size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable;
const HUF_DEltX2* const dt = ((const HUF_DEltX2*)dtPtr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
{
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is assumed to be already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t errorCode;
errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUF_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/***************************/
/* double-symbols decoding */
/***************************/
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUF_DEltX4 DElt;
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
U32 s;
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1)
{
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
{
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
}
}
typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++)
{
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
{
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
}
else
{
U32 i;
const U32 end = start + length;
HUF_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
for (i = start; i < end; i++)
DTable[i] = DElt;
}
rankVal[weight] += length;
}
}
static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
const U32 memLog = DTable[0];
size_t iSize;
void* dtPtr = DTable;
HUF_DEltX4* const dt = ((HUF_DEltX4*)dtPtr) + 1;
HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
//memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--)
{ if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
/* Get start index of each weight */
{
U32 w, nextRankStart = 0;
for (w=1; w<=maxW; w++)
{
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart;   /* put all weight-0 symbols at the end of the sorted list */
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{
U32 s;
for (s=0; s<nbSymbols; s++)
{
U32 w = weightList[s];
U32 r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0;   /* forget weight-0 symbols; this is the beginning of weight(1) */
}
/* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
U32 w, consumed;
const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
U32* rankVal0 = rankVal[0];
for (w=1; w<=maxW; w++)
{
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
}
for (consumed = minBits; consumed <= memLog - minBits; consumed++)
{
U32* rankValPtr = rankVal[consumed];
for (w = 1; w <= maxW; w++)
{
rankValPtr[w] = rankVal0[w] >> consumed;
}
}
}
HUF_fillDTableX4(dt, memLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
return iSize;
}
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
else
{
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
{
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
}
}
return 1;
}
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
{
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
static size_t HUF_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U32* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable;
const HUF_DEltX4* const dt = ((const HUF_DEltX4*)dtPtr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode;
errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
{
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is assumed to be already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/**********************************/
/* Generic decompression selector */
/**********************************/
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
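/* Each entry gives { tableTime, decode256Time } for one algorithm at one compression-ratio bucket Q.
   HUF_decompress() below estimates the total cost of each algorithm as
   tableTime + decode256Time * (dstSize >> 8), adds a small penalty to the larger-table variants
   (to account for cache eviction), then picks the cheapest one. */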
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, NULL };
/* estimate decompression time */
U32 Q;
const U32 D256 = (U32)(dstSize >> 8);
U32 Dtime[3];
U32 algoNb = 0;
int n;
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
/* decoder timing evaluation */
Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
for (n=0; n<3; n++)
Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
if (Dtime[1] < Dtime[0]) algoNb = 1;
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
//return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
//return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
//return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
}
#endif /* ZSTD_CCOMMON_H_MODULE */
/*
zstd - decompression module for the v0.4 legacy format
Copyright (C) 2015-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* HEAPMODE :
* Select how the default decompression function ZSTD_decompress() will allocate memory :
* on the stack (0), or on the heap (1, requires malloc())
*/
#ifndef ZSTD_HEAPMODE
# define ZSTD_HEAPMODE 1
#endif
/* *******************************************************
* Includes
*********************************************************/
#include <stdlib.h> /* calloc */
#include <string.h> /* memcpy, memmove */
#include <stdio.h> /* debug : printf */
/* *******************************************************
* Compiler specifics
*********************************************************/
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
/* *************************************
* Local types
***************************************/
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
/* *******************************************************
* Memory operations
**********************************************************/
static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
/* *************************************
* Error Management
***************************************/
/*! ZSTD_isError
* tells if a return value is an error code */
static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
/* *************************************************************
* Context management
***************************************************************/
typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTD_dStage;
struct ZSTDv04_Dctx_s
{
U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
const void* previousDstEnd;
const void* base;
const void* vBase;
const void* dictEnd;
size_t expected;
size_t headerSize;
ZSTD_parameters params;
blockType_t bType;
ZSTD_dStage stage;
const BYTE* litPtr;
size_t litSize;
BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
BYTE headerBuffer[ZSTD_frameHeaderSize_max];
}; /* typedef'd to ZSTD_DCtx within "zstd_static.h" */
static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
{
dctx->expected = ZSTD_frameHeaderSize_min;
dctx->stage = ZSTDds_getFrameHeaderSize;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
dctx->vBase = NULL;
dctx->dictEnd = NULL;
return 0;
}
static ZSTD_DCtx* ZSTD_createDCtx(void)
{
ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
if (dctx==NULL) return NULL;
ZSTD_resetDCtx(dctx);
return dctx;
}
static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
free(dctx);
return 0;
}
/* *************************************************************
* Decompression section
***************************************************************/
/** ZSTD_decodeFrameHeader_Part1
* decode the 1st part of the Frame Header, which tells the Frame Header size.
* srcSize must be == ZSTD_frameHeaderSize_min
* @return : the full size of the Frame Header */
static size_t ZSTD_decodeFrameHeader_Part1(ZSTD_DCtx* zc, const void* src, size_t srcSize)
{
U32 magicNumber;
if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong);
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
zc->headerSize = ZSTD_frameHeaderSize_min;
return zc->headerSize;
}
static size_t ZSTD_getFrameParams(ZSTD_parameters* params, const void* src, size_t srcSize)
{
U32 magicNumber;
if (srcSize < ZSTD_frameHeaderSize_min) return ZSTD_frameHeaderSize_max;
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTD_MAGICNUMBER) return ERROR(prefix_unknown);
memset(params, 0, sizeof(*params));
params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */
return 0;
}
/** ZSTD_decodeFrameHeader_Part2
* decode the full Frame Header
* srcSize must be the size provided by ZSTD_decodeFrameHeader_Part1
* @return : 0, or an error code, which can be tested using ZSTD_isError() */
static size_t ZSTD_decodeFrameHeader_Part2(ZSTD_DCtx* zc, const void* src, size_t srcSize)
{
size_t result;
if (srcSize != zc->headerSize) return ERROR(srcSize_wrong);
result = ZSTD_getFrameParams(&(zc->params), src, srcSize);
if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}
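/* Block header (3 bytes) : the top 2 bits of the first byte give the block type;
   the block size is a 19-bit value made of the 3 low bits of byte 0 (most significant part)
   followed by bytes 1 and 2. For bt_rle blocks this field holds the regenerated size and the
   compressed payload is a single byte; bt_end blocks carry no payload (size 0). */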
static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE* const)src;
BYTE headerFlags;
U32 cSize;
if (srcSize < 3) return ERROR(srcSize_wrong);
headerFlags = *in;
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->blockType = (blockType_t)(headerFlags >> 6);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
static size_t ZSTD_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
if (srcSize > 0) {
memcpy(dst, src, srcSize);
}
return srcSize;
}
/** ZSTD_decompressLiterals
@return : nb of bytes read from src, or an error code */
static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
*maxDstSizePtr = litSize;
return litCSize + 5;
}
/** ZSTD_decodeLiteralsBlock
@return : nb of bytes read from src (< srcSize) */
static size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
const BYTE* const istart = (const BYTE*) src;
/* any compressed block with a literals segment must be at least this size */
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
switch(*istart & 3)
{
/* compressed */
case 0:
{
size_t litSize = BLOCKSIZE;
const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, 8);
return readSize; /* works if it's an error too */
}
case IS_RAW:
{
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
{
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litSize > srcSize-3) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, 8);
return litSize+3;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+3;
dctx->litSize = litSize;
return litSize+3; }
case IS_RLE:
{
const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[3], litSize + 8);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return 4;
}
default:
return ERROR(corruption_detected); /* forbidden nominal case */
}
}
static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE* const)src;
const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype;
U32 LLlog, Offlog, MLlog;
size_t dumpsLength;
/* check */
if (srcSize < 5) return ERROR(srcSize_wrong);
/* SeqHead */
*nbSeq = MEM_readLE16(ip); ip+=2;
LLtype = *ip >> 6;
Offtype = (*ip >> 4) & 3;
MLtype = (*ip >> 2) & 3;
if (*ip & 2)
{
dumpsLength = ip[2];
dumpsLength += ip[1] << 8;
ip += 3;
}
else
{
dumpsLength = ip[1];
dumpsLength += (ip[0] & 1) << 8;
ip += 2;
}
*dumpsPtr = ip;
ip += dumpsLength;
*dumpsLengthPtr = dumpsLength;
/* check */
if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
/* sequences */
{
S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */
size_t headerSize;
/* Build DTables */
switch(LLtype)
{
case bt_rle :
LLlog = 0;
FSE_buildDTable_rle(DTableLL, *ip++); break;
case bt_raw :
LLlog = LLbits;
FSE_buildDTable_raw(DTableLL, LLbits); break;
default :
{ U32 max = MaxLL;
headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableLL, norm, max, LLlog);
} }
switch(Offtype)
{
case bt_rle :
Offlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
break;
case bt_raw :
Offlog = Offbits;
FSE_buildDTable_raw(DTableOffb, Offbits); break;
default :
{ U32 max = MaxOff;
headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableOffb, norm, max, Offlog);
} }
switch(MLtype)
{
case bt_rle :
MLlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSE_buildDTable_rle(DTableML, *ip++); break;
case bt_raw :
MLlog = MLbits;
FSE_buildDTable_raw(DTableML, MLbits); break;
default :
{ U32 max = MaxML;
headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSE_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSELog) return ERROR(corruption_detected);
ip += headerSize;
FSE_buildDTable(DTableML, norm, max, MLlog);
} } }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t offset;
size_t matchLength;
} seq_t;
typedef struct {
BIT_DStream_t DStream;
FSE_DState_t stateLL;
FSE_DState_t stateOffb;
FSE_DState_t stateML;
size_t prevOffset;
const BYTE* dumps;
const BYTE* dumpsEnd;
} seqState_t;
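/* seqState_t carries everything needed to decode one sequence :
   the shared bitstream, one FSE state per field (literal length, offset, match length),
   the previous offset (reused when the offset code is 0), and the `dumps` area
   holding literal/match lengths too large to be coded directly by their FSE symbols. */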
static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
size_t litLength;
size_t prevOffset;
size_t offset;
size_t matchLength;
const BYTE* dumps = seqState->dumps;
const BYTE* const de = seqState->dumpsEnd;
/* Literal length */
litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
prevOffset = litLength ? seq->offset : seqState->prevOffset;
if (litLength == MaxLL) {
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) litLength += add;
else if (dumps + 3 <= de) {
litLength = MEM_readLE24(dumps);
dumps += 3;
}
if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
/* Offset */
{ static const U32 offsetPrefix[MaxOff+1] = {
1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
U32 offsetCode, nbBits;
offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* <= maxOff, by table construction */
if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
nbBits = offsetCode - 1;
if (offsetCode==0) nbBits = 0; /* cmove */
offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
if (offsetCode==0) offset = prevOffset; /* cmove */
if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */
}
/* MatchLength */
matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
if (matchLength == MaxML) {
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) matchLength += add;
else if (dumps + 3 <= de){
matchLength = MEM_readLE24(dumps);
dumps += 3;
}
if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
matchLength += MINMATCH;
/* save result */
seq->litLength = litLength;
seq->offset = offset;
seq->matchLength = matchLength;
seqState->dumps = dumps;
}
static size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
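/* dec32table/dec64table handle overlapping matches with offset < 8 :
   the match pointer is advanced by dec32table[offset] after the first 4 copied bytes,
   then pulled back by dec64table[offset] after the next 4, so that the following
   8-byte copies keep reproducing the expected repeating pattern. */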
BYTE* const oLitEnd = op + sequence.litLength;
const size_t sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_8 = oend-8;
const BYTE* const litEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */
/* copy Literals */
ZSTD_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = litEnd; /* update for next sequence */
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base))
{
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase))
return ERROR(corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd)
{
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{
size_t length1 = dictEnd - match;
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
if (op > oend_8 || sequence.matchLength < MINMATCH) {
while (op < oMatchEnd) *op++ = *match++;
return sequenceLength;
}
}
}
/* Requirement: op <= oend_8 */
/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
const int sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTD_copy4(op+4, match);
match -= sub2;
} else {
ZSTD_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH))
{
if (op < oend_8)
{
ZSTD_wildcopy(op, match, oend_8 - op);
match += oend_8 - op;
op = oend_8;
}
while (op < oMatchEnd) *op++ = *match++;
}
else
{
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8, but must be signed */
}
return sequenceLength;
}
static size_t ZSTD_decompressSequences(
ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t errorCode, dumpsLength;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
int nbSeq;
const BYTE* dumps;
U32* DTableLL = dctx->LLTable;
U32* DTableML = dctx->MLTable;
U32* DTableOffb = dctx->OffTable;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
/* Build Decoding Tables */
errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
DTableLL, DTableML, DTableOffb,
ip, iend-ip);
if (ZSTD_isError(errorCode)) return errorCode;
ip += errorCode;
/* Regen sequences */
{
seq_t sequence;
seqState_t seqState;
memset(&sequence, 0, sizeof(sequence));
sequence.offset = 4;
seqState.dumps = dumps;
seqState.dumpsEnd = dumps + dumpsLength;
seqState.prevOffset = 4;
errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
if (ERR_isError(errorCode)) return ERROR(corruption_detected);
FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; )
{
size_t oneSeqSize;
nbSeq--;
ZSTD_decodeSequence(&sequence, &seqState);
oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
/* check if reached exact end */
if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* DStream should be entirely and exactly consumed; otherwise data is corrupted */
/* last literal segment */
{
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected);
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
if (op != litPtr) memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
}
return op-ostart;
}
static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) /* not contiguous */
{
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dst;
dctx->previousDstEnd = dst;
}
}
static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
/* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
size_t litCSize;
if (srcSize > BLOCKSIZE) return ERROR(corruption_detected);
/* Decode literals sub-block */
litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTD_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
return ZSTD_decompressSequences(dctx, dst, maxDstSize, ip, srcSize);
}
static size_t ZSTD_decompress_usingDict(ZSTD_DCtx* ctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* iend = ip + srcSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t remainingSize = srcSize;
blockProperties_t blockProperties;
/* init */
ZSTD_resetDCtx(ctx);
if (dict)
{
ZSTD_decompress_insertDictionary(ctx, dict, dictSize);
ctx->dictEnd = ctx->previousDstEnd;
ctx->vBase = (const char*)dst - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
ctx->base = dst;
}
else
{
ctx->vBase = ctx->base = ctx->dictEnd = dst;
}
/* Frame Header */
{
size_t frameHeaderSize;
if (srcSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
frameHeaderSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
if (srcSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
frameHeaderSize = ZSTD_decodeFrameHeader_Part2(ctx, src, frameHeaderSize);
if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
}
/* Loop on each block */
while (1)
{
size_t decodedSize=0;
size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTD_isError(cBlockSize)) return cBlockSize;
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTD_decompressBlock_internal(ctx, op, oend-op, ip, cBlockSize);
break;
case bt_raw :
decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet supported */
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
break;
default:
return ERROR(GENERIC); /* impossible */
}
if (cBlockSize == 0) break; /* bt_end */
if (ZSTD_isError(decodedSize)) return decodedSize;
op += decodedSize;
ip += cBlockSize;
remainingSize -= cBlockSize;
}
return op-ostart;
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTD_frameHeaderSize_min) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
ip += ZSTD_frameHeaderSize_min; remainingSize -= ZSTD_frameHeaderSize_min;
/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTD_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
ip += ZSTD_blockHeaderSize;
remainingSize -= ZSTD_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (cBlockSize == 0) break; /* bt_end */
ip += cBlockSize;
remainingSize -= cBlockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
}
/* ******************************
* Streaming Decompression API
********************************/
static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
{
return dctx->expected;
}
static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
/* Sanity check */
if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
ZSTD_checkContinuity(ctx, dst);
/* Decompress : frame header; part 1 */
switch (ctx->stage)
{
case ZSTDds_getFrameHeaderSize :
/* get frame header size */
if (srcSize != ZSTD_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
ctx->headerSize = ZSTD_decodeFrameHeader_Part1(ctx, src, ZSTD_frameHeaderSize_min);
if (ZSTD_isError(ctx->headerSize)) return ctx->headerSize;
memcpy(ctx->headerBuffer, src, ZSTD_frameHeaderSize_min);
if (ctx->headerSize > ZSTD_frameHeaderSize_min) return ERROR(GENERIC); /* impossible */
ctx->expected = 0; /* not necessary to copy more */
/* fallthrough */
case ZSTDds_decodeFrameHeader:
/* get frame header */
{ size_t const result = ZSTD_decodeFrameHeader_Part2(ctx, ctx->headerBuffer, ctx->headerSize);
if (ZSTD_isError(result)) return result;
ctx->expected = ZSTD_blockHeaderSize;
ctx->stage = ZSTDds_decodeBlockHeader;
return 0;
}
case ZSTDds_decodeBlockHeader:
/* Decode block header */
{ blockProperties_t bp;
size_t const blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
if (ZSTD_isError(blockSize)) return blockSize;
if (bp.blockType == bt_end)
{
ctx->expected = 0;
ctx->stage = ZSTDds_getFrameHeaderSize;
}
else
{
ctx->expected = blockSize;
ctx->bType = bp.blockType;
ctx->stage = ZSTDds_decompressBlock;
}
return 0;
}
case ZSTDds_decompressBlock:
{
/* Decompress : block content */
size_t rSize;
switch(ctx->bType)
{
case bt_compressed:
rSize = ZSTD_decompressBlock_internal(ctx, dst, maxDstSize, src, srcSize);
break;
case bt_raw :
rSize = ZSTD_copyRawBlock(dst, maxDstSize, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC);
}
ctx->stage = ZSTDds_decodeBlockHeader;
ctx->expected = ZSTD_blockHeaderSize;
ctx->previousDstEnd = (char*)dst + rSize;
return rSize;
}
default:
return ERROR(GENERIC); /* impossible */
}
}
static void ZSTD_decompress_insertDictionary(ZSTD_DCtx* ctx, const void* dict, size_t dictSize)
{
ctx->dictEnd = ctx->previousDstEnd;
ctx->vBase = (const char*)dict - ((const char*)(ctx->previousDstEnd) - (const char*)(ctx->base));
ctx->base = dict;
ctx->previousDstEnd = (const char*)dict + dictSize;
}
/*
Buffered version of Zstd compression library
Copyright (C) 2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* The objects defined in this file should be considered experimental.
* They are not labelled stable, as their prototypes may change in the future.
* You can use them for tests, to provide feedback, or if you can endure the risk of future changes.
*/
/* *************************************
* Includes
***************************************/
#include <stdlib.h>
/** ************************************************
* Streaming decompression
*
* A ZBUFF_DCtx object is required to track streaming operation.
* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
* Use ZBUFF_decompressInit() to start a new decompression operation.
* ZBUFF_DCtx objects can be reused multiple times.
*
* Use ZBUFF_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *maxDstSizePtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
* Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
* The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
* return : a hint for the preferred number of bytes to use as input for the next function call (it's only a hint, to improve latency)
* or 0 when a frame is completely decoded
* or an error code, which can be tested using ZBUFF_isError().
*
* Hint : recommended buffer sizes (not compulsory)
* output : 128 KB, the internal block size ; it ensures it's always possible to write a full block as soon as it's decoded.
* input : just follow the indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* **************************************************/
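/* Illustrative usage sketch (not part of the original source) :
   a minimal consumption loop following the rules above, expressed with the
   exported ZBUFFv04_* wrappers defined further below. The read/write callbacks
   and the small buffer sizes are assumptions of this example ; any sizes are
   legal, the recommended ones merely reduce latency.

    static int example_bufferedDecompress(ZBUFFv04_DCtx* zbd,
                        size_t (*readFn)(void* buf, size_t capacity),
                        int    (*writeFn)(const void* buf, size_t size))
    {
        char inBuff[1024];
        char outBuff[4096];
        size_t readSize;
        ZBUFFv04_decompressInit(zbd);
        while ((readSize = readFn(inBuff, sizeof(inBuff))) != 0) {
            const char* ip = inBuff;
            while (readSize) {
                size_t srcSize = readSize;              // in : available ; out : consumed
                size_t dstSize = sizeof(outBuff);       // in : capacity  ; out : produced
                size_t const hint = ZBUFFv04_decompressContinue(zbd, outBuff, &dstSize, ip, &srcSize);
                if (ZBUFFv04_isError(hint)) return -1;
                if (dstSize && writeFn(outBuff, dstSize)) return -1;
                ip += srcSize; readSize -= srcSize;
                if (hint == 0) return 0;                // frame fully decoded
            }
        }
        return -1;   // input exhausted before the end of the frame
    }
*/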
typedef enum { ZBUFFds_init, ZBUFFds_readHeader, ZBUFFds_loadHeader, ZBUFFds_decodeHeader,
ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFF_dStage;
/* *** Resource management *** */
#define ZSTD_frameHeaderSize_max 5 /* too magical, should come from reference */
struct ZBUFFv04_DCtx_s {
ZSTD_DCtx* zc;
ZSTD_parameters params;
char* inBuff;
size_t inBuffSize;
size_t inPos;
char* outBuff;
size_t outBuffSize;
size_t outStart;
size_t outEnd;
size_t hPos;
const char* dict;
size_t dictSize;
ZBUFF_dStage stage;
unsigned char headerBuffer[ZSTD_frameHeaderSize_max];
}; /* typedef'd to ZBUFF_DCtx within "zstd_buffered.h" */
typedef ZBUFFv04_DCtx ZBUFF_DCtx;
static ZBUFF_DCtx* ZBUFF_createDCtx(void)
{
ZBUFF_DCtx* zbc = (ZBUFF_DCtx*)malloc(sizeof(ZBUFF_DCtx));
if (zbc==NULL) return NULL;
memset(zbc, 0, sizeof(*zbc));
zbc->zc = ZSTD_createDCtx();
zbc->stage = ZBUFFds_init;
return zbc;
}
static size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbc)
{
if (zbc==NULL) return 0; /* support free on null */
ZSTD_freeDCtx(zbc->zc);
free(zbc->inBuff);
free(zbc->outBuff);
free(zbc);
return 0;
}
/* *** Initialization *** */
static size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbc)
{
zbc->stage = ZBUFFds_readHeader;
zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = zbc->dictSize = 0;
return ZSTD_resetDCtx(zbc->zc);
}
static size_t ZBUFF_decompressWithDictionary(ZBUFF_DCtx* zbc, const void* src, size_t srcSize)
{
zbc->dict = (const char*)src;
zbc->dictSize = srcSize;
return 0;
}
static size_t ZBUFF_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
size_t length = MIN(maxDstSize, srcSize);
if (length > 0) {
memcpy(dst, src, length);
}
return length;
}
/* *** Decompression *** */
static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
{
const char* const istart = (const char*)src;
const char* ip = istart;
const char* const iend = istart + *srcSizePtr;
char* const ostart = (char*)dst;
char* op = ostart;
char* const oend = ostart + *maxDstSizePtr;
U32 notDone = 1;
DEBUGLOG(5, "ZBUFF_decompressContinue");
while (notDone)
{
switch(zbc->stage)
{
case ZBUFFds_init :
DEBUGLOG(5, "ZBUFF_decompressContinue: stage==ZBUFFds_init => ERROR(init_missing)");
return ERROR(init_missing);
case ZBUFFds_readHeader :
/* read header from src */
{ size_t const headerSize = ZSTD_getFrameParams(&(zbc->params), src, *srcSizePtr);
if (ZSTD_isError(headerSize)) return headerSize;
if (headerSize) {
/* not enough input to decode header : tell how many bytes would be necessary */
memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
zbc->hPos += *srcSizePtr;
*maxDstSizePtr = 0;
zbc->stage = ZBUFFds_loadHeader;
return headerSize - zbc->hPos;
}
zbc->stage = ZBUFFds_decodeHeader;
break;
}
case ZBUFFds_loadHeader:
/* complete header from src */
{ size_t headerSize = ZBUFF_limitCopy(
zbc->headerBuffer + zbc->hPos, ZSTD_frameHeaderSize_max - zbc->hPos,
src, *srcSizePtr);
zbc->hPos += headerSize;
ip += headerSize;
headerSize = ZSTD_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
if (ZSTD_isError(headerSize)) return headerSize;
if (headerSize) {
/* not enough input to decode header : tell how many bytes would be necessary */
*maxDstSizePtr = 0;
return headerSize - zbc->hPos;
} }
/* intentional fallthrough */
case ZBUFFds_decodeHeader:
/* apply header to create / resize buffers */
{ size_t const neededOutSize = (size_t)1 << zbc->params.windowLog;
size_t const neededInSize = BLOCKSIZE; /* a block is never > BLOCKSIZE */
if (zbc->inBuffSize < neededInSize) {
free(zbc->inBuff);
zbc->inBuffSize = neededInSize;
zbc->inBuff = (char*)malloc(neededInSize);
if (zbc->inBuff == NULL) return ERROR(memory_allocation);
}
if (zbc->outBuffSize < neededOutSize) {
free(zbc->outBuff);
zbc->outBuffSize = neededOutSize;
zbc->outBuff = (char*)malloc(neededOutSize);
if (zbc->outBuff == NULL) return ERROR(memory_allocation);
} }
if (zbc->dictSize)
ZSTD_decompress_insertDictionary(zbc->zc, zbc->dict, zbc->dictSize);
if (zbc->hPos) {
/* some data already loaded into headerBuffer : transfer into inBuff */
memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
zbc->inPos = zbc->hPos;
zbc->hPos = 0;
zbc->stage = ZBUFFds_load;
break;
}
zbc->stage = ZBUFFds_read;
/* fall-through */
case ZBUFFds_read:
{
size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
if (neededInSize==0) /* end of frame */
{
zbc->stage = ZBUFFds_init;
notDone = 0;
break;
}
if ((size_t)(iend-ip) >= neededInSize)
{
/* directly decode from src */
size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
ip, neededInSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
ip += neededInSize;
if (!decodedSize) break; /* this was just a header */
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFds_flush;
break;
}
if (ip==iend) { notDone = 0; break; } /* no more input */
zbc->stage = ZBUFFds_load;
}
/* fall-through */
case ZBUFFds_load:
{
size_t neededInSize = ZSTD_nextSrcSizeToDecompress(zbc->zc);
size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */
size_t loadedSize;
if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */
loadedSize = ZBUFF_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
ip += loadedSize;
zbc->inPos += loadedSize;
if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
{
size_t decodedSize = ZSTD_decompressContinue(zbc->zc,
zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
zbc->inBuff, neededInSize);
if (ZSTD_isError(decodedSize)) return decodedSize;
zbc->inPos = 0; /* input is consumed */
if (!decodedSize) { zbc->stage = ZBUFFds_read; break; } /* this was just a header */
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFds_flush;
/* ZBUFFds_flush follows */
}
}
/* fall-through */
case ZBUFFds_flush:
{
size_t toFlushSize = zbc->outEnd - zbc->outStart;
size_t flushedSize = ZBUFF_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
op += flushedSize;
zbc->outStart += flushedSize;
if (flushedSize == toFlushSize)
{
zbc->stage = ZBUFFds_read;
if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
zbc->outStart = zbc->outEnd = 0;
break;
}
/* cannot flush everything */
notDone = 0;
break;
}
default: return ERROR(GENERIC); /* impossible */
}
}
*srcSizePtr = ip-istart;
*maxDstSizePtr = op-ostart;
{
size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zbc->zc);
if (nextSrcSizeHint > 3) nextSrcSizeHint+= 3; /* get the next block header while at it */
nextSrcSizeHint -= zbc->inPos; /* already loaded */
return nextSrcSizeHint;
}
}
/* *************************************
* Tool functions
***************************************/
unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }
const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
size_t ZBUFFv04_recommendedDInSize(void) { return BLOCKSIZE + 3; }
size_t ZBUFFv04_recommendedDOutSize(void) { return BLOCKSIZE; }
/*- ========================================================================= -*/
/* final wrapping stage */
size_t ZSTDv04_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return ZSTD_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
}
size_t ZSTDv04_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE==1)
size_t regenSize;
ZSTD_DCtx* dctx = ZSTD_createDCtx();
if (dctx==NULL) return ERROR(memory_allocation);
regenSize = ZSTDv04_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
ZSTD_freeDCtx(dctx);
return regenSize;
#else
ZSTD_DCtx dctx;
return ZSTDv04_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
#endif
}
size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx) { return ZSTD_resetDCtx(dctx); }
size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx)
{
return ZSTD_nextSrcSizeToDecompress(dctx);
}
size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return ZSTD_decompressContinue(dctx, dst, maxDstSize, src, srcSize);
}
ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void) { return ZBUFF_createDCtx(); }
size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx) { return ZBUFF_freeDCtx(dctx); }
size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx) { return ZBUFF_decompressInit(dctx); }
size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* src, size_t srcSize)
{ return ZBUFF_decompressWithDictionary(dctx, src, srcSize); }
size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
{
DEBUGLOG(5, "ZBUFFv04_decompressContinue");
return ZBUFF_decompressContinue(dctx, dst, maxDstSizePtr, src, srcSizePtr);
}
ZSTD_DCtx* ZSTDv04_createDCtx(void) { return ZSTD_createDCtx(); }
size_t ZSTDv04_freeDCtx(ZSTD_DCtx* dctx) { return ZSTD_freeDCtx(dctx); }
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTD_V04_H_91868324769238
#define ZSTD_V04_H_91868324769238
#if defined (__cplusplus)
extern "C" {
#endif
/* *************************************
* Includes
***************************************/
#include <stddef.h> /* size_t */
/* *************************************
* Simple one-step function
***************************************/
/**
ZSTDv04_decompress() : decompress ZSTD frames compliant with v0.4.x format
compressedSize : is the exact source size
maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated.
It must be equal to or larger than originalSize, otherwise decompression will fail.
return : the number of bytes decompressed into destination buffer (originalSize)
or an errorCode if it fails (which can be tested using ZSTDv04_isError())
*/
size_t ZSTDv04_decompress( void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
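/* Illustrative sketch (not part of the original source) : one-step decompression
   into a heap buffer sized from an assumed known upper bound `maxOriginalSize`.

    #include <stdlib.h>   // malloc, free

    static size_t example_oneStep(void** dstPtr, size_t maxOriginalSize,
                                  const void* src, size_t compressedSize)
    {
        void* const dst = malloc(maxOriginalSize);
        size_t dSize;
        if (dst == NULL) return (size_t)-1;
        dSize = ZSTDv04_decompress(dst, maxOriginalSize, src, compressedSize);
        if (ZSTDv04_isError(dSize)) { free(dst); return dSize; }
        *dstPtr = dst;   // caller owns the buffer and its dSize regenerated bytes
        return dSize;
    }
*/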
/**
ZSTDv04_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.4.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv04_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv04_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
/**
ZSTDv04_isError() : tells if the result of ZSTDv04_decompress() is an error
*/
unsigned ZSTDv04_isError(size_t code);
/* *************************************
* Advanced functions
***************************************/
typedef struct ZSTDv04_Dctx_s ZSTDv04_Dctx;
ZSTDv04_Dctx* ZSTDv04_createDCtx(void);
size_t ZSTDv04_freeDCtx(ZSTDv04_Dctx* dctx);
size_t ZSTDv04_decompressDCtx(ZSTDv04_Dctx* dctx,
void* dst, size_t maxOriginalSize,
const void* src, size_t compressedSize);
/* *************************************
* Direct Streaming
***************************************/
size_t ZSTDv04_resetDCtx(ZSTDv04_Dctx* dctx);
size_t ZSTDv04_nextSrcSizeToDecompress(ZSTDv04_Dctx* dctx);
size_t ZSTDv04_decompressContinue(ZSTDv04_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize);
/**
Use the above functions alternately.
ZSTDv04_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv04_decompressContinue().
ZSTDv04_decompressContinue() needs previous data blocks during decompression ; they should be located prior to the current block.
Result is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTDv04_decompressContinue() has decoded some header.
*/
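/* Illustrative sketch (not part of the original source) of the alternating
   pattern described above, assuming the complete compressed frame is already
   in memory and `dst` is large enough for the regenerated content :

    static size_t example_directStreaming(ZSTDv04_Dctx* dctx,
                        void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize)
    {
        const char* ip = (const char*)src;
        char* op = (char*)dst;
        size_t toRead;
        ZSTDv04_resetDCtx(dctx);
        while ((toRead = ZSTDv04_nextSrcSizeToDecompress(dctx)) != 0) {
            size_t produced;
            if (toRead > srcSize - (size_t)(ip - (const char*)src)) return (size_t)-1;  // truncated input
            produced = ZSTDv04_decompressContinue(dctx, op,
                            dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
            if (ZSTDv04_isError(produced)) return produced;
            ip += toRead;
            op += produced;   // can be 0 when only a header was decoded
        }
        return (size_t)(op - (char*)dst);   // total regenerated size
    }
*/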
/* *************************************
* Buffered Streaming
***************************************/
typedef struct ZBUFFv04_DCtx_s ZBUFFv04_DCtx;
ZBUFFv04_DCtx* ZBUFFv04_createDCtx(void);
size_t ZBUFFv04_freeDCtx(ZBUFFv04_DCtx* dctx);
size_t ZBUFFv04_decompressInit(ZBUFFv04_DCtx* dctx);
size_t ZBUFFv04_decompressWithDictionary(ZBUFFv04_DCtx* dctx, const void* dict, size_t dictSize);
size_t ZBUFFv04_decompressContinue(ZBUFFv04_DCtx* dctx, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr);
/** ************************************************
* Streaming decompression
*
* A ZBUFF_DCtx object is required to track streaming operation.
* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources.
* Use ZBUFF_decompressInit() to start a new decompression operation.
* ZBUFF_DCtx objects can be reused multiple times.
*
* Optionally, a reference to a static dictionary can be set, using ZBUFF_decompressWithDictionary()
* It must be the same content as the one set during compression phase.
* Dictionary content must remain accessible during the decompression process.
*
* Use ZBUFF_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *maxDstSizePtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters or change dst.
* @return : a hint for the preferred number of bytes to use as input for the next function call (it's only a hint, to improve latency)
* or 0 when a frame is completely decoded
* or an error code, which can be tested using ZBUFF_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize / ZBUFF_recommendedDOutSize
* output : ZBUFF_recommendedDOutSize == 128 KB, the internal block size ; it ensures it's always possible to write a full block as soon as it's decoded.
* input : ZBUFF_recommendedDInSize == 128 KB + 3 ; just follow the indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3.
* **************************************************/
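/* Illustrative sketch (not part of the original source) : setting a dictionary
   reference before streaming. `dict`/`dictSize` must match the dictionary used
   at compression time and must stay valid for the whole decompression ;
   the consumption loop itself is the same as the one sketched earlier in this file.

    static ZBUFFv04_DCtx* example_createWithDict(const void* dict, size_t dictSize)
    {
        ZBUFFv04_DCtx* const zbd = ZBUFFv04_createDCtx();
        if (zbd == NULL) return NULL;
        ZBUFFv04_decompressInit(zbd);                            // init resets any previous dictionary reference
        ZBUFFv04_decompressWithDictionary(zbd, dict, dictSize);  // reference only : dict must outlive decompression
        return zbd;   // then feed input with ZBUFFv04_decompressContinue()
    }
*/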
unsigned ZBUFFv04_isError(size_t errorCode);
const char* ZBUFFv04_getErrorName(size_t errorCode);
/** The functions below provide recommended buffer sizes for Compression or Decompression operations.
* These sizes are not compulsory, they just tend to offer better latency */
size_t ZBUFFv04_recommendedDInSize(void);
size_t ZBUFFv04_recommendedDOutSize(void);
/* *************************************
* Prefix - version detection
***************************************/
#define ZSTDv04_magicNumber 0xFD2FB524 /* v0.4 */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTD_V04_H_91868324769238 */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*- Dependencies -*/
#include "zstd_v05.h"
#include "../common/error_private.h"
/* ******************************************************************
mem.h
low-level memory access routines
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*-****************************************
* Dependencies
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */
/*-****************************************
* Compiler specifics
******************************************/
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/*-**************************************************************
* Basic Types
*****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
/*-**************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
* This method is safe if your compiler supports it, and *generally* as fast as or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violates the C standard.
* It can generate buggy code on targets which depend on alignment.
* In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6)
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
/* violates the C standard by lying about structure alignment.
Only use it if there is no other choice to achieve best performance on the target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign*)memPtr)->u64 = value; }
#else
/* default method, safe and standard.
can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
memcpy(memPtr, &value, sizeof(value));
}
MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
memcpy(memPtr, &value, sizeof(value));
}
MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read16(memPtr);
else {
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian()) {
MEM_write16(memPtr, val);
} else {
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read32(memPtr);
else {
const BYTE* p = (const BYTE*)memPtr;
return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
}
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read64(memPtr);
else {
const BYTE* p = (const BYTE*)memPtr;
return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
+ ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
}
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
#if defined (__cplusplus)
}
#endif
#endif /* MEM_H_MODULE */
/*
zstd - standard compression library
Header File for static linking only
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net
*/
#ifndef ZSTD_STATIC_H
#define ZSTD_STATIC_H
/* The prototypes defined within this file are considered experimental.
* They should not be used in the context of a DLL, as they may change in the future.
* Prefer static linking if you need them, to keep control over breaking changes between versions.
*/
#if defined (__cplusplus)
extern "C" {
#endif
/*-*************************************
* Types
***************************************/
#define ZSTDv05_WINDOWLOG_ABSOLUTEMIN 11
/*-*************************************
* Advanced functions
***************************************/
/*- Advanced Decompression functions -*/
/*! ZSTDv05_decompress_usingPreparedDCtx() :
* Same as ZSTDv05_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.
* It avoids reloading the dictionary each time.
* `preparedDCtx` must have been properly initialized using ZSTDv05_decompressBegin_usingDict().
* Requires 2 contexts : 1 for reference, which will not be modified, and 1 to run the decompression operation */
size_t ZSTDv05_decompress_usingPreparedDCtx(
ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* preparedDCtx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
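/* Illustrative sketch (not part of the original source) : `preparedDCtx` is
   assumed to have been initialized once with ZSTDv05_decompressBegin_usingDict() ;
   each frame then only needs its own working context, avoiding dictionary reloads.

    static size_t example_usingPrepared(const ZSTDv05_DCtx* preparedDCtx,
                        void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize)
    {
        ZSTDv05_DCtx* const work = ZSTDv05_createDCtx();
        size_t r;
        if (work == NULL) return (size_t)-1;
        r = ZSTDv05_decompress_usingPreparedDCtx(work, preparedDCtx, dst, dstCapacity, src, srcSize);
        ZSTDv05_freeDCtx(work);
        return r;
    }
*/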
/* **************************************
* Streaming functions (direct mode)
****************************************/
size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx);
/*
Streaming decompression, direct mode (bufferless)
A ZSTDv05_DCtx object is required to track streaming operations.
Use ZSTDv05_createDCtx() / ZSTDv05_freeDCtx() to manage it.
A ZSTDv05_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTDv05_getFrameParams().
This operation is independent, and just needs enough input data to properly decode the frame header.
The objective is to retrieve params->windowLog, to know the minimum amount of memory required during decoding.
Result : 0 when successful ; it means the ZSTDv05_parameters structure has been filled.
>0 : means there is not enough data in src. Provides the expected size needed to successfully decode the header.
errorCode, which can be tested using ZSTDv05_isError()
Start decompression, with ZSTDv05_decompressBegin() or ZSTDv05_decompressBegin_usingDict()
Alternatively, you can copy a prepared context, using ZSTDv05_copyDCtx()
Then use ZSTDv05_nextSrcSizeToDecompress() and ZSTDv05_decompressContinue() alternatively.
ZSTDv05_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv05_decompressContinue().
ZSTDv05_decompressContinue() requires this exact amount of bytes, or it will fail.
ZSTDv05_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
@result of ZSTDv05_decompressContinue() is the number of bytes regenerated within 'dst'.
It can be zero, which is not an error; it just means ZSTDv05_decompressContinue() has decoded some header.
A frame is fully decoded when ZSTDv05_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
*/
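/* Illustrative sketch (not part of the original source) of the sequence described
   above, assuming the whole frame is in memory and dst receives the regenerated
   data contiguously (so no round buffer management is needed) :

    static size_t example_streamDecompress(ZSTDv05_DCtx* dctx,
                        void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize)
    {
        const char* ip = (const char*)src;
        char* op = (char*)dst;
        size_t toRead;
        {   ZSTDv05_parameters params;
            size_t const hSize = ZSTDv05_getFrameParams(&params, src, srcSize);
            if (ZSTDv05_isError(hSize)) return hSize;
            if (hSize != 0) return (size_t)-1;   // not enough data to decode the frame header
            // params.windowLog gives the minimum history the decoder will need
        }
        ZSTDv05_decompressBegin(dctx);
        while ((toRead = ZSTDv05_nextSrcSizeToDecompress(dctx)) != 0) {
            size_t const produced = ZSTDv05_decompressContinue(dctx, op,
                            dstCapacity - (size_t)(op - (char*)dst), ip, toRead);
            if (ZSTDv05_isError(produced)) return produced;
            ip += toRead;
            op += produced;   // 0 when only a header was decoded
        }
        return (size_t)(op - (char*)dst);
    }
*/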
/* **************************************
* Block functions
****************************************/
/*! Block functions produce and decode raw zstd blocks, without frame metadata.
The user will have to manage the information required to regenerate the data, such as block sizes.
A few rules to respect :
- Uncompressed block size must be <= 128 KB
- Compressing or decompressing requires a context structure
+ Use ZSTDv05_createCCtx() and ZSTDv05_createDCtx()
- It is necessary to init context before starting
+ compression : ZSTDv05_compressBegin()
+ decompression : ZSTDv05_decompressBegin()
+ variants _usingDict() are also allowed
+ copyCCtx() and copyDCtx() work too
- When a block is considered not compressible enough, ZSTDv05_compressBlock() result will be zero.
In that case, nothing is produced into `dst`.
+ The user must test for such an outcome and deal directly with the uncompressed data
+ ZSTDv05_decompressBlock() doesn't accept uncompressed data as input !!
*/
size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
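/* Illustrative sketch (not part of the original source), following the rules
   above : the caller transports its own per-block metadata (`wasCompressed`
   and the block sizes are assumptions of this example, the block-level API
   does not provide them). The dctx is assumed to be already initialized with
   ZSTDv05_decompressBegin().

    static size_t example_decodeBlock(ZSTDv05_DCtx* dctx,
                        void* dst, size_t dstCapacity,
                        const void* blk, size_t blkSize, int wasCompressed)
    {
        if (!wasCompressed) {
            // raw data is not accepted by ZSTDv05_decompressBlock() : copy it directly
            if (blkSize > dstCapacity) return (size_t)-1;
            memcpy(dst, blk, blkSize);
            return blkSize;
        }
        return ZSTDv05_decompressBlock(dctx, dst, dstCapacity, blk, blkSize);
    }
*/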
#if defined (__cplusplus)
}
#endif
#endif /* ZSTDv05_STATIC_H */
/*
zstd_internal - common functions to include
Header File for include
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
*/
#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE
/*-*************************************
* Common macros
***************************************/
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/*-*************************************
* Common constants
***************************************/
#define ZSTDv05_DICT_MAGIC 0xEC30A435
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BLOCKSIZE (128 KB) /* define, for static allocation */
static const size_t ZSTDv05_blockHeaderSize = 3;
static const size_t ZSTDv05_frameHeaderSize_min = 5;
#define ZSTDv05_frameHeaderSize_max 5 /* define, for static allocation */
#define BITv057 128
#define BITv056 64
#define BITv055 32
#define BITv054 16
#define BITv051 2
#define BITv050 1
#define IS_HUFv05 0
#define IS_PCH 1
#define IS_RAW 2
#define IS_RLE 3
#define MINMATCH 4
#define REPCODE_STARTVALUE 1
#define Litbits 8
#define MLbits 7
#define LLbits 6
#define Offbits 5
#define MaxLit ((1<<Litbits) - 1)
#define MaxML ((1<<MLbits) - 1)
#define MaxLL ((1<<LLbits) - 1)
#define MaxOff ((1<<Offbits)- 1)
#define MLFSEv05Log 10
#define LLFSEv05Log 10
#define OffFSEv05Log 9
#define MaxSeq MAX(MaxLL, MaxML)
#define FSEv05_ENCODING_RAW 0
#define FSEv05_ENCODING_RLE 1
#define FSEv05_ENCODING_STATIC 2
#define FSEv05_ENCODING_DYNAMIC 3
#define HufLog 12
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
#define WILDCOPY_OVERLENGTH 8
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTDv05_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTDv05_copy8(d,s); d+=8; s+=8; }
/*! ZSTDv05_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
MEM_STATIC void ZSTDv05_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do
COPY8(op, ip)
while (op < oend);
}
/*-*******************************************
* Private interfaces
*********************************************/
typedef struct {
void* buffer;
U32* offsetStart;
U32* offset;
BYTE* offCodeStart;
BYTE* offCode;
BYTE* litStart;
BYTE* lit;
BYTE* litLengthStart;
BYTE* litLength;
BYTE* matchLengthStart;
BYTE* matchLength;
BYTE* dumpsStart;
BYTE* dumps;
/* opt */
U32* matchLengthFreq;
U32* litLengthFreq;
U32* litFreq;
U32* offCodeFreq;
U32 matchLengthSum;
U32 litLengthSum;
U32 litSum;
U32 offCodeSum;
} seqStore_t;
#endif /* ZSTDv05_CCOMMON_H_MODULE */
/* ******************************************************************
FSEv05 : Finite State Entropy coder
header file
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSEv05_H
#define FSEv05_H
#if defined (__cplusplus)
extern "C" {
#endif
/* *****************************************
* Includes
******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */
/*-****************************************
* FSEv05 simple functions
******************************************/
size_t FSEv05_decompress(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize);
/*!
FSEv05_decompress():
Decompress FSEv05 data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'maxDstSize'.
return : size of regenerated data (<= maxDstSize)
or an error code, which can be tested using FSEv05_isError()
** Important ** : FSEv05_decompress() doesn't decompress non-compressible nor RLE data !!!
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
/* *****************************************
* Tool functions
******************************************/
/* Error Management */
unsigned FSEv05_isError(size_t code); /* tells if a return value is an error code */
const char* FSEv05_getErrorName(size_t code); /* provides error code string (useful for debugging) */
/* *****************************************
* FSEv05 detailed API
******************************************/
/* *** DECOMPRESSION *** */
/*!
FSEv05_readNCount():
Read compactly saved 'normalizedCounter' from 'rBuffer'.
return : size read from 'rBuffer'
or an errorCode, which can be tested using FSEv05_isError()
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
/*!
Constructor and Destructor of type FSEv05_DTable
Note that its size depends on 'tableLog' */
typedef unsigned FSEv05_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
FSEv05_DTable* FSEv05_createDTable(unsigned tableLog);
void FSEv05_freeDTable(FSEv05_DTable* dt);
/*!
FSEv05_buildDTable():
Builds 'dt', which must be already allocated, using FSEv05_createDTable()
@return : 0,
or an errorCode, which can be tested using FSEv05_isError() */
size_t FSEv05_buildDTable (FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*!
FSEv05_decompress_usingDTable():
Decompress compressed source @cSrc of size @cSrcSize using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= @dstCapacity)
or an errorCode, which can be tested using FSEv05_isError() */
size_t FSEv05_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv05_DTable* dt);
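/* Illustrative sketch (not part of the original source) of the detailed path
   described above : read the normalized counters, build a DTable, then decode
   the payload that follows the table description.

    static size_t example_fseDecompress(void* dst, size_t dstCapacity,
                                        const void* cSrc, size_t cSrcSize)
    {
        short counts[256];
        unsigned maxSymbol = 255;   // table capacity handed to FSEv05_readNCount()
        unsigned tableLog;
        size_t r;
        size_t const hSize = FSEv05_readNCount(counts, &maxSymbol, &tableLog, cSrc, cSrcSize);
        if (FSEv05_isError(hSize)) return hSize;
        {   FSEv05_DTable* const dt = FSEv05_createDTable(tableLog);
            if (dt == NULL) return (size_t)-1;
            r = FSEv05_buildDTable(dt, counts, maxSymbol, tableLog);
            if (!FSEv05_isError(r))
                r = FSEv05_decompress_usingDTable(dst, dstCapacity,
                        (const char*)cSrc + hSize, cSrcSize - hSize, dt);
            FSEv05_freeDTable(dt);
        }
        return r;
    }
*/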
#if defined (__cplusplus)
}
#endif
#endif /* FSEv05_H */
/* ******************************************************************
bitstream
Part of FSEv05 library
header file (to include)
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITv05STREAM_H_MODULE
#define BITv05STREAM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which highly benefit from being inlined.
* Since link-time-optimization is not available for all compilers,
* these functions are defined into a .h to be included.
*/
/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BITv05_DStream_t;
typedef enum { BITv05_DStream_unfinished = 0,
BITv05_DStream_endOfBuffer = 1,
BITv05_DStream_completed = 2,
BITv05_DStream_overflow = 3 } BITv05_DStream_status; /* result of BITv05_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD);
MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* bitD);
/*-****************************************
* unsafe API
******************************************/
MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
/*-**************************************************************
* Helper functions
****************************************************************/
MEM_STATIC unsigned BITv05_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
unsigned r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
/*-********************************************************
* bitStream decoding
**********************************************************/
/*!BITv05_initDStream
* Initialize a BITv05_DStream_t.
* @bitD : a pointer to an already allocated BITv05_DStream_t structure
* @srcBuffer must point at the beginning of a bitStream
* @srcSize must be the exact size of the bitStream
* @result : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BITv05_initDStream(BITv05_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(size_t)) { /* normal case */
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
} else {
U32 contain32;
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; /* fall-through */
default: break;
}
contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BITv05_highbit32(contain32);
bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BITv05_lookBits(BITv05_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BITv05_lookBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BITv05_lookBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
{
const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BITv05_skipBits(BITv05_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
MEM_STATIC size_t BITv05_readBits(BITv05_DStream_t* bitD, unsigned nbBits)
{
size_t value = BITv05_lookBits(bitD, nbBits);
BITv05_skipBits(bitD, nbBits);
return value;
}
/*!BITv05_readBitsFast :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, unsigned nbBits)
{
size_t value = BITv05_lookBitsFast(bitD, nbBits);
BITv05_skipBits(bitD, nbBits);
return value;
}
MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
return BITv05_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BITv05_DStream_unfinished;
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv05_DStream_endOfBuffer;
return BITv05_DStream_completed;
}
{
U32 nbBytes = bitD->bitsConsumed >> 3;
BITv05_DStream_status result = BITv05_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BITv05_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BITv05_endOfDStream
* @return Tells if DStream has reached its exact end
*/
MEM_STATIC unsigned BITv05_endOfDStream(const BITv05_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#if defined (__cplusplus)
}
#endif
#endif /* BITv05STREAM_H_MODULE */
/* ******************************************************************
FSEv05 : Finite State Entropy coder
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSEv05_STATIC_H
#define FSEv05_STATIC_H
#if defined (__cplusplus)
extern "C" {
#endif
/* *****************************************
* Static allocation
*******************************************/
/* It is possible to statically allocate an FSEv05 DTable as a table of unsigned, using the macro below */
#define FSEv05_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
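/* Example (illustrative, not part of the original source) : static allocation of
   a DTable able to hold any tableLog up to 10 :

    static FSEv05_DTable example_dtableSpace[ FSEv05_DTABLE_SIZE_U32(10) ];
    // pass `example_dtableSpace` wherever an FSEv05_DTable* is expected,
    // e.g. FSEv05_buildDTable(example_dtableSpace, counts, maxSymbol, tableLog)
*/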
/* *****************************************
* FSEv05 advanced API
*******************************************/
size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits);
/* build a fake FSEv05_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, unsigned char symbolValue);
/* build a fake FSEv05_DTable, designed to always generate the same symbolValue */
/* *****************************************
* FSEv05 symbol decompression API
*******************************************/
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSEv05_DState_t;
static void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt);
static unsigned char FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
static unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr);
/* *****************************************
* FSEv05 unsafe API
*******************************************/
static unsigned char FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
/* *****************************************
* Implementation of inlined functions
*******************************************/
/* decompression */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSEv05_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSEv05_decode_t; /* size == U32 */
MEM_STATIC void FSEv05_initDState(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD, const FSEv05_DTable* dt)
{
const void* ptr = dt;
const FSEv05_DTableHeader* const DTableH = (const FSEv05_DTableHeader*)ptr;
DStatePtr->state = BITv05_readBits(bitD, DTableH->tableLog);
BITv05_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSEv05_peakSymbol(FSEv05_DState_t* DStatePtr)
{
const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
return DInfo.symbol;
}
MEM_STATIC BYTE FSEv05_decodeSymbol(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
{
const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BITv05_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC BYTE FSEv05_decodeSymbolFast(FSEv05_DState_t* DStatePtr, BITv05_DStream_t* bitD)
{
const FSEv05_decode_t DInfo = ((const FSEv05_decode_t*)(DStatePtr->table))[DStatePtr->state];
const U32 nbBits = DInfo.nbBits;
BYTE symbol = DInfo.symbol;
size_t lowBits = BITv05_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC unsigned FSEv05_endOfDState(const FSEv05_DState_t* DStatePtr)
{
return DStatePtr->state == 0;
}
#if defined (__cplusplus)
}
#endif
#endif /* FSEv05_STATIC_H */
/* ******************************************************************
FSEv05 : Finite State Entropy coder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSEv05 source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSEv05_COMMONDEFS_ONLY
/* **************************************************************
* Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSEv05_MAX_MEMORY_USAGE 14
#define FSEv05_DEFAULT_MEMORY_USAGE 13
/*!FSEv05_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSEv05_MAX_SYMBOL_VALUE 255
/* **************************************************************
* template functions type & suffix
****************************************************************/
#define FSEv05_FUNCTION_TYPE BYTE
#define FSEv05_FUNCTION_EXTENSION
#define FSEv05_DECODE_TYPE FSEv05_decode_t
#endif /* !FSEv05_COMMONDEFS_ONLY */
/* **************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/* **************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/* ***************************************************************
* Constants
*****************************************************************/
#define FSEv05_MAX_TABLELOG (FSEv05_MAX_MEMORY_USAGE-2)
#define FSEv05_MAX_TABLESIZE (1U<<FSEv05_MAX_TABLELOG)
#define FSEv05_MAXTABLESIZE_MASK (FSEv05_MAX_TABLESIZE-1)
#define FSEv05_DEFAULT_TABLELOG (FSEv05_DEFAULT_MEMORY_USAGE-2)
#define FSEv05_MIN_TABLELOG 5
#define FSEv05_TABLELOG_ABSOLUTE_MAX 15
#if FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX
#error "FSEv05_MAX_TABLELOG > FSEv05_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define FSEv05_STATIC_ASSERT(c) { enum { FSEv05_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
* Complex types
****************************************************************/
typedef unsigned DTable_max_t[FSEv05_DTABLE_SIZE_U32(FSEv05_MAX_TABLELOG)];
/* **************************************************************
* Templates
****************************************************************/
/*
  This section is designed to be included, in order to generate type-specific
  functions (template emulation in C).
  The objective is to write these functions only once, for improved maintenance.
*/
/* safety checks */
#ifndef FSEv05_FUNCTION_EXTENSION
# error "FSEv05_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSEv05_FUNCTION_TYPE
# error "FSEv05_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSEv05_CAT(X,Y) X##Y
#define FSEv05_FUNCTION_NAME(X,Y) FSEv05_CAT(X,Y)
#define FSEv05_TYPE_NAME(X,Y) FSEv05_CAT(X,Y)
/* Function templates */
static U32 FSEv05_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
FSEv05_DTable* FSEv05_createDTable (unsigned tableLog)
{
if (tableLog > FSEv05_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv05_TABLELOG_ABSOLUTE_MAX;
return (FSEv05_DTable*)malloc( FSEv05_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}
void FSEv05_freeDTable (FSEv05_DTable* dt)
{
free(dt);
}
size_t FSEv05_buildDTable(FSEv05_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
FSEv05_DTableHeader DTableH;
void* const tdPtr = dt+1; /* because dt is unsigned, 32-bits aligned on 32-bits */
FSEv05_DECODE_TYPE* const tableDecode = (FSEv05_DECODE_TYPE*) (tdPtr);
const U32 tableSize = 1 << tableLog;
const U32 tableMask = tableSize-1;
const U32 step = FSEv05_tableStep(tableSize);
U16 symbolNext[FSEv05_MAX_SYMBOL_VALUE+1];
U32 position = 0;
U32 highThreshold = tableSize-1;
const S16 largeLimit= (S16)(1 << (tableLog-1));
U32 noLarge = 1;
U32 s;
/* Sanity Checks */
if (maxSymbolValue > FSEv05_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSEv05_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
memset(tableDecode, 0, sizeof(FSEv05_FUNCTION_TYPE) * (maxSymbolValue+1) ); /* useless init, but keeps the static analyzer happy; there is no need to performance-optimize legacy decoders */
DTableH.tableLog = (U16)tableLog;
for (s=0; s<=maxSymbolValue; s++) {
if (normalizedCounter[s]==-1) {
tableDecode[highThreshold--].symbol = (FSEv05_FUNCTION_TYPE)s;
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) noLarge=0;
symbolNext[s] = normalizedCounter[s];
} }
/* Spread symbols */
for (s=0; s<=maxSymbolValue; s++) {
int i;
for (i=0; i<normalizedCounter[s]; i++) {
tableDecode[position].symbol = (FSEv05_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
} }
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
/* Build Decoding table */
{
U32 i;
for (i=0; i<tableSize; i++) {
FSEv05_FUNCTION_TYPE symbol = (FSEv05_FUNCTION_TYPE)(tableDecode[i].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[i].nbBits = (BYTE) (tableLog - BITv05_highbit32 ((U32)nextState) );
tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
} }
DTableH.fastMode = (U16)noLarge;
memcpy(dt, &DTableH, sizeof(DTableH));
return 0;
}
#ifndef FSEv05_COMMONDEFS_ONLY
/*-****************************************
* FSEv05 helper functions
******************************************/
unsigned FSEv05_isError(size_t code) { return ERR_isError(code); }
const char* FSEv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*-**************************************************************
* FSEv05 NCount encoding-decoding
****************************************************************/
static short FSEv05_abs(short a) { return a<0 ? -a : a; }
size_t FSEv05_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSEv05_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSEv05_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr)) {
if (previous0) {
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF) {
n0+=24;
if (ip < iend-5) {
ip+=2;
bitStream = MEM_readLE32(ip) >> bitCount;
} else {
bitStream >>= 16;
bitCount+=16;
} }
while ((bitStream & 3) == 3) {
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{
const short max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max) {
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
} else {
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSEv05_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold) {
nbBits--;
threshold >>= 1;
}
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
} }
if (remaining != 1) return ERROR(GENERIC);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
return ip-istart;
}
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
size_t FSEv05_buildDTable_rle (FSEv05_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSEv05_decode_t* const cell = (FSEv05_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
size_t FSEv05_buildDTable_raw (FSEv05_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSEv05_DTableHeader* const DTableH = (FSEv05_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSEv05_decode_t* const dinfo = (FSEv05_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<=maxSymbolValue; s++) {
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE size_t FSEv05_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSEv05_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BITv05_DStream_t bitD;
FSEv05_DState_t state1;
FSEv05_DState_t state2;
size_t errorCode;
/* Init */
errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressedSize */
if (FSEv05_isError(errorCode)) return errorCode;
FSEv05_initDState(&state1, &bitD, dt);
FSEv05_initDState(&state2, &bitD, dt);
#define FSEv05_GETSYMBOL(statePtr) fast ? FSEv05_decodeSymbolFast(statePtr, &bitD) : FSEv05_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BITv05_reloadDStream(&bitD)==BITv05_DStream_unfinished) && (op<olimit) ; op+=4) {
op[0] = FSEv05_GETSYMBOL(&state1);
if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BITv05_reloadDStream(&bitD);
op[1] = FSEv05_GETSYMBOL(&state2);
if (FSEv05_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BITv05_reloadDStream(&bitD) > BITv05_DStream_unfinished) { op+=2; break; } }
op[2] = FSEv05_GETSYMBOL(&state1);
if (FSEv05_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BITv05_reloadDStream(&bitD);
op[3] = FSEv05_GETSYMBOL(&state2);
}
/* tail */
/* note : BITv05_reloadDStream(&bitD) >= BITv05_DStream_partiallyFilled; Ends at exactly BITv05_DStream_completed */
while (1) {
if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state1))) )
break;
*op++ = FSEv05_GETSYMBOL(&state1);
if ( (BITv05_reloadDStream(&bitD)>BITv05_DStream_completed) || (op==omax) || (BITv05_endOfDStream(&bitD) && (fast || FSEv05_endOfDState(&state2))) )
break;
*op++ = FSEv05_GETSYMBOL(&state2);
}
/* end ? */
if (BITv05_endOfDStream(&bitD) && FSEv05_endOfDState(&state1) && FSEv05_endOfDState(&state2))
return op-ostart;
if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
return ERROR(corruption_detected);
}
size_t FSEv05_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSEv05_DTable* dt)
{
const void* ptr = dt;
const FSEv05_DTableHeader* DTableH = (const FSEv05_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSEv05_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
size_t FSEv05_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSEv05_MAX_SYMBOL_VALUE+1];
DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
unsigned tableLog;
unsigned maxSymbolValue = FSEv05_MAX_SYMBOL_VALUE;
size_t errorCode;
if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
/* normal FSEv05 decoding mode */
errorCode = FSEv05_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSEv05_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
ip += errorCode;
cSrcSize -= errorCode;
errorCode = FSEv05_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSEv05_isError(errorCode)) return errorCode;
/* always return, even if it is an error code */
return FSEv05_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
}
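/* Illustrative sketch (not compiled) : one-shot decompression of an FSEv05-compressed
 * block. `compressed`, `compressedSize`, `regenerated` and `regeneratedCapacity` are
 * caller-provided names used for this example only. */
#if 0
    size_t const r = FSEv05_decompress(regenerated, regeneratedCapacity, compressed, compressedSize);
    if (FSEv05_isError(r)) { /* r is an error code, see FSEv05_getErrorName(r) */ }
    else { /* success : r is the number of regenerated bytes */ }
#endif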
#endif /* FSEv05_COMMONDEFS_ONLY */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
header file
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUFF0_H
#define HUFF0_H
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************************
* Huff0 simple functions
******************************************/
size_t HUFv05_decompress(void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize);
/*!
HUFv05_decompress():
Decompress Huff0 data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'dstSize'.
@dstSize : must be the **exact** size of original (uncompressed) data.
Note : in contrast with FSEv05, HUFv05_decompress can regenerate
RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
because it knows the size to regenerate.
@return : size of regenerated data (== dstSize)
or an error code, which can be tested using HUFv05_isError()
*/
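/* Illustrative sketch (not compiled) : `originalSize` must be the exact regenerated size,
 * typically transported alongside the compressed data by the caller; all names here are
 * examples only. */
#if 0
    size_t const r = HUFv05_decompress(dst, originalSize, cSrc, cSrcSize);
    if (HUFv05_isError(r)) { /* r is an error code, see HUFv05_getErrorName(r) */ }
    else { /* success : r == originalSize */ }
#endif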
/* ****************************************
* Tool functions
******************************************/
/* Error Management */
unsigned HUFv05_isError(size_t code); /* tells if a return value is an error code */
const char* HUFv05_getErrorName(size_t code); /* provides error code string (useful for debugging) */
#if defined (__cplusplus)
}
#endif
#endif /* HUFF0_H */
/* ******************************************************************
Huff0 : Huffman codec, part of New Generation Entropy library
header file, for static linking only
Copyright (C) 2013-2016, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUF0_STATIC_H
#define HUF0_STATIC_H
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************************
* Static allocation
******************************************/
/* static allocation of Huff0's DTable */
#define HUFv05_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog))
#define HUFv05_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
unsigned short DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUFv05_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUFv05_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
unsigned int DTable[HUFv05_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
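/* Illustrative sketch (not compiled) : static allocation of decoding tables using the
 * macros above. Identifier names are examples only; HUFv05_MAX_TABLELOG (12) is defined
 * later in this file. */
#if 0
    HUFv05_CREATE_STATIC_DTABLEX2(dtableX2, HUFv05_MAX_TABLELOG);   /* U16 cells : single-symbol decoder */
    HUFv05_CREATE_STATIC_DTABLEX4(dtableX4, HUFv05_MAX_TABLELOG);   /* U32 cells : double-symbols decoder */
#endif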
/* ****************************************
* Advanced decompression functions
******************************************/
size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
/* ****************************************
* Huff0 detailed API
******************************************/
/*!
HUFv05_decompress() does the following:
1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
2. rebuild the Huffman table from its saved header, using HUFv05_readDTableXn()
3. decode 1 or 4 segments in parallel, using HUFv05_decompress{1,4}Xn_usingDTable()
   (an illustrative sketch follows the prototypes below)
*/
size_t HUFv05_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
size_t HUFv05_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
size_t HUFv05_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
/* single stream variants */
size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
size_t HUFv05_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
size_t HUFv05_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
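/* Illustrative sketch (not compiled) : the detailed API with the single-symbol (X2)
 * decoder. `cSrc`, `cSrcSize`, `dst` and `dstSize` are caller-provided names used for this
 * example only; real callers should also verify hSize < cSrcSize before advancing. */
#if 0
    HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
    size_t const hSize = HUFv05_readDTableX2(DTable, cSrc, cSrcSize);   /* step 2 : rebuild DTable from the table header */
    if (HUFv05_isError(hSize)) { /* handle error */ }
    /* step 3 : decode the 4 interleaved streams, skipping the table header */
    size_t const dSize = HUFv05_decompress4X2_usingDTable(dst, dstSize,
                                (const unsigned char*)cSrc + hSize, cSrcSize - hSize, DTable);
#endif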
#if defined (__cplusplus)
}
#endif
#endif /* HUF0_STATIC_H */
/* ******************************************************************
Huff0 : Huffman coder, part of New Generation Entropy library
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSEv05+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
# define inline __inline
#else
# define inline /* disable inline */
#endif
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
/* **************************************************************
* Includes
****************************************************************/
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memcpy, memset */
#include <stdio.h> /* printf (debug) */
/* **************************************************************
* Constants
****************************************************************/
#define HUFv05_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUFv05_MAX_TABLELOG. Beyond that value, code does not work */
#define HUFv05_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUFv05_ABSOLUTEMAX_TABLELOG */
#define HUFv05_DEFAULT_TABLELOG HUFv05_MAX_TABLELOG /* tableLog by default, when not specified */
#define HUFv05_MAX_SYMBOL_VALUE 255
#if (HUFv05_MAX_TABLELOG > HUFv05_ABSOLUTEMAX_TABLELOG)
# error "HUFv05_MAX_TABLELOG is too large !"
#endif
/* **************************************************************
* Error Management
****************************************************************/
unsigned HUFv05_isError(size_t code) { return ERR_isError(code); }
const char* HUFv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
#define HUFv05_STATIC_ASSERT(c) { enum { HUFv05_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* *******************************************************
* Huff0 : Huffman block decompression
*********************************************************/
typedef struct { BYTE byte; BYTE nbBits; } HUFv05_DEltX2; /* single-symbol decoding */
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv05_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
/*! HUFv05_readStats
Read compact Huffman tree, saved by HUFv05_writeCTable
@huffWeight : destination buffer
@return : size read from `src`
*/
static size_t HUFv05_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
U32 tableLog;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
U32 n;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
/* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) { /* special header */
if (iSize >= (242)) { /* RLE */
static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, hwSize);
iSize = 0;
}
else { /* Incompressible */
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
for (n=0; n<oSize; n+=2) {
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
} } }
else { /* header compressed with FSEv05 (normal case) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSEv05_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
if (FSEv05_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUFv05_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
weightTotal = 0;
for (n=0; n<oSize; n++) {
if (huffWeight[n] >= HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
}
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
tableLog = BITv05_highbit32(weightTotal) + 1;
if (tableLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
{ /* determine last weight */
U32 total = 1 << tableLog;
U32 rest = total - weightTotal;
U32 verif = 1 << BITv05_highbit32(rest);
U32 lastWeight = BITv05_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
}
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
*tableLogPtr = tableLog;
return iSize+1;
}
/*-***************************/
/* single-symbol decoding */
/*-***************************/
size_t HUFv05_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUFv05_MAX_SYMBOL_VALUE + 1];
U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
size_t iSize;
U32 nbSymbols = 0;
U32 n;
U32 nextRankStart;
void* const dtPtr = DTable + 1;
HUFv05_DEltX2* const dt = (HUFv05_DEltX2*)dtPtr;
HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUFv05_readStats(huffWeight, HUFv05_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUFv05_isError(iSize)) return iSize;
/* check result */
if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
DTable[0] = (U16)tableLog; /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
/* Prepare ranks */
nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
}
/* fill DTable */
for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
const U32 length = (1 << w) >> 1;
U32 i;
HUFv05_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
}
return iSize;
}
static BYTE HUFv05_decodeSymbolX2(BITv05_DStream_t* Dstream, const HUFv05_DEltX2* dt, const U32 dtLog)
{
const size_t val = BITv05_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
const BYTE c = dt[val].byte;
BITv05_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUFv05_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUFv05_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUFv05_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUFv05_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
static inline size_t HUFv05_decodeStreamX2(BYTE* p, BITv05_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv05_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-4)) {
HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
HUFv05_DECODE_SYMBOLX2_1(p, bitDPtr);
HUFv05_DECODE_SYMBOLX2_2(p, bitDPtr);
HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd))
HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUFv05_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
size_t HUFv05_decompress1X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + dstSize;
const U32 dtLog = DTable[0];
const void* dtPtr = DTable;
const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr)+1;
BITv05_DStream_t bitD;
if (dstSize <= cSrcSize) return ERROR(dstSize_tooSmall);
{ size_t const errorCode = BITv05_initDStream(&bitD, cSrc, cSrcSize);
if (HUFv05_isError(errorCode)) return errorCode; }
HUFv05_decodeStreamX2(op, &bitD, oend, dt, dtLog);
/* check */
if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
return dstSize;
}
size_t HUFv05_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t errorCode;
errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUFv05_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUFv05_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
size_t HUFv05_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable;
const HUFv05_DEltX2* const dt = ((const HUFv05_DEltX2*)dtPtr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BITv05_DStream_t bitD1;
BITv05_DStream_t bitD2;
BITv05_DStream_t bitD3;
BITv05_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BITv05_initDStream(&bitD1, istart1, length1);
if (HUFv05_isError(errorCode)) return errorCode;
errorCode = BITv05_initDStream(&bitD2, istart2, length2);
if (HUFv05_isError(errorCode)) return errorCode;
errorCode = BITv05_initDStream(&bitD3, istart3, length3);
if (HUFv05_isError(errorCode)) return errorCode;
errorCode = BITv05_initDStream(&bitD4, istart4, length4);
if (HUFv05_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
HUFv05_DECODE_SYMBOLX2_1(op1, &bitD1);
HUFv05_DECODE_SYMBOLX2_1(op2, &bitD2);
HUFv05_DECODE_SYMBOLX2_1(op3, &bitD3);
HUFv05_DECODE_SYMBOLX2_1(op4, &bitD4);
HUFv05_DECODE_SYMBOLX2_2(op1, &bitD1);
HUFv05_DECODE_SYMBOLX2_2(op2, &bitD2);
HUFv05_DECODE_SYMBOLX2_2(op3, &bitD3);
HUFv05_DECODE_SYMBOLX2_2(op4, &bitD4);
HUFv05_DECODE_SYMBOLX2_0(op1, &bitD1);
HUFv05_DECODE_SYMBOLX2_0(op2, &bitD2);
HUFv05_DECODE_SYMBOLX2_0(op3, &bitD3);
HUFv05_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUFv05_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUFv05_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUFv05_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUFv05_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
size_t HUFv05_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv05_CREATE_STATIC_DTABLEX2(DTable, HUFv05_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t errorCode;
errorCode = HUFv05_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUFv05_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUFv05_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/* *************************/
/* double-symbols decoding */
/* *************************/
static void HUFv05_fillDTableX4Level2(HUFv05_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUFv05_DEltX4 DElt;
U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
U32 s;
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1) {
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
}
}
typedef U32 rankVal_t[HUFv05_ABSOLUTEMAX_TABLELOG][HUFv05_ABSOLUTEMAX_TABLELOG + 1];
static void HUFv05_fillDTableX4(HUFv05_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUFv05_ABSOLUTEMAX_TABLELOG + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++) {
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUFv05_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
} else {
U32 i;
const U32 end = start + length;
HUFv05_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
for (i = start; i < end; i++)
DTable[i] = DElt;
}
rankVal[weight] += length;
}
}
size_t HUFv05_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUFv05_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUFv05_MAX_SYMBOL_VALUE + 1];
U32 rankStats[HUFv05_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
U32 rankStart0[HUFv05_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
const U32 memLog = DTable[0];
size_t iSize;
void* dtPtr = DTable;
HUFv05_DEltX4* const dt = ((HUFv05_DEltX4*)dtPtr) + 1;
HUFv05_STATIC_ASSERT(sizeof(HUFv05_DEltX4) == sizeof(unsigned)); /* if compilation fails here, assertion is false */
if (memLog > HUFv05_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUFv05_readStats(weightList, HUFv05_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUFv05_isError(iSize)) return iSize;
/* check result */
if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{
U32 w, nextRankStart = 0;
for (w=1; w<=maxW; w++) {
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{
U32 s;
for (s=0; s<nbSymbols; s++) {
U32 w = weightList[s];
U32 r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{
const U32 minBits = tableLog+1 - maxW;
U32 nextRankVal = 0;
U32 w, consumed;
const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
U32* rankVal0 = rankVal[0];
for (w=1; w<=maxW; w++) {
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
}
for (consumed = minBits; consumed <= memLog - minBits; consumed++) {
U32* rankValPtr = rankVal[consumed];
for (w = 1; w <= maxW; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } }
HUFv05_fillDTableX4(dt, memLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
return iSize;
}
static U32 HUFv05_decodeSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
{
const size_t val = BITv05_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BITv05_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUFv05_decodeLastSymbolX4(void* op, BITv05_DStream_t* DStream, const HUFv05_DEltX4* dt, const U32 dtLog)
{
const size_t val = BITv05_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BITv05_skipBits(DStream, dt[val].nbBits);
else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BITv05_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
} }
return 1;
}
#define HUFv05_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUFv05_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUFv05_MAX_TABLELOG<=12)) \
ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUFv05_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUFv05_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
static inline size_t HUFv05_decodeStreamX4(BYTE* p, BITv05_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv05_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p < pEnd-7)) {
HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
HUFv05_DECODE_SYMBOLX4_1(p, bitDPtr);
HUFv05_DECODE_SYMBOLX4_2(p, bitDPtr);
HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to the end */
while ((BITv05_reloadDStream(bitDPtr) == BITv05_DStream_unfinished) && (p <= pEnd-2))
HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUFv05_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUFv05_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
size_t HUFv05_decompress1X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const unsigned* DTable)
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const U32 dtLog = DTable[0];
const void* const dtPtr = DTable;
const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
size_t errorCode;
/* Init */
BITv05_DStream_t bitD;
errorCode = BITv05_initDStream(&bitD, istart, cSrcSize);
if (HUFv05_isError(errorCode)) return errorCode;
/* finish bitStreams one by one */
HUFv05_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);
/* check */
if (!BITv05_endOfDStream(&bitD)) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
size_t HUFv05_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUFv05_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUFv05_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
size_t HUFv05_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const unsigned* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable;
const HUFv05_DEltX4* const dt = ((const HUFv05_DEltX4*)dtPtr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BITv05_DStream_t bitD1;
BITv05_DStream_t bitD2;
BITv05_DStream_t bitD3;
BITv05_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BITv05_initDStream(&bitD1, istart1, length1);
if (HUFv05_isError(errorCode)) return errorCode;
errorCode = BITv05_initDStream(&bitD2, istart2, length2);
if (HUFv05_isError(errorCode)) return errorCode;
errorCode = BITv05_initDStream(&bitD3, istart3, length3);
if (HUFv05_isError(errorCode)) return errorCode;
errorCode = BITv05_initDStream(&bitD4, istart4, length4);
if (HUFv05_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
for ( ; (endSignal==BITv05_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
HUFv05_DECODE_SYMBOLX4_1(op1, &bitD1);
HUFv05_DECODE_SYMBOLX4_1(op2, &bitD2);
HUFv05_DECODE_SYMBOLX4_1(op3, &bitD3);
HUFv05_DECODE_SYMBOLX4_1(op4, &bitD4);
HUFv05_DECODE_SYMBOLX4_2(op1, &bitD1);
HUFv05_DECODE_SYMBOLX4_2(op2, &bitD2);
HUFv05_DECODE_SYMBOLX4_2(op3, &bitD3);
HUFv05_DECODE_SYMBOLX4_2(op4, &bitD4);
HUFv05_DECODE_SYMBOLX4_0(op1, &bitD1);
HUFv05_DECODE_SYMBOLX4_0(op2, &bitD2);
HUFv05_DECODE_SYMBOLX4_0(op3, &bitD3);
HUFv05_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BITv05_reloadDStream(&bitD1) | BITv05_reloadDStream(&bitD2) | BITv05_reloadDStream(&bitD3) | BITv05_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUFv05_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUFv05_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUFv05_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUFv05_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BITv05_endOfDStream(&bitD1) & BITv05_endOfDStream(&bitD2) & BITv05_endOfDStream(&bitD3) & BITv05_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
size_t HUFv05_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv05_CREATE_STATIC_DTABLEX4(DTable, HUFv05_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUFv05_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUFv05_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUFv05_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/* ********************************/
/* Generic decompression selector */
/* ********************************/
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUFv05_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
static const decompressionAlgo decompress[3] = { HUFv05_decompress4X2, HUFv05_decompress4X4, NULL };
/* estimate decompression time */
U32 Q;
const U32 D256 = (U32)(dstSize >> 8);
U32 Dtime[3];
U32 algoNb = 0;
int n;
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize >= dstSize) return ERROR(corruption_detected);   /* invalid : compressed size must be strictly smaller than regenerated size; the "not compressed" case is expected to be handled by the caller */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
/* decoder timing evaluation */
Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
for (n=0; n<3; n++)
Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
if (Dtime[1] < Dtime[0]) algoNb = 1;
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
/* return HUFv05_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */
/* return HUFv05_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */
/* return HUFv05_decompress4X6(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams quad-symbols decoding */
}
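/* Worked example (illustrative) : with cSrcSize = 61440 and dstSize = 102400,
 * Q = 61440*16/102400 = 9 and D256 = 102400>>8 = 400 ;
 * Dtime[0] = 947 + 128*400 = 52147, Dtime[1] = 1729 + 77*400 = 32529,
 * which remains lower even after the >>4 penalty (34562), so the
 * double-symbols decoder HUFv05_decompress4X4() is selected. */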
/*
zstd - standard compression library
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* HEAPMODE :
* Select how default decompression function ZSTDv05_decompress() will allocate memory,
* in memory stack (0), or in memory heap (1, requires malloc())
*/
#ifndef ZSTDv05_HEAPMODE
# define ZSTDv05_HEAPMODE 1
#endif
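/* For example, compiling this translation unit with `-DZSTDv05_HEAPMODE=0` (flag shown for
 * illustration; adapt to your build system) makes ZSTDv05_decompress() allocate its
 * decompression context on the stack instead of the heap. */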
/*-*******************************************************
* Dependencies
*********************************************************/
#include <stdlib.h> /* calloc */
#include <string.h> /* memcpy, memmove */
#include <stdio.h> /* debug only : printf */
/*-*******************************************************
* Compiler specifics
*********************************************************/
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
/*-*************************************
* Local types
***************************************/
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
/* *******************************************************
* Memory operations
**********************************************************/
static void ZSTDv05_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
/* *************************************
* Error Management
***************************************/
/*! ZSTDv05_isError() :
* tells if a return value is an error code */
unsigned ZSTDv05_isError(size_t code) { return ERR_isError(code); }
/*! ZSTDv05_getErrorName() :
* provides error code string (useful for debugging) */
const char* ZSTDv05_getErrorName(size_t code) { return ERR_getErrorName(code); }
/* *************************************************************
* Context management
***************************************************************/
typedef enum { ZSTDv05ds_getFrameHeaderSize, ZSTDv05ds_decodeFrameHeader,
ZSTDv05ds_decodeBlockHeader, ZSTDv05ds_decompressBlock } ZSTDv05_dStage;
struct ZSTDv05_DCtx_s
{
FSEv05_DTable LLTable[FSEv05_DTABLE_SIZE_U32(LLFSEv05Log)];
FSEv05_DTable OffTable[FSEv05_DTABLE_SIZE_U32(OffFSEv05Log)];
FSEv05_DTable MLTable[FSEv05_DTABLE_SIZE_U32(MLFSEv05Log)];
unsigned hufTableX4[HUFv05_DTABLE_SIZE(HufLog)];
const void* previousDstEnd;
const void* base;
const void* vBase;
const void* dictEnd;
size_t expected;
size_t headerSize;
ZSTDv05_parameters params;
blockType_t bType; /* used in ZSTDv05_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
ZSTDv05_dStage stage;
U32 flagStaticTables;
const BYTE* litPtr;
size_t litSize;
BYTE litBuffer[BLOCKSIZE + WILDCOPY_OVERLENGTH];
BYTE headerBuffer[ZSTDv05_frameHeaderSize_max];
}; /* typedef'd to ZSTDv05_DCtx within "zstd_static.h" */
size_t ZSTDv05_sizeofDCtx (void); /* Hidden declaration */
size_t ZSTDv05_sizeofDCtx (void) { return sizeof(ZSTDv05_DCtx); }
size_t ZSTDv05_decompressBegin(ZSTDv05_DCtx* dctx)
{
dctx->expected = ZSTDv05_frameHeaderSize_min;
dctx->stage = ZSTDv05ds_getFrameHeaderSize;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
dctx->vBase = NULL;
dctx->dictEnd = NULL;
dctx->hufTableX4[0] = HufLog;
dctx->flagStaticTables = 0;
return 0;
}
ZSTDv05_DCtx* ZSTDv05_createDCtx(void)
{
ZSTDv05_DCtx* dctx = (ZSTDv05_DCtx*)malloc(sizeof(ZSTDv05_DCtx));
if (dctx==NULL) return NULL;
ZSTDv05_decompressBegin(dctx);
return dctx;
}
size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx)
{
free(dctx);
return 0; /* reserved as a potential error code in the future */
}
void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx)
{
memcpy(dstDCtx, srcDCtx,
sizeof(ZSTDv05_DCtx) - (BLOCKSIZE+WILDCOPY_OVERLENGTH + ZSTDv05_frameHeaderSize_max)); /* no need to copy workspace */
}
/* *************************************************************
* Decompression section
***************************************************************/
/* Frame format description
Frame Header - [ Block Header - Block ] - Frame End
1) Frame Header
- 4 bytes - Magic Number : ZSTDv05_MAGICNUMBER (defined within zstd_internal.h)
- 1 byte - Window Descriptor
2) Block Header
- 3 bytes, starting with a 2-bits descriptor
Uncompressed, Compressed, Frame End, unused
3) Block
See Block Format Description
4) Frame End
- 3 bytes, compatible with Block Header
*/
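/* Illustrative sketch (not compiled) : how the frame header bytes described above map to parameters.
   It mirrors ZSTDv05_getFrameParams() further below ; `src` is assumed to hold at least
   ZSTDv05_frameHeaderSize_min bytes. */
#if 0
{
    const BYTE* const hdr = (const BYTE*)src;
    U32 const magic = MEM_readLE32(hdr);                                       /* bytes 0-3, little endian */
    if (magic != ZSTDv05_MAGICNUMBER) { /* not a v0.5 frame */ }
    {   U32 const windowLog = (hdr[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;   /* low nibble of byte 4 */
        /* the high nibble of byte 4 is reserved and must be 0 */
        (void)windowLog;
    }
}
#endif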
/* Block format description
Block = Literal Section - Sequences Section
Prerequisite : size of (compressed) block, maximum size of regenerated data
1) Literal Section
1.1) Header : 1-5 bytes
flags: 2 bits
00 compressed by Huff0
01 unused
10 is Raw (uncompressed)
11 is Rle
Note : using 01 => Huff0 with precomputed table ?
Note : delta map ? => compressed ?
1.1.1) Huff0-compressed literal block : 3-5 bytes
srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
srcSize < 1 KB => 3 bytes (2-2-10-10)
srcSize < 16KB => 4 bytes (2-2-14-14)
else => 5 bytes (2-2-18-18)
big endian convention
1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
size : 5 bits: (IS_RAW<<6) + (0<<4) + size
12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
size&255
20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
size>>8&255
size&255
1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
size : 5 bits: (IS_RLE<<6) + (0<<4) + size
12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
size&255
20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
size>>8&255
size&255
1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
srcSize < 1 KB => 3 bytes (2-2-10-10)
srcSize < 16KB => 4 bytes (2-2-14-14)
else => 5 bytes (2-2-18-18)
big endian convention
1- CTable available (stored into workspace ?)
2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
1.2) Literal block content
1.2.1) Huff0 block, using sizes from header
See Huff0 format
1.2.2) Huff0 block, using prepared table
1.2.3) Raw content
1.2.4) single byte
2) Sequences section
TO DO
*/
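/* Illustrative sketch (not compiled) : decoding the 3-byte Huff0 literal header described in 1.1.1
   (2-2-10-10 layout, big endian convention). It mirrors the lhSize==0/1 path of ZSTDv05_decodeLiteralsBlock(). */
#if 0
{
    const BYTE* const istart = (const BYTE*)src;          /* start of the literal section */
    U32 const flags         = istart[0] >> 6;             /* IS_HUFv05 / IS_PCH / IS_RAW / IS_RLE */
    U32 const singleStream  = istart[0] & 16;
    size_t const litSize    = ((istart[0] & 15) << 6) + (istart[1] >> 2);   /* 10 bits : regenerated size */
    size_t const litCSize   = ((istart[1] & 3) << 8) + istart[2];           /* 10 bits : compressed size */
    (void)flags; (void)singleStream; (void)litSize; (void)litCSize;
}
#endif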
/** ZSTDv05_decodeFrameHeader_Part1() :
* decode the 1st part of the Frame Header, which tells Frame Header size.
* srcSize must be == ZSTDv05_frameHeaderSize_min.
* @return : the full size of the Frame Header */
static size_t ZSTDv05_decodeFrameHeader_Part1(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
{
U32 magicNumber;
if (srcSize != ZSTDv05_frameHeaderSize_min)
return ERROR(srcSize_wrong);
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
zc->headerSize = ZSTDv05_frameHeaderSize_min;
return zc->headerSize;
}
size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize)
{
U32 magicNumber;
if (srcSize < ZSTDv05_frameHeaderSize_min) return ZSTDv05_frameHeaderSize_max;
magicNumber = MEM_readLE32(src);
if (magicNumber != ZSTDv05_MAGICNUMBER) return ERROR(prefix_unknown);
memset(params, 0, sizeof(*params));
params->windowLog = (((const BYTE*)src)[4] & 15) + ZSTDv05_WINDOWLOG_ABSOLUTEMIN;
if ((((const BYTE*)src)[4] >> 4) != 0) return ERROR(frameParameter_unsupported); /* reserved bits */
return 0;
}
/** ZSTDv05_decodeFrameHeader_Part2() :
* decode the full Frame Header.
* srcSize must be the size provided by ZSTDv05_decodeFrameHeader_Part1().
* @return : 0, or an error code, which can be tested using ZSTDv05_isError() */
static size_t ZSTDv05_decodeFrameHeader_Part2(ZSTDv05_DCtx* zc, const void* src, size_t srcSize)
{
size_t result;
if (srcSize != zc->headerSize)
return ERROR(srcSize_wrong);
result = ZSTDv05_getFrameParams(&(zc->params), src, srcSize);
if ((MEM_32bits()) && (zc->params.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}
static size_t ZSTDv05_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE* const)src;
BYTE headerFlags;
U32 cSize;
if (srcSize < 3)
return ERROR(srcSize_wrong);
headerFlags = *in;
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->blockType = (blockType_t)(headerFlags >> 6);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
static size_t ZSTDv05_copyRawBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
if (dst==NULL) return ERROR(dstSize_tooSmall);
if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
memcpy(dst, src, srcSize);
return srcSize;
}
/*! ZSTDv05_decodeLiteralsBlock() :
@return : nb of bytes read from src (< srcSize ) */
static size_t ZSTDv05_decodeLiteralsBlock(ZSTDv05_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
const BYTE* const istart = (const BYTE*) src;
/* any compressed block with literals segment must be at least this size */
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
switch(istart[0]>> 6)
{
case IS_HUFv05:
{
size_t litSize, litCSize, singleStream=0;
U32 lhSize = ((istart[0]) >> 4) & 3;
if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
/* 2 - 2 - 10 - 10 */
lhSize=3;
singleStream = istart[0] & 16;
litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
litCSize = ((istart[1] & 3) << 8) + istart[2];
break;
case 2:
/* 2 - 2 - 14 - 14 */
lhSize=4;
litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
litCSize = ((istart[2] & 63) << 8) + istart[3];
break;
case 3:
/* 2 - 2 - 18 - 18 */
lhSize=5;
litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4];
break;
}
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
if (HUFv05_isError(singleStream ?
HUFv05_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
HUFv05_decompress (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
return ERROR(corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case IS_PCH:
{
size_t errorCode;
size_t litSize, litCSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
if (lhSize != 1) /* only case supported for now : small litSize, single stream */
return ERROR(corruption_detected);
if (!dctx->flagStaticTables)
return ERROR(dictionary_corrupted);
/* 2 - 2 - 10 - 10 */
lhSize=3;
litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
litCSize = ((istart[1] & 3) << 8) + istart[2];
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
errorCode = HUFv05_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
if (HUFv05_isError(errorCode)) return ERROR(corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case IS_RAW:
{
size_t litSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
lhSize=1;
litSize = istart[0] & 31;
break;
case 2:
litSize = ((istart[0] & 15) << 8) + istart[1];
break;
case 3:
litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
break;
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litSize = litSize;
return lhSize+litSize;
}
case IS_RLE:
{
size_t litSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
lhSize = 1;
litSize = istart[0] & 31;
break;
case 2:
litSize = ((istart[0] & 15) << 8) + istart[1];
break;
case 3:
litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
break;
}
if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
return ERROR(corruption_detected); /* impossible */
}
}
static size_t ZSTDv05_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
FSEv05_DTable* DTableLL, FSEv05_DTable* DTableML, FSEv05_DTable* DTableOffb,
const void* src, size_t srcSize, U32 flagStaticTable)
{
const BYTE* const istart = (const BYTE* const)src;
const BYTE* ip = istart;
const BYTE* const iend = istart + srcSize;
U32 LLtype, Offtype, MLtype;
unsigned LLlog, Offlog, MLlog;
size_t dumpsLength;
/* check */
if (srcSize < MIN_SEQUENCES_SIZE)
return ERROR(srcSize_wrong);
/* SeqHead */
*nbSeq = *ip++;
if (*nbSeq==0) return 1;
if (*nbSeq >= 128) {
if (ip >= iend) return ERROR(srcSize_wrong);
*nbSeq = ((nbSeq[0]-128)<<8) + *ip++;
}
if (ip >= iend) return ERROR(srcSize_wrong);
LLtype = *ip >> 6;
Offtype = (*ip >> 4) & 3;
MLtype = (*ip >> 2) & 3;
if (*ip & 2) {
if (ip+3 > iend) return ERROR(srcSize_wrong);
dumpsLength = ip[2];
dumpsLength += ip[1] << 8;
ip += 3;
} else {
if (ip+2 > iend) return ERROR(srcSize_wrong);
dumpsLength = ip[1];
dumpsLength += (ip[0] & 1) << 8;
ip += 2;
}
*dumpsPtr = ip;
ip += dumpsLength;
*dumpsLengthPtr = dumpsLength;
/* check */
if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
/* sequences */
{
S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL >= MaxOff */
size_t headerSize;
/* Build DTables */
switch(LLtype)
{
case FSEv05_ENCODING_RLE :
LLlog = 0;
FSEv05_buildDTable_rle(DTableLL, *ip++);
break;
case FSEv05_ENCODING_RAW :
LLlog = LLbits;
FSEv05_buildDTable_raw(DTableLL, LLbits);
break;
case FSEv05_ENCODING_STATIC:
if (!flagStaticTable) return ERROR(corruption_detected);
break;
case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */
{ unsigned max = MaxLL;
headerSize = FSEv05_readNCount(norm, &max, &LLlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (LLlog > LLFSEv05Log) return ERROR(corruption_detected);
ip += headerSize;
FSEv05_buildDTable(DTableLL, norm, max, LLlog);
} }
switch(Offtype)
{
case FSEv05_ENCODING_RLE :
Offlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSEv05_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
break;
case FSEv05_ENCODING_RAW :
Offlog = Offbits;
FSEv05_buildDTable_raw(DTableOffb, Offbits);
break;
case FSEv05_ENCODING_STATIC:
if (!flagStaticTable) return ERROR(corruption_detected);
break;
case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */
{ unsigned max = MaxOff;
headerSize = FSEv05_readNCount(norm, &max, &Offlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (Offlog > OffFSEv05Log) return ERROR(corruption_detected);
ip += headerSize;
FSEv05_buildDTable(DTableOffb, norm, max, Offlog);
} }
switch(MLtype)
{
case FSEv05_ENCODING_RLE :
MLlog = 0;
if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
FSEv05_buildDTable_rle(DTableML, *ip++);
break;
case FSEv05_ENCODING_RAW :
MLlog = MLbits;
FSEv05_buildDTable_raw(DTableML, MLbits);
break;
case FSEv05_ENCODING_STATIC:
if (!flagStaticTable) return ERROR(corruption_detected);
break;
case FSEv05_ENCODING_DYNAMIC :
default : /* impossible */
{ unsigned max = MaxML;
headerSize = FSEv05_readNCount(norm, &max, &MLlog, ip, iend-ip);
if (FSEv05_isError(headerSize)) return ERROR(GENERIC);
if (MLlog > MLFSEv05Log) return ERROR(corruption_detected);
ip += headerSize;
FSEv05_buildDTable(DTableML, norm, max, MLlog);
} } }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t matchLength;
size_t offset;
} seq_t;
typedef struct {
BITv05_DStream_t DStream;
FSEv05_DState_t stateLL;
FSEv05_DState_t stateOffb;
FSEv05_DState_t stateML;
size_t prevOffset;
const BYTE* dumps;
const BYTE* dumpsEnd;
} seqState_t;
static void ZSTDv05_decodeSequence(seq_t* seq, seqState_t* seqState)
{
size_t litLength;
size_t prevOffset;
size_t offset;
size_t matchLength;
const BYTE* dumps = seqState->dumps;
const BYTE* const de = seqState->dumpsEnd;
/* Literal length */
litLength = FSEv05_peakSymbol(&(seqState->stateLL));
prevOffset = litLength ? seq->offset : seqState->prevOffset;
if (litLength == MaxLL) {
const U32 add = *dumps++;
if (add < 255) litLength += add;
else if (dumps + 2 <= de) {
litLength = MEM_readLE16(dumps);
dumps += 2;
if ((litLength & 1) && dumps < de) {
litLength += *dumps << 16;
dumps += 1;
}
litLength>>=1;
}
if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
/* Offset */
{
static const U32 offsetPrefix[MaxOff+1] = {
1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
U32 offsetCode = FSEv05_peakSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */
U32 nbBits = offsetCode - 1;
if (offsetCode==0) nbBits = 0; /* cmove */
offset = offsetPrefix[offsetCode] + BITv05_readBits(&(seqState->DStream), nbBits);
if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
if (offsetCode==0) offset = prevOffset; /* repcode, cmove */
if (offsetCode | !litLength) seqState->prevOffset = seq->offset; /* cmove */
FSEv05_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream)); /* update */
}
/* Literal length update */
FSEv05_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); /* update */
if (MEM_32bits()) BITv05_reloadDStream(&(seqState->DStream));
/* MatchLength */
matchLength = FSEv05_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
if (matchLength == MaxML) {
const U32 add = dumps<de ? *dumps++ : 0;
if (add < 255) matchLength += add;
else if (dumps + 2 <= de) {
matchLength = MEM_readLE16(dumps);
dumps += 2;
if ((matchLength & 1) && dumps < de) {
matchLength += *dumps << 16;
dumps += 1;
}
matchLength >>= 1;
}
if (dumps >= de) { dumps = de-1; } /* late correction, to avoid read overflow (data is now corrupted anyway) */
}
matchLength += MINMATCH;
/* save result */
seq->litLength = litLength;
seq->offset = offset;
seq->matchLength = matchLength;
seqState->dumps = dumps;
#if 0 /* debug */
{
static U64 totalDecoded = 0;
printf("pos %6u : %3u literals & match %3u bytes at distance %6u \n",
(U32)(totalDecoded), (U32)litLength, (U32)matchLength, (U32)offset);
totalDecoded += litLength + matchLength;
}
#endif
}
static size_t ZSTDv05_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
static const int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
BYTE* const oLitEnd = op + sequence.litLength;
const size_t sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_8 = oend-8;
const BYTE* const litEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
if (litEnd > litLimit) return ERROR(corruption_detected); /* risk read beyond lit buffer */
/* copy Literals */
ZSTDv05_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = litEnd; /* update for next sequence */
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase))
return ERROR(corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{
size_t length1 = dictEnd - match;
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
if (op > oend_8 || sequence.matchLength < MINMATCH) {
while (op < oMatchEnd) *op++ = *match++;
return sequenceLength;
}
} }
/* Requirement: op <= oend_8 */
/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
const int sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTDv05_copy4(op+4, match);
match -= sub2;
} else {
ZSTDv05_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH)) {
if (op < oend_8) {
ZSTDv05_wildcopy(op, match, oend_8 - op);
match += oend_8 - op;
op = oend_8;
}
while (op < oMatchEnd)
*op++ = *match++;
} else {
ZSTDv05_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
return sequenceLength;
}
static size_t ZSTDv05_decompressSequences(
ZSTDv05_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t errorCode, dumpsLength=0;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
int nbSeq=0;
const BYTE* dumps = NULL;
unsigned* DTableLL = dctx->LLTable;
unsigned* DTableML = dctx->MLTable;
unsigned* DTableOffb = dctx->OffTable;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
/* Build Decoding Tables */
errorCode = ZSTDv05_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
DTableLL, DTableML, DTableOffb,
ip, seqSize, dctx->flagStaticTables);
if (ZSTDv05_isError(errorCode)) return errorCode;
ip += errorCode;
/* Regen sequences */
if (nbSeq) {
seq_t sequence;
seqState_t seqState;
memset(&sequence, 0, sizeof(sequence));
sequence.offset = REPCODE_STARTVALUE;
seqState.dumps = dumps;
seqState.dumpsEnd = dumps + dumpsLength;
seqState.prevOffset = REPCODE_STARTVALUE;
errorCode = BITv05_initDStream(&(seqState.DStream), ip, iend-ip);
if (ERR_isError(errorCode)) return ERROR(corruption_detected);
FSEv05_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSEv05_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSEv05_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (BITv05_reloadDStream(&(seqState.DStream)) <= BITv05_DStream_completed) && nbSeq ; ) {
size_t oneSeqSize;
nbSeq--;
ZSTDv05_decodeSequence(&sequence, &seqState);
oneSeqSize = ZSTDv05_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTDv05_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
/* check if reached exact end */
if (nbSeq) return ERROR(corruption_detected);
}
/* last literal segment */
{
size_t lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
static void ZSTDv05_checkContinuity(ZSTDv05_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dst;
dctx->previousDstEnd = dst;
}
}
static size_t ZSTDv05_decompressBlock_internal(ZSTDv05_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
size_t litCSize;
if (srcSize >= BLOCKSIZE) return ERROR(srcSize_wrong);
/* Decode literals sub-block */
litCSize = ZSTDv05_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTDv05_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
return ZSTDv05_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
}
size_t ZSTDv05_decompressBlock(ZSTDv05_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
ZSTDv05_checkContinuity(dctx, dst);
return ZSTDv05_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
}
/*! ZSTDv05_decompress_continueDCtx
* dctx must have been properly initialized */
static size_t ZSTDv05_decompress_continueDCtx(ZSTDv05_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* iend = ip + srcSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + maxDstSize;
size_t remainingSize = srcSize;
blockProperties_t blockProperties;
memset(&blockProperties, 0, sizeof(blockProperties));
/* Frame Header */
{ size_t frameHeaderSize;
if (srcSize < ZSTDv05_frameHeaderSize_min+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
frameHeaderSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
if (srcSize < frameHeaderSize+ZSTDv05_blockHeaderSize) return ERROR(srcSize_wrong);
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
frameHeaderSize = ZSTDv05_decodeFrameHeader_Part2(dctx, src, frameHeaderSize);
if (ZSTDv05_isError(frameHeaderSize)) return frameHeaderSize;
}
/* Loop on each block */
while (1)
{
size_t decodedSize=0;
size_t cBlockSize = ZSTDv05_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTDv05_isError(cBlockSize)) return cBlockSize;
ip += ZSTDv05_blockHeaderSize;
remainingSize -= ZSTDv05_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTDv05_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
break;
case bt_raw :
decodedSize = ZSTDv05_copyRawBlock(op, oend-op, ip, cBlockSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet supported */
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
break;
default:
return ERROR(GENERIC); /* impossible */
}
if (cBlockSize == 0) break; /* bt_end */
if (ZSTDv05_isError(decodedSize)) return decodedSize;
op += decodedSize;
ip += cBlockSize;
remainingSize -= cBlockSize;
}
return op-ostart;
}
size_t ZSTDv05_decompress_usingPreparedDCtx(ZSTDv05_DCtx* dctx, const ZSTDv05_DCtx* refDCtx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize)
{
ZSTDv05_copyDCtx(dctx, refDCtx);
ZSTDv05_checkContinuity(dctx, dst);
return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
}
size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
ZSTDv05_decompressBegin_usingDict(dctx, dict, dictSize);
ZSTDv05_checkContinuity(dctx, dst);
return ZSTDv05_decompress_continueDCtx(dctx, dst, maxDstSize, src, srcSize);
}
size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return ZSTDv05_decompress_usingDict(dctx, dst, maxDstSize, src, srcSize, NULL, 0);
}
size_t ZSTDv05_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
#if defined(ZSTDv05_HEAPMODE) && (ZSTDv05_HEAPMODE==1)
size_t regenSize;
ZSTDv05_DCtx* dctx = ZSTDv05_createDCtx();
if (dctx==NULL) return ERROR(memory_allocation);
regenSize = ZSTDv05_decompressDCtx(dctx, dst, maxDstSize, src, srcSize);
ZSTDv05_freeDCtx(dctx);
return regenSize;
#else
ZSTDv05_DCtx dctx;
return ZSTDv05_decompressDCtx(&dctx, dst, maxDstSize, src, srcSize);
#endif
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
blockProperties_t blockProperties;
/* Frame Header */
if (srcSize < ZSTDv05_frameHeaderSize_min) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (MEM_readLE32(src) != ZSTDv05_MAGICNUMBER) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
ip += ZSTDv05_frameHeaderSize_min; remainingSize -= ZSTDv05_frameHeaderSize_min;
/* Loop on each block */
while (1)
{
size_t cBlockSize = ZSTDv05_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv05_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
ip += ZSTDv05_blockHeaderSize;
remainingSize -= ZSTDv05_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (cBlockSize == 0) break; /* bt_end */
ip += cBlockSize;
remainingSize -= cBlockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * BLOCKSIZE;
}
/* ******************************
* Streaming Decompression API
********************************/
size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx)
{
return dctx->expected;
}
size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
/* Sanity check */
if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
ZSTDv05_checkContinuity(dctx, dst);
/* Decompress : frame header; part 1 */
switch (dctx->stage)
{
case ZSTDv05ds_getFrameHeaderSize :
/* get frame header size */
if (srcSize != ZSTDv05_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
dctx->headerSize = ZSTDv05_decodeFrameHeader_Part1(dctx, src, ZSTDv05_frameHeaderSize_min);
if (ZSTDv05_isError(dctx->headerSize)) return dctx->headerSize;
memcpy(dctx->headerBuffer, src, ZSTDv05_frameHeaderSize_min);
if (dctx->headerSize > ZSTDv05_frameHeaderSize_min) return ERROR(GENERIC); /* should never happen */
dctx->expected = 0; /* not necessary to copy more */
/* fallthrough */
case ZSTDv05ds_decodeFrameHeader:
/* get frame header */
{ size_t const result = ZSTDv05_decodeFrameHeader_Part2(dctx, dctx->headerBuffer, dctx->headerSize);
if (ZSTDv05_isError(result)) return result;
dctx->expected = ZSTDv05_blockHeaderSize;
dctx->stage = ZSTDv05ds_decodeBlockHeader;
return 0;
}
case ZSTDv05ds_decodeBlockHeader:
{
/* Decode block header */
blockProperties_t bp;
size_t blockSize = ZSTDv05_getcBlockSize(src, ZSTDv05_blockHeaderSize, &bp);
if (ZSTDv05_isError(blockSize)) return blockSize;
if (bp.blockType == bt_end) {
dctx->expected = 0;
dctx->stage = ZSTDv05ds_getFrameHeaderSize;
}
else {
dctx->expected = blockSize;
dctx->bType = bp.blockType;
dctx->stage = ZSTDv05ds_decompressBlock;
}
return 0;
}
case ZSTDv05ds_decompressBlock:
{
/* Decompress : block content */
size_t rSize;
switch(dctx->bType)
{
case bt_compressed:
rSize = ZSTDv05_decompressBlock_internal(dctx, dst, maxDstSize, src, srcSize);
break;
case bt_raw :
rSize = ZSTDv05_copyRawBlock(dst, maxDstSize, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC); /* impossible */
}
dctx->stage = ZSTDv05ds_decodeBlockHeader;
dctx->expected = ZSTDv05_blockHeaderSize;
dctx->previousDstEnd = (char*)dst + rSize;
return rSize;
}
default:
return ERROR(GENERIC); /* impossible */
}
}
static void ZSTDv05_refDictContent(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
{
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dict;
dctx->previousDstEnd = (const char*)dict + dictSize;
}
static size_t ZSTDv05_loadEntropy(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
{
size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, errorCode, litlengthHeaderSize;
short offcodeNCount[MaxOff+1];
unsigned offcodeMaxValue=MaxOff, offcodeLog;
short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
hSize = HUFv05_readDTableX4(dctx->hufTableX4, dict, dictSize);
if (HUFv05_isError(hSize)) return ERROR(dictionary_corrupted);
dict = (const char*)dict + hSize;
dictSize -= hSize;
offcodeHeaderSize = FSEv05_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
if (FSEv05_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSEv05Log) return ERROR(dictionary_corrupted);
errorCode = FSEv05_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
dict = (const char*)dict + offcodeHeaderSize;
dictSize -= offcodeHeaderSize;
matchlengthHeaderSize = FSEv05_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
if (FSEv05_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (matchlengthLog > MLFSEv05Log) return ERROR(dictionary_corrupted);
errorCode = FSEv05_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
dict = (const char*)dict + matchlengthHeaderSize;
dictSize -= matchlengthHeaderSize;
litlengthHeaderSize = FSEv05_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
if (litlengthLog > LLFSEv05Log) return ERROR(dictionary_corrupted);
if (FSEv05_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
errorCode = FSEv05_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
if (FSEv05_isError(errorCode)) return ERROR(dictionary_corrupted);
dctx->flagStaticTables = 1;
return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
}
static size_t ZSTDv05_decompress_insertDictionary(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
{
size_t eSize;
U32 magic = MEM_readLE32(dict);
if (magic != ZSTDv05_DICT_MAGIC) {
/* pure content mode */
ZSTDv05_refDictContent(dctx, dict, dictSize);
return 0;
}
/* load entropy tables */
dict = (const char*)dict + 4;
dictSize -= 4;
eSize = ZSTDv05_loadEntropy(dctx, dict, dictSize);
if (ZSTDv05_isError(eSize)) return ERROR(dictionary_corrupted);
/* reference dictionary content */
dict = (const char*)dict + eSize;
dictSize -= eSize;
ZSTDv05_refDictContent(dctx, dict, dictSize);
return 0;
}
size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize)
{
size_t errorCode;
errorCode = ZSTDv05_decompressBegin(dctx);
if (ZSTDv05_isError(errorCode)) return errorCode;
if (dict && dictSize) {
errorCode = ZSTDv05_decompress_insertDictionary(dctx, dict, dictSize);
if (ZSTDv05_isError(errorCode)) return ERROR(dictionary_corrupted);
}
return 0;
}
/*
Buffered version of Zstd compression library
Copyright (C) 2015-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd source repository : https://github.com/Cyan4973/zstd
- zstd public forum : https://groups.google.com/forum/#!forum/lz4c
*/
/* The objects defined into this file should be considered experimental.
* They are not labelled stable, as their prototype may change in the future.
* You can use them for tests, provide feedback, or if you can endure risk of future changes.
*/
/* *************************************
* Constants
***************************************/
static size_t ZBUFFv05_blockHeaderSize = 3;
/* *** Compression *** */
static size_t ZBUFFv05_limitCopy(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
size_t length = MIN(maxDstSize, srcSize);
if (length > 0) {
memcpy(dst, src, length);
}
return length;
}
/** ************************************************
* Streaming decompression
*
* A ZBUFFv05_DCtx object is required to track streaming operation.
* Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
* Use ZBUFFv05_decompressInit() to start a new decompression operation.
* ZBUFFv05_DCtx objects can be reused multiple times.
*
* Use ZBUFFv05_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *maxDstSizePtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *maxDstSizePtr.
* Note that it may not consume the entire input, in which case it's up to the caller to call the function again with the remaining input.
* The content of dst will be overwritten (up to *maxDstSizePtr) at each function call, so save its content if it matters, or change dst.
* return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency)
* or 0 when a frame is completely decoded
* or an error code, which can be tested using ZBUFFv05_isError().
*
* Hint : recommended buffer sizes (not compulsory)
* output : 128 KB is the internal block size ; providing that much ensures it's always possible to write a full block once it's decoded.
* input : just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* **************************************************/
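/* Illustrative usage sketch (not compiled) : a pull-style decompression loop over a single compressed
   buffer. `compressed` and `compressedSize` are assumed inputs ; real code would also check the results
   of ZBUFFv05_createDCtx() and ZBUFFv05_decompressInit(), and typically stream I/O instead of one buffer. */
#if 0
{
    ZBUFFv05_DCtx* const zbd = ZBUFFv05_createDCtx();
    char out[128 * 1024];                                /* ZBUFFv05_recommendedDOutSize() */
    const char* ip = (const char*)compressed;
    const char* const iend = ip + compressedSize;
    size_t hint = ZBUFFv05_decompressInit(zbd);          /* 0 on success */
    while (!ZBUFFv05_isError(hint) && ip < iend) {
        size_t srcSize = (size_t)(iend - ip);
        size_t dstSize = sizeof(out);
        hint = ZBUFFv05_decompressContinue(zbd, out, &dstSize, ip, &srcSize);
        ip += srcSize;                                   /* srcSize now holds the nb of bytes consumed */
        /* out[0..dstSize-1] holds freshly regenerated data : consume it here */
        if (hint == 0) break;                            /* frame fully decoded */
    }
    ZBUFFv05_freeDCtx(zbd);
}
#endif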
typedef enum { ZBUFFv05ds_init, ZBUFFv05ds_readHeader, ZBUFFv05ds_loadHeader, ZBUFFv05ds_decodeHeader,
ZBUFFv05ds_read, ZBUFFv05ds_load, ZBUFFv05ds_flush } ZBUFFv05_dStage;
/* *** Resource management *** */
#define ZSTDv05_frameHeaderSize_max 5 /* too magical, should come from reference */
struct ZBUFFv05_DCtx_s {
ZSTDv05_DCtx* zc;
ZSTDv05_parameters params;
char* inBuff;
size_t inBuffSize;
size_t inPos;
char* outBuff;
size_t outBuffSize;
size_t outStart;
size_t outEnd;
size_t hPos;
ZBUFFv05_dStage stage;
unsigned char headerBuffer[ZSTDv05_frameHeaderSize_max];
}; /* typedef'd to ZBUFFv05_DCtx within "zstd_buffered.h" */
ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void)
{
ZBUFFv05_DCtx* zbc = (ZBUFFv05_DCtx*)malloc(sizeof(ZBUFFv05_DCtx));
if (zbc==NULL) return NULL;
memset(zbc, 0, sizeof(*zbc));
zbc->zc = ZSTDv05_createDCtx();
zbc->stage = ZBUFFv05ds_init;
return zbc;
}
size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* zbc)
{
if (zbc==NULL) return 0; /* support free on null */
ZSTDv05_freeDCtx(zbc->zc);
free(zbc->inBuff);
free(zbc->outBuff);
free(zbc);
return 0;
}
/* *** Initialization *** */
size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* zbc, const void* dict, size_t dictSize)
{
zbc->stage = ZBUFFv05ds_readHeader;
zbc->hPos = zbc->inPos = zbc->outStart = zbc->outEnd = 0;
return ZSTDv05_decompressBegin_usingDict(zbc->zc, dict, dictSize);
}
size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* zbc)
{
return ZBUFFv05_decompressInitDictionary(zbc, NULL, 0);
}
/* *** Decompression *** */
size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* zbc, void* dst, size_t* maxDstSizePtr, const void* src, size_t* srcSizePtr)
{
const char* const istart = (const char*)src;
const char* ip = istart;
const char* const iend = istart + *srcSizePtr;
char* const ostart = (char*)dst;
char* op = ostart;
char* const oend = ostart + *maxDstSizePtr;
U32 notDone = 1;
while (notDone) {
switch(zbc->stage)
{
case ZBUFFv05ds_init :
return ERROR(init_missing);
case ZBUFFv05ds_readHeader :
/* read header from src */
{
size_t headerSize = ZSTDv05_getFrameParams(&(zbc->params), src, *srcSizePtr);
if (ZSTDv05_isError(headerSize)) return headerSize;
if (headerSize) {
/* not enough input to decode header : tell how many bytes would be necessary */
memcpy(zbc->headerBuffer+zbc->hPos, src, *srcSizePtr);
zbc->hPos += *srcSizePtr;
*maxDstSizePtr = 0;
zbc->stage = ZBUFFv05ds_loadHeader;
return headerSize - zbc->hPos;
}
zbc->stage = ZBUFFv05ds_decodeHeader;
break;
}
/* fall-through */
case ZBUFFv05ds_loadHeader:
/* complete header from src */
{
size_t headerSize = ZBUFFv05_limitCopy(
zbc->headerBuffer + zbc->hPos, ZSTDv05_frameHeaderSize_max - zbc->hPos,
src, *srcSizePtr);
zbc->hPos += headerSize;
ip += headerSize;
headerSize = ZSTDv05_getFrameParams(&(zbc->params), zbc->headerBuffer, zbc->hPos);
if (ZSTDv05_isError(headerSize)) return headerSize;
if (headerSize) {
/* not enough input to decode header : tell how many bytes would be necessary */
*maxDstSizePtr = 0;
return headerSize - zbc->hPos;
}
/* zbc->stage = ZBUFFv05ds_decodeHeader; break; */ /* useless : stage follows */
}
/* fall-through */
case ZBUFFv05ds_decodeHeader:
/* apply header to create / resize buffers */
{
size_t neededOutSize = (size_t)1 << zbc->params.windowLog;
size_t neededInSize = BLOCKSIZE; /* a block is never > BLOCKSIZE */
if (zbc->inBuffSize < neededInSize) {
free(zbc->inBuff);
zbc->inBuffSize = neededInSize;
zbc->inBuff = (char*)malloc(neededInSize);
if (zbc->inBuff == NULL) return ERROR(memory_allocation);
}
if (zbc->outBuffSize < neededOutSize) {
free(zbc->outBuff);
zbc->outBuffSize = neededOutSize;
zbc->outBuff = (char*)malloc(neededOutSize);
if (zbc->outBuff == NULL) return ERROR(memory_allocation);
} }
if (zbc->hPos) {
/* some data already loaded into headerBuffer : transfer into inBuff */
memcpy(zbc->inBuff, zbc->headerBuffer, zbc->hPos);
zbc->inPos = zbc->hPos;
zbc->hPos = 0;
zbc->stage = ZBUFFv05ds_load;
break;
}
zbc->stage = ZBUFFv05ds_read;
/* fall-through */
case ZBUFFv05ds_read:
{
size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
if (neededInSize==0) { /* end of frame */
zbc->stage = ZBUFFv05ds_init;
notDone = 0;
break;
}
if ((size_t)(iend-ip) >= neededInSize) {
/* directly decode from src */
size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
ip, neededInSize);
if (ZSTDv05_isError(decodedSize)) return decodedSize;
ip += neededInSize;
if (!decodedSize) break; /* this was just a header */
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFv05ds_flush;
break;
}
if (ip==iend) { notDone = 0; break; } /* no more input */
zbc->stage = ZBUFFv05ds_load;
}
/* fall-through */
case ZBUFFv05ds_load:
{
size_t neededInSize = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
size_t toLoad = neededInSize - zbc->inPos; /* should always be <= remaining space within inBuff */
size_t loadedSize;
if (toLoad > zbc->inBuffSize - zbc->inPos) return ERROR(corruption_detected); /* should never happen */
loadedSize = ZBUFFv05_limitCopy(zbc->inBuff + zbc->inPos, toLoad, ip, iend-ip);
ip += loadedSize;
zbc->inPos += loadedSize;
if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
{
size_t decodedSize = ZSTDv05_decompressContinue(zbc->zc,
zbc->outBuff + zbc->outStart, zbc->outBuffSize - zbc->outStart,
zbc->inBuff, neededInSize);
if (ZSTDv05_isError(decodedSize)) return decodedSize;
zbc->inPos = 0; /* input is consumed */
if (!decodedSize) { zbc->stage = ZBUFFv05ds_read; break; } /* this was just a header */
zbc->outEnd = zbc->outStart + decodedSize;
zbc->stage = ZBUFFv05ds_flush;
/* break; */ /* ZBUFFv05ds_flush follows */
}
}
/* fall-through */
case ZBUFFv05ds_flush:
{
size_t toFlushSize = zbc->outEnd - zbc->outStart;
size_t flushedSize = ZBUFFv05_limitCopy(op, oend-op, zbc->outBuff + zbc->outStart, toFlushSize);
op += flushedSize;
zbc->outStart += flushedSize;
if (flushedSize == toFlushSize) {
zbc->stage = ZBUFFv05ds_read;
if (zbc->outStart + BLOCKSIZE > zbc->outBuffSize)
zbc->outStart = zbc->outEnd = 0;
break;
}
/* cannot flush everything */
notDone = 0;
break;
}
default: return ERROR(GENERIC); /* impossible */
} }
*srcSizePtr = ip-istart;
*maxDstSizePtr = op-ostart;
{ size_t nextSrcSizeHint = ZSTDv05_nextSrcSizeToDecompress(zbc->zc);
if (nextSrcSizeHint > ZBUFFv05_blockHeaderSize) nextSrcSizeHint+= ZBUFFv05_blockHeaderSize; /* get next block header too */
nextSrcSizeHint -= zbc->inPos; /* already loaded*/
return nextSrcSizeHint;
}
}
/* *************************************
* Tool functions
***************************************/
unsigned ZBUFFv05_isError(size_t errorCode) { return ERR_isError(errorCode); }
const char* ZBUFFv05_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
size_t ZBUFFv05_recommendedDInSize(void) { return BLOCKSIZE + ZBUFFv05_blockHeaderSize /* block header size*/ ; }
size_t ZBUFFv05_recommendedDOutSize(void) { return BLOCKSIZE; }
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv05_H
#define ZSTDv05_H
#if defined (__cplusplus)
extern "C" {
#endif
/*-*************************************
* Dependencies
***************************************/
#include <stddef.h> /* size_t */
#include "../common/mem.h" /* U64, U32 */
/* *************************************
* Simple functions
***************************************/
/*! ZSTDv05_decompress() :
`compressedSize` : must be the _exact_ size of the compressed blob, otherwise decompression will fail.
`dstCapacity` must be large enough, equal or larger than originalSize.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTDv05_isError()) */
size_t ZSTDv05_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
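/* Illustrative usage sketch : `cSrc`, `cSrcSize` and `originalSize` are assumed inputs.
     void*  const dst   = malloc(originalSize);
     size_t const dSize = ZSTDv05_decompress(dst, originalSize, cSrc, cSrcSize);
     if (ZSTDv05_isError(dSize)) printf("decompression failed : %s\n", ZSTDv05_getErrorName(dSize));
*/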
/**
ZSTDv05_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.5.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv05_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv05_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
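/* Illustrative usage sketch : sizing a destination buffer before decompressing a v0.5 frame.
   `dst` is assumed to hold at least `dBound` bytes.
     size_t cSize; unsigned long long dBound;
     ZSTDv05_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
     if (!ZSTDv05_isError(cSize))
         ZSTDv05_decompress(dst, (size_t)dBound, src, cSize);
*/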
/* *************************************
* Helper functions
***************************************/
/* Error Management */
unsigned ZSTDv05_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
const char* ZSTDv05_getErrorName(size_t code); /*!< provides readable string for an error code */
/* *************************************
* Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv05_DCtx_s ZSTDv05_DCtx;
ZSTDv05_DCtx* ZSTDv05_createDCtx(void);
size_t ZSTDv05_freeDCtx(ZSTDv05_DCtx* dctx); /*!< @return : errorCode */
/** ZSTDv05_decompressDCtx() :
* Same as ZSTDv05_decompress(), but requires an already allocated ZSTDv05_DCtx (see ZSTDv05_createDCtx()) */
size_t ZSTDv05_decompressDCtx(ZSTDv05_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/*-***********************
* Simple Dictionary API
*************************/
/*! ZSTDv05_decompress_usingDict() :
* Decompression using a pre-defined Dictionary content (see dictBuilder).
* Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
* Note : dict can be NULL, in which case, it's equivalent to ZSTDv05_decompressDCtx() */
size_t ZSTDv05_decompress_usingDict(ZSTDv05_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
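/* Illustrative usage sketch : `dict`/`dictSize` must be the same dictionary used at compression time.
     ZSTDv05_DCtx* const dctx = ZSTDv05_createDCtx();
     size_t const dSize = ZSTDv05_decompress_usingDict(dctx, dst, dstCapacity, cSrc, cSrcSize, dict, dictSize);
     ZSTDv05_freeDCtx(dctx);
*/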
/*-************************
* Advanced Streaming API
***************************/
typedef enum { ZSTDv05_fast, ZSTDv05_greedy, ZSTDv05_lazy, ZSTDv05_lazy2, ZSTDv05_btlazy2, ZSTDv05_opt, ZSTDv05_btopt } ZSTDv05_strategy;
typedef struct {
U64 srcSize;
U32 windowLog; /* the only useful information to retrieve */
U32 contentLog; U32 hashLog; U32 searchLog; U32 searchLength; U32 targetLength; ZSTDv05_strategy strategy;
} ZSTDv05_parameters;
size_t ZSTDv05_getFrameParams(ZSTDv05_parameters* params, const void* src, size_t srcSize);
size_t ZSTDv05_decompressBegin_usingDict(ZSTDv05_DCtx* dctx, const void* dict, size_t dictSize);
void ZSTDv05_copyDCtx(ZSTDv05_DCtx* dstDCtx, const ZSTDv05_DCtx* srcDCtx);
size_t ZSTDv05_nextSrcSizeToDecompress(ZSTDv05_DCtx* dctx);
size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/*-***********************
* ZBUFF API
*************************/
typedef struct ZBUFFv05_DCtx_s ZBUFFv05_DCtx;
ZBUFFv05_DCtx* ZBUFFv05_createDCtx(void);
size_t ZBUFFv05_freeDCtx(ZBUFFv05_DCtx* dctx);
size_t ZBUFFv05_decompressInit(ZBUFFv05_DCtx* dctx);
size_t ZBUFFv05_decompressInitDictionary(ZBUFFv05_DCtx* dctx, const void* dict, size_t dictSize);
size_t ZBUFFv05_decompressContinue(ZBUFFv05_DCtx* dctx,
void* dst, size_t* dstCapacityPtr,
const void* src, size_t* srcSizePtr);
/*-***************************************************************************
* Streaming decompression
*
* A ZBUFFv05_DCtx object is required to track streaming operations.
* Use ZBUFFv05_createDCtx() and ZBUFFv05_freeDCtx() to create/release resources.
* Use ZBUFFv05_decompressInit() to start a new decompression operation,
* or ZBUFFv05_decompressInitDictionary() if decompression requires a dictionary.
* Note that ZBUFFv05_DCtx objects can be reused multiple times.
*
* Use ZBUFFv05_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change @dst.
* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency)
* or 0 when a frame is completely decoded
* or an error code, which can be tested using ZBUFFv05_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFFv05_recommendedDInSize() / ZBUFFv05_recommendedDOutSize()
* output : ZBUFFv05_recommendedDOutSize == 128 KB, the internal block size ; it ensures it's always possible to write a fully decoded block.
* input : ZBUFFv05_recommendedDInSize == 128 KB + 3 ; just follow indications from ZBUFFv05_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
/* *************************************
* Tool functions
***************************************/
unsigned ZBUFFv05_isError(size_t errorCode);
const char* ZBUFFv05_getErrorName(size_t errorCode);
/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
* These sizes are just hints, and tend to offer better latency */
size_t ZBUFFv05_recommendedDInSize(void);
size_t ZBUFFv05_recommendedDOutSize(void);
/*-*************************************
* Constants
***************************************/
#define ZSTDv05_MAGICNUMBER 0xFD2FB525 /* v0.5 */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTDv05_H */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*- Dependencies -*/
#include "zstd_v06.h"
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */
#include <stdlib.h> /* malloc, free, qsort */
#include "../common/error_private.h"
/* ******************************************************************
mem.h
low-level memory access routines
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*-****************************************
* Compiler specifics
******************************************/
#if defined(_MSC_VER) /* Visual Studio */
# include <stdlib.h> /* _byteswap_ulong */
# include <intrin.h> /* _byteswap_* */
#endif
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/*-**************************************************************
* Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
/*-**************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violates the C standard.
* It can generate buggy code on targets depending on alignment.
* In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
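/* Example (illustrative) : the access method can also be forced from the build command line,
 * e.g. `cc -DMEM_FORCE_MEMORY_ACCESS=1 ...` to select the __packed-statement method. */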
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
/* violates C standard, by lying on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
#else
/* default method, safe and standard.
can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_ulong(in);
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap32(in);
#else
return ((in << 24) & 0xff000000 ) |
((in << 8) & 0x00ff0000 ) |
((in >> 8) & 0x0000ff00 ) |
((in >> 24) & 0x000000ff );
#endif
}
MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
((in << 40) & 0x00ff000000000000ULL) |
((in << 24) & 0x0000ff0000000000ULL) |
((in << 8) & 0x000000ff00000000ULL) |
((in >> 8) & 0x00000000ff000000ULL) |
((in >> 24) & 0x0000000000ff0000ULL) |
((in >> 40) & 0x000000000000ff00ULL) |
((in >> 56) & 0x00000000000000ffULL);
#endif
}
/*=== Little endian r/w ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read16(memPtr);
else {
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian()) {
MEM_write16(memPtr, val);
} else {
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read32(memPtr);
else
return MEM_swap32(MEM_read32(memPtr));
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read64(memPtr);
else
return MEM_swap64(MEM_read64(memPtr));
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
#if defined (__cplusplus)
}
#endif
#endif /* MEM_H_MODULE */
/*
zstd - standard compression library
Header File for static linking only
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net
*/
#ifndef ZSTDv06_STATIC_H
#define ZSTDv06_STATIC_H
/* The prototypes defined within this file are considered experimental.
 * They should not be used in the context of a DLL, as they may change in the future.
 * Prefer static linking if you need them, to keep control over breaking changes between versions.
 */
#if defined (__cplusplus)
extern "C" {
#endif
/*- Advanced Decompression functions -*/
/*! ZSTDv06_decompress_usingPreparedDCtx() :
* Same as ZSTDv06_decompress_usingDict(), but using a reference context `preparedDCtx`, in which the dictionary has already been loaded.
* It avoids reloading the dictionary each time.
* `preparedDCtx` must have been properly initialized using ZSTDv06_decompressBegin_usingDict().
* Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
ZSTDLIBv06_API size_t ZSTDv06_decompress_usingPreparedDCtx(
ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
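/* Illustrative usage sketch (error checks abbreviated; `dict`, `dictSize`, `dst`, `dstCapacity`,
   `src` and `srcSize` are assumed to be provided by the caller) :

       ZSTDv06_DCtx* const preparedDCtx = ZSTDv06_createDCtx();
       ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
       ZSTDv06_decompressBegin_usingDict(preparedDCtx, dict, dictSize);   // load dictionary once
       {   size_t const result = ZSTDv06_decompress_usingPreparedDCtx(dctx, preparedDCtx,
                                                   dst, dstCapacity, src, srcSize);
           if (ZSTDv06_isError(result)) { handleError(result); }          // handleError() is hypothetical
       }
       ZSTDv06_freeDCtx(dctx); ZSTDv06_freeDCtx(preparedDCtx);
*/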
#define ZSTDv06_FRAMEHEADERSIZE_MAX 13 /* for static allocation */
static const size_t ZSTDv06_frameHeaderSize_min = 5;
static const size_t ZSTDv06_frameHeaderSize_max = ZSTDv06_FRAMEHEADERSIZE_MAX;
ZSTDLIBv06_API size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx);
/*
Streaming decompression, direct mode (bufferless)
A ZSTDv06_DCtx object is required to track streaming operations.
Use ZSTDv06_createDCtx() / ZSTDv06_freeDCtx() to manage it.
A ZSTDv06_DCtx object can be re-used multiple times.
The first (optional) operation is to retrieve frame parameters, using ZSTDv06_getFrameParams(), which doesn't consume the input.
It can provide the minimum size of rolling buffer required to properly decompress data,
and optionally the final size of uncompressed content.
(Note : content size is optional information, which may not be present. 0 means : content size unknown)
Frame parameters are extracted from the beginning of the compressed frame.
The amount of data to read is variable, from ZSTDv06_frameHeaderSize_min up to ZSTDv06_frameHeaderSize_max (so if `srcSize` >= ZSTDv06_frameHeaderSize_max, it will always work).
If `srcSize` is too small for the operation to succeed, the function will return the minimum size it requires to produce a result.
Result : 0 when successful, meaning the ZSTDv06_frameParams structure has been filled,
         >0 when there is not enough data in `src` : the value is the expected size needed to successfully decode the header,
         or an errorCode, which can be tested using ZSTDv06_isError().
Start decompression with ZSTDv06_decompressBegin() or ZSTDv06_decompressBegin_usingDict().
Alternatively, you can copy a prepared context, using ZSTDv06_copyDCtx().
Then use ZSTDv06_nextSrcSizeToDecompress() and ZSTDv06_decompressContinue() alternately.
ZSTDv06_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv06_decompressContinue().
ZSTDv06_decompressContinue() requires this exact amount of bytes, or it will fail.
ZSTDv06_decompressContinue() needs previous data blocks during decompression, up to (1 << windowlog).
They should preferably be located contiguously, prior to current block. Alternatively, a round buffer is also possible.
@result of ZSTDv06_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity)
It can be zero, which is not an error; it just means ZSTDv06_decompressContinue() has decoded some header.
A frame is fully decoded when ZSTDv06_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
*/
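/* Minimal sketch of the bufferless streaming loop described above (illustrative; `input`, `op`, `oend`
   and the readExactly() helper are hypothetical, error checks omitted; the argument order of
   ZSTDv06_decompressContinue() is assumed to be (dctx, dst, dstCapacity, src, srcSize)) :

       ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
       ZSTDv06_frameParams fParams;
       ZSTDv06_getFrameParams(&fParams, src, srcSize);       // optional : inspect frame parameters
       ZSTDv06_decompressBegin(dctx);
       while (1) {
           size_t const toRead = ZSTDv06_nextSrcSizeToDecompress(dctx);
           if (toRead == 0) break;                            // frame fully decoded
           readExactly(input, toRead);                        // provide exactly `toRead` bytes
           {   size_t const produced = ZSTDv06_decompressContinue(dctx, op, (size_t)(oend-op), input, toRead);
               op += produced;                                // can be 0 when only a header was decoded
           }
       }
       ZSTDv06_freeDCtx(dctx);
*/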
/* **************************************
* Block functions
****************************************/
/*! Block functions produce and decode raw zstd blocks, without frame metadata.
The user has to keep track of the information required to regenerate data, such as compressed and content sizes.
A few rules to respect :
- Uncompressed block size must be <= ZSTDv06_BLOCKSIZE_MAX (128 KB)
- Compressing or decompressing requires a context structure
+ Use ZSTDv06_createCCtx() and ZSTDv06_createDCtx()
- It is necessary to initialize the context before starting
+ compression : ZSTDv06_compressBegin()
+ decompression : ZSTDv06_decompressBegin()
+ variants _usingDict() are also allowed
+ copyCCtx() and copyDCtx() work too
- When a block is considered not compressible enough, ZSTDv06_compressBlock() result will be zero.
In that case, nothing is written into `dst`.
+ User must test for such an outcome and deal directly with uncompressed data
+ ZSTDv06_decompressBlock() doesn't accept uncompressed data as input !!
*/
#define ZSTDv06_BLOCKSIZE_MAX (128 * 1024) /* define, for static allocation */
ZSTDLIBv06_API size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
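/* Illustrative block-decoding sketch (assumes the caller stored, out-of-band, whether each block was
   kept raw and its sizes `cSize`/`rSize`, per the rules above; error checks omitted) :

       ZSTDv06_DCtx* const dctx = ZSTDv06_createDCtx();
       ZSTDv06_decompressBegin(dctx);
       if (blockWasStoredRaw) {                         // hypothetical flag maintained by the caller
           memcpy(dst, blockSrc, rSize);                // raw blocks are not handled by ZSTDv06_decompressBlock()
       } else {
           size_t const r = ZSTDv06_decompressBlock(dctx, dst, dstCapacity, blockSrc, cSize);
           if (ZSTDv06_isError(r)) { handleError(r); }  // handleError() is hypothetical
       }
       ZSTDv06_freeDCtx(dctx);
*/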
#if defined (__cplusplus)
}
#endif
#endif /* ZSTDv06_STATIC_H */
/*
zstd_internal - common functions to include
Header File for include
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : https://www.zstd.net
*/
#ifndef ZSTDv06_CCOMMON_H_MODULE
#define ZSTDv06_CCOMMON_H_MODULE
/*-*************************************
* Common macros
***************************************/
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/*-*************************************
* Common constants
***************************************/
#define ZSTDv06_DICT_MAGIC 0xEC30A436
#define ZSTDv06_REP_NUM 3
#define ZSTDv06_REP_INIT ZSTDv06_REP_NUM
#define ZSTDv06_REP_MOVE (ZSTDv06_REP_NUM-1)
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1
#define ZSTDv06_WINDOWLOG_ABSOLUTEMIN 12
static const size_t ZSTDv06_fcs_fieldSize[4] = { 0, 1, 2, 8 };
#define ZSTDv06_BLOCKHEADERSIZE 3 /* because C standard does not allow a static const value to be defined using another static const value .... :( */
static const size_t ZSTDv06_blockHeaderSize = ZSTDv06_BLOCKHEADERSIZE;
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
#define HufLog 12
#define IS_HUF 0
#define IS_PCH 1
#define IS_RAW 2
#define IS_RLE 3
#define LONGNBSEQ 0x7F00
#define MINMATCH 3
#define EQUAL_READ32 4
#define REPCODE_STARTVALUE 1
#define Litbits 8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML 52
#define MaxLL 35
#define MaxOff 28
#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog 9
#define LLFSELog 9
#define OffFSELog 8
#define FSEv06_ENCODING_RAW 0
#define FSEv06_ENCODING_RLE 1
#define FSEv06_ENCODING_STATIC 2
#define FSEv06_ENCODING_DYNAMIC 3
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
13,14,15,16 };
static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-1,-1,-1,-1 };
static const U32 LL_defaultNormLog = 6;
static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
12,13,14,15,16 };
static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
-1,-1,-1,-1,-1 };
static const U32 ML_defaultNormLog = 6;
static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
static const U32 OF_defaultNormLog = 5;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTDv06_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTDv06_copy8(d,s); d+=8; s+=8; }
/*! ZSTDv06_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
#define WILDCOPY_OVERLENGTH 8
MEM_STATIC void ZSTDv06_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do
COPY8(op, ip)
while (op < oend);
}
/*-*******************************************
* Private interfaces
*********************************************/
typedef struct {
U32 off;
U32 len;
} ZSTDv06_match_t;
typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTDv06_REP_INIT];
} ZSTDv06_optimal_t;
typedef struct { U32 unused; } ZSTDv06_stats_t;
typedef struct {
void* buffer;
U32* offsetStart;
U32* offset;
BYTE* offCodeStart;
BYTE* litStart;
BYTE* lit;
U16* litLengthStart;
U16* litLength;
BYTE* llCodeStart;
U16* matchLengthStart;
U16* matchLength;
BYTE* mlCodeStart;
U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
U32 longLengthPos;
/* opt */
ZSTDv06_optimal_t* priceTable;
ZSTDv06_match_t* matchTable;
U32* matchLengthFreq;
U32* litLengthFreq;
U32* litFreq;
U32* offCodeFreq;
U32 matchLengthSum;
U32 matchSum;
U32 litLengthSum;
U32 litSum;
U32 offCodeSum;
U32 log2matchLengthSum;
U32 log2matchSum;
U32 log2litLengthSum;
U32 log2litSum;
U32 log2offCodeSum;
U32 factor;
U32 cachedPrice;
U32 cachedLitLength;
const BYTE* cachedLiterals;
ZSTDv06_stats_t stats;
} seqStore_t;
void ZSTDv06_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
#endif /* ZSTDv06_CCOMMON_H_MODULE */
/* ******************************************************************
FSE : Finite State Entropy codec
Public Prototypes declaration
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef FSEv06_H
#define FSEv06_H
#if defined (__cplusplus)
extern "C" {
#endif
/*-****************************************
* FSE simple functions
******************************************/
/*! FSEv06_decompress():
Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'dstCapacity'.
@return : size of regenerated data (<= dstCapacity),
or an error code, which can be tested using FSEv06_isError().
** Important ** : FSEv06_decompress() does not decompress non-compressible nor RLE data !!!
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
size_t FSEv06_decompress(void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize);
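/* Illustrative call (the destination capacity must be known to the caller, since FSE-compressed
   data carries no size header; handleError() is hypothetical) :

       size_t const r = FSEv06_decompress(dst, dstCapacity, cSrc, cSrcSize);
       if (FSEv06_isError(r)) { handleError(r); }
       // on success, r is the number of regenerated bytes (<= dstCapacity)
*/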
/*-*****************************************
* Tool functions
******************************************/
size_t FSEv06_compressBound(size_t size); /* maximum compressed size */
/* Error Management */
unsigned FSEv06_isError(size_t code); /* tells if a return value is an error code */
const char* FSEv06_getErrorName(size_t code); /* provides error code string (useful for debugging) */
/*-*****************************************
* FSE detailed API
******************************************/
/*!
FSEv06_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/
/* *** DECOMPRESSION *** */
/*! FSEv06_readNCount():
Read compactly saved 'normalizedCounter' from 'rBuffer'.
@return : size read from 'rBuffer',
or an errorCode, which can be tested using FSEv06_isError().
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
/*! Constructor and Destructor of FSEv06_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSEv06_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
FSEv06_DTable* FSEv06_createDTable(unsigned tableLog);
void FSEv06_freeDTable(FSEv06_DTable* dt);
/*! FSEv06_buildDTable():
Builds 'dt', which must be already allocated, using FSEv06_createDTable().
return : 0, or an errorCode, which can be tested using FSEv06_isError() */
size_t FSEv06_buildDTable (FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! FSEv06_decompress_usingDTable():
Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= `dstCapacity`),
or an errorCode, which can be tested using FSEv06_isError() */
size_t FSEv06_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv06_DTable* dt);
/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
If block is uncompressed, use memcpy() instead
If block is a single repeated byte, use memset() instead )
The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSEv06_readNCount() if it was saved using FSEv06_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSEv06_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSEv06_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
The next step is to build the decompression tables 'FSEv06_DTable' from 'normalizedCounter'.
This is performed by the function FSEv06_buildDTable().
The space required by 'FSEv06_DTable' must be already allocated using FSEv06_createDTable().
If there is an error, the function will return an error code, which can be tested using FSEv06_isError().
`FSEv06_DTable` can then be used to decompress `cSrc`, with FSEv06_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSEv06_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSEv06_isError(). (ex: dst buffer too small)
*/
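/* Condensed code sketch of the tutorial above (illustrative; assumes the NCount header and the FSE
   bitstream are stored contiguously in `cSrc` and together occupy exactly `cSrcSize` bytes, which is
   the layout consumed by FSEv06_decompress() further below; error handling abbreviated) :

       short norm[FSEv06_MAX_SYMBOL_VALUE+1];
       unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE;
       unsigned tableLog;
       size_t const hSize = FSEv06_readNCount(norm, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
       FSEv06_DTable* const dt = FSEv06_createDTable(tableLog);
       FSEv06_buildDTable(dt, norm, maxSymbolValue, tableLog);
       {   size_t const dSize = FSEv06_decompress_usingDTable(dst, dstCapacity,
                                           (const BYTE*)cSrc + hSize, cSrcSize - hSize, dt);
           if (FSEv06_isError(dSize)) { handleError(dSize); }     // handleError() is hypothetical
       }
       FSEv06_freeDTable(dt);
*/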
#if defined (__cplusplus)
}
#endif
#endif /* FSEv06_H */
/* ******************************************************************
bitstream
Part of FSE library
header file (to include)
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
* these functions are defined in a header file, so they can be included and inlined at their call sites.
*/
/*=========================================
* Target specific
=========================================*/
#if defined(__BMI__) && defined(__GNUC__)
# include <immintrin.h> /* support for bextr (experimental) */
#endif
/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BITv06_DStream_t;
typedef enum { BITv06_DStream_unfinished = 0,
BITv06_DStream_endOfBuffer = 1,
BITv06_DStream_completed = 2,
BITv06_DStream_overflow = 3 } BITv06_DStream_status; /* result of BITv06_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but it slows down performance a bit ... :( */
MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD);
MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* bitD);
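/* Minimal decoding-loop sketch using the API above (illustrative; `nbBits` per read is assumed known
   to the caller, e.g. from a table description read beforehand; consume() and handleError() are hypothetical) :

       BITv06_DStream_t bitD;
       size_t const initResult = BITv06_initDStream(&bitD, srcBuffer, srcSize);
       if (ERR_isError(initResult)) { handleError(initResult); }
       while (BITv06_reloadDStream(&bitD) == BITv06_DStream_unfinished) {
           size_t const field = BITv06_readBits(&bitD, nbBits);       // bits are read backward, from the end of the buffer
           consume(field);
       }
       if (!BITv06_endOfDStream(&bitD)) { handleError(1); }           // stream not entirely consumed : likely corruption
*/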
/*-****************************************
* unsafe API
******************************************/
MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
/*-**************************************************************
* Internal functions
****************************************************************/
MEM_STATIC unsigned BITv06_highbit32 ( U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
unsigned r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
/*-********************************************************
* bitStream decoding
**********************************************************/
/*! BITv06_initDStream() :
* Initialize a BITv06_DStream_t.
* `bitD` : a pointer to an already allocated BITv06_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BITv06_initDStream(BITv06_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
} else {
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */
bitD->bitsConsumed = 8 - BITv06_highbit32(lastByte); }
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BITv06_lookBits(const BITv06_DStream_t* bitD, U32 nbBits)
{
U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BITv06_lookBitsFast() :
*  unsafe version; works only if nbBits >= 1 */
MEM_STATIC size_t BITv06_lookBitsFast(const BITv06_DStream_t* bitD, U32 nbBits)
{
U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BITv06_skipBits(BITv06_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
MEM_STATIC size_t BITv06_readBits(BITv06_DStream_t* bitD, U32 nbBits)
{
size_t const value = BITv06_lookBits(bitD, nbBits);
BITv06_skipBits(bitD, nbBits);
return value;
}
/*! BITv06_readBitsFast() :
*  unsafe version; works only if nbBits >= 1 */
MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)
{
size_t const value = BITv06_lookBitsFast(bitD, nbBits);
BITv06_skipBits(bitD, nbBits);
return value;
}
MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
return BITv06_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BITv06_DStream_unfinished;
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv06_DStream_endOfBuffer;
return BITv06_DStream_completed;
}
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BITv06_DStream_status result = BITv06_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BITv06_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BITv06_endOfDStream() :
* @return Tells if DStream has exactly reached its end (all bits consumed).
*/
MEM_STATIC unsigned BITv06_endOfDStream(const BITv06_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#if defined (__cplusplus)
}
#endif
#endif /* BITSTREAM_H_MODULE */
/* ******************************************************************
FSE : Finite State Entropy coder
header file for static linking (only)
Copyright (C) 2013-2015, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef FSEv06_STATIC_H
#define FSEv06_STATIC_H
#if defined (__cplusplus)
extern "C" {
#endif
/* *****************************************
* Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSEv06_NCOUNTBOUND 512
#define FSEv06_BLOCKBOUND(size) (size + (size>>7))
#define FSEv06_COMPRESSBOUND(size) (FSEv06_NCOUNTBOUND + FSEv06_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
#define FSEv06_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
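/* Example of static allocation (illustrative) :

       FSEv06_DTable dTableStack[ FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG) ];   // no createDTable()/freeDTable() needed

   This is exactly how the internal DTable_max_t type, defined further below, is sized. */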
/* *****************************************
* FSE advanced API
*******************************************/
size_t FSEv06_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
/* same as FSEv06_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr */
size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits);
/* build a fake FSEv06_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, unsigned char symbolValue);
/* build a fake FSEv06_DTable, designed to always generate the same symbolValue */
/* *****************************************
* FSE symbol decompression API
*******************************************/
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSEv06_DState_t;
static void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt);
static unsigned char FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
/* *****************************************
* FSE unsafe API
*******************************************/
static unsigned char FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
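/* Symbol-by-symbol decoding sketch with the DState API above (illustrative; a previously built `dt`
   plus output cursors `op`/`oend` are assumed; the two-state interleaving and the tail handling used by
   FSEv06_decompress_usingDTable_generic() further below are omitted for brevity) :

       BITv06_DStream_t bitD;
       FSEv06_DState_t state;
       BITv06_initDStream(&bitD, cSrc, cSrcSize);
       FSEv06_initDState(&state, &bitD, dt);
       while ( (BITv06_reloadDStream(&bitD) == BITv06_DStream_unfinished) && (op < oend) )
           *op++ = FSEv06_decodeSymbol(&state, &bitD);
       // symbols still held in the bit container must then be flushed, as in the tail loop of the generic decoder
*/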
/* *****************************************
* Implementation of inlined functions
*******************************************/
/* ====== Decompression ====== */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSEv06_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSEv06_decode_t; /* size == U32 */
MEM_STATIC void FSEv06_initDState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD, const FSEv06_DTable* dt)
{
const void* ptr = dt;
const FSEv06_DTableHeader* const DTableH = (const FSEv06_DTableHeader*)ptr;
DStatePtr->state = BITv06_readBits(bitD, DTableH->tableLog);
BITv06_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSEv06_peekSymbol(const FSEv06_DState_t* DStatePtr)
{
FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
return DInfo.symbol;
}
MEM_STATIC void FSEv06_updateState(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
{
FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
size_t const lowBits = BITv06_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
}
MEM_STATIC BYTE FSEv06_decodeSymbol(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
{
FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
BYTE const symbol = DInfo.symbol;
size_t const lowBits = BITv06_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
/*! FSEv06_decodeSymbolFast() :
unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSEv06_decodeSymbolFast(FSEv06_DState_t* DStatePtr, BITv06_DStream_t* bitD)
{
FSEv06_decode_t const DInfo = ((const FSEv06_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
BYTE const symbol = DInfo.symbol;
size_t const lowBits = BITv06_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
#ifndef FSEv06_COMMONDEFS_ONLY
/* **************************************************************
* Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSEv06_MAX_MEMORY_USAGE 14
#define FSEv06_DEFAULT_MEMORY_USAGE 13
/*!FSEv06_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSEv06_MAX_SYMBOL_VALUE 255
/* **************************************************************
* template functions type & suffix
****************************************************************/
#define FSEv06_FUNCTION_TYPE BYTE
#define FSEv06_FUNCTION_EXTENSION
#define FSEv06_DECODE_TYPE FSEv06_decode_t
#endif /* !FSEv06_COMMONDEFS_ONLY */
/* ***************************************************************
* Constants
*****************************************************************/
#define FSEv06_MAX_TABLELOG (FSEv06_MAX_MEMORY_USAGE-2)
#define FSEv06_MAX_TABLESIZE (1U<<FSEv06_MAX_TABLELOG)
#define FSEv06_MAXTABLESIZE_MASK (FSEv06_MAX_TABLESIZE-1)
#define FSEv06_DEFAULT_TABLELOG (FSEv06_DEFAULT_MEMORY_USAGE-2)
#define FSEv06_MIN_TABLELOG 5
#define FSEv06_TABLELOG_ABSOLUTE_MAX 15
#if FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX
#error "FSEv06_MAX_TABLELOG > FSEv06_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
#define FSEv06_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
#if defined (__cplusplus)
}
#endif
#endif /* FSEv06_STATIC_H */
/*
Common functions of New Generation Entropy library
Copyright (C) 2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */
/*-****************************************
* FSE Error Management
******************************************/
unsigned FSEv06_isError(size_t code) { return ERR_isError(code); }
const char* FSEv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
/* **************************************************************
* HUF Error Management
****************************************************************/
static unsigned HUFv06_isError(size_t code) { return ERR_isError(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static short FSEv06_abs(short a) { return a<0 ? -a : a; }
size_t FSEv06_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSEv06_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSEv06_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr)) {
if (previous0) {
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF) {
n0+=24;
if (ip < iend-5) {
ip+=2;
bitStream = MEM_readLE32(ip) >> bitCount;
} else {
bitStream >>= 16;
bitCount+=16;
} }
while ((bitStream & 3) == 3) {
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{ short const max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max) {
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
} else {
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSEv06_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold) {
nbBits--;
threshold >>= 1;
}
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
} } /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
if (remaining != 1) return ERROR(GENERIC);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
return ip-istart;
}
/* ******************************************************************
FSE : Finite State Entropy decoder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define FSEv06_isError ERR_isError
#define FSEv06_STATIC_ASSERT(c) { enum { FSEv06_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
* Complex types
****************************************************************/
typedef U32 DTable_max_t[FSEv06_DTABLE_SIZE_U32(FSEv06_MAX_TABLELOG)];
/* **************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSEv06_FUNCTION_EXTENSION
# error "FSEv06_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSEv06_FUNCTION_TYPE
# error "FSEv06_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSEv06_CAT(X,Y) X##Y
#define FSEv06_FUNCTION_NAME(X,Y) FSEv06_CAT(X,Y)
#define FSEv06_TYPE_NAME(X,Y) FSEv06_CAT(X,Y)
/* Function templates */
FSEv06_DTable* FSEv06_createDTable (unsigned tableLog)
{
if (tableLog > FSEv06_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv06_TABLELOG_ABSOLUTE_MAX;
return (FSEv06_DTable*)malloc( FSEv06_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}
void FSEv06_freeDTable (FSEv06_DTable* dt)
{
free(dt);
}
size_t FSEv06_buildDTable(FSEv06_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
FSEv06_DECODE_TYPE* const tableDecode = (FSEv06_DECODE_TYPE*) (tdPtr);
U16 symbolNext[FSEv06_MAX_SYMBOL_VALUE+1];
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
/* Sanity Checks */
if (maxSymbolValue > FSEv06_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSEv06_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
{ FSEv06_DTableHeader DTableH;
DTableH.tableLog = (U16)tableLog;
DTableH.fastMode = 1;
{ S16 const largeLimit= (S16)(1 << (tableLog-1));
U32 s;
for (s=0; s<maxSV1; s++) {
if (normalizedCounter[s]==-1) {
tableDecode[highThreshold--].symbol = (FSEv06_FUNCTION_TYPE)s;
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
symbolNext[s] = normalizedCounter[s];
} } }
memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
{ U32 const tableMask = tableSize-1;
U32 const step = FSEv06_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
for (i=0; i<normalizedCounter[s]; i++) {
tableDecode[position].symbol = (FSEv06_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
} }
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
/* Build Decoding table */
{ U32 u;
for (u=0; u<tableSize; u++) {
FSEv06_FUNCTION_TYPE const symbol = (FSEv06_FUNCTION_TYPE)(tableDecode[u].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[u].nbBits = (BYTE) (tableLog - BITv06_highbit32 ((U32)nextState) );
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
return 0;
}
#ifndef FSEv06_COMMONDEFS_ONLY
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
size_t FSEv06_buildDTable_rle (FSEv06_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSEv06_decode_t* const cell = (FSEv06_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
size_t FSEv06_buildDTable_raw (FSEv06_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSEv06_DTableHeader* const DTableH = (FSEv06_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSEv06_decode_t* const dinfo = (FSEv06_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSV1 = tableMask+1;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<maxSV1; s++) {
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE size_t FSEv06_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSEv06_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BITv06_DStream_t bitD;
FSEv06_DState_t state1;
FSEv06_DState_t state2;
/* Init */
{ size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */
if (FSEv06_isError(errorCode)) return errorCode; }
FSEv06_initDState(&state1, &bitD, dt);
FSEv06_initDState(&state2, &bitD, dt);
#define FSEv06_GETSYMBOL(statePtr) fast ? FSEv06_decodeSymbolFast(statePtr, &bitD) : FSEv06_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BITv06_reloadDStream(&bitD)==BITv06_DStream_unfinished) && (op<olimit) ; op+=4) {
op[0] = FSEv06_GETSYMBOL(&state1);
if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BITv06_reloadDStream(&bitD);
op[1] = FSEv06_GETSYMBOL(&state2);
if (FSEv06_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BITv06_reloadDStream(&bitD) > BITv06_DStream_unfinished) { op+=2; break; } }
op[2] = FSEv06_GETSYMBOL(&state1);
if (FSEv06_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BITv06_reloadDStream(&bitD);
op[3] = FSEv06_GETSYMBOL(&state2);
}
/* tail */
/* note : BITv06_reloadDStream(&bitD) >= FSEv06_DStream_partiallyFilled; Ends at exactly BITv06_DStream_completed */
while (1) {
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSEv06_GETSYMBOL(&state1);
if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
*op++ = FSEv06_GETSYMBOL(&state2);
break;
}
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSEv06_GETSYMBOL(&state2);
if (BITv06_reloadDStream(&bitD)==BITv06_DStream_overflow) {
*op++ = FSEv06_GETSYMBOL(&state1);
break;
} }
return op-ostart;
}
size_t FSEv06_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSEv06_DTable* dt)
{
const void* ptr = dt;
const FSEv06_DTableHeader* DTableH = (const FSEv06_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSEv06_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
size_t FSEv06_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSEv06_MAX_SYMBOL_VALUE+1];
DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
unsigned tableLog;
unsigned maxSymbolValue = FSEv06_MAX_SYMBOL_VALUE;
if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
/* normal FSE decoding mode */
{ size_t const NCountLength = FSEv06_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSEv06_isError(NCountLength)) return NCountLength;
if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
ip += NCountLength;
cSrcSize -= NCountLength;
}
{ size_t const errorCode = FSEv06_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSEv06_isError(errorCode)) return errorCode; }
return FSEv06_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */
}
#endif /* FSEv06_COMMONDEFS_ONLY */
/* ******************************************************************
Huffman coder, part of New Generation Entropy library
header file
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUFv06_H
#define HUFv06_H
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************************
* HUF simple functions
******************************************/
size_t HUFv06_decompress(void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize);
/*
HUFv06_decompress() :
Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'dstSize'.
`dstSize` : must be the **exact** size of original (uncompressed) data.
Note : in contrast with FSE, HUFv06_decompress can regenerate
RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
because it knows the size to regenerate.
@return : size of regenerated data (== dstSize)
or an error code, which can be tested using HUFv06_isError()
*/
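/* Illustrative call (the exact regenerated size `dstSize` must be stored and supplied by the caller;
   handleError() is hypothetical) :

       size_t const r = HUFv06_decompress(dst, dstSize, cSrc, cSrcSize);
       if (HUFv06_isError(r)) { handleError(r); }
       // on success, r == dstSize
*/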
/* ****************************************
* Tool functions
******************************************/
size_t HUFv06_compressBound(size_t size); /**< maximum compressed size */
#if defined (__cplusplus)
}
#endif
#endif /* HUFv06_H */
/* ******************************************************************
Huffman codec, part of New Generation Entropy library
header file, for static linking only
Copyright (C) 2013-2016, Yann Collet
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUFv06_STATIC_H
#define HUFv06_STATIC_H
#if defined (__cplusplus)
extern "C" {
#endif
/* ****************************************
* Static allocation
******************************************/
/* HUF buffer bounds */
#define HUFv06_CTABLEBOUND 129
#define HUFv06_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
#define HUFv06_COMPRESSBOUND(size) (HUFv06_CTABLEBOUND + HUFv06_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* static allocation of HUF's DTable */
#define HUFv06_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog))
#define HUFv06_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
unsigned short DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUFv06_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
#define HUFv06_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
unsigned int DTable[HUFv06_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
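/* Example (illustrative sketch) : a single-symbol DTable sized for the default table log
   can be declared on the stack, with the first cell recording maxTableLog :
       HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
   which expands to roughly :
       unsigned short DTable[1 + (1<<HUFv06_MAX_TABLELOG)] = { HUFv06_MAX_TABLELOG };
   i.e. 4097 U16 cells (~8 KB) when HUFv06_MAX_TABLELOG == 12.
*/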
/* ****************************************
* Advanced decompression functions
******************************************/
size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
/*!
HUFv06_decompress() does the following:
1. select the decompression algorithm (X2, X4, X6) based on pre-computed heuristics
2. build the Huffman table from its saved, compact representation, using HUFv06_readDTableXn()
3. decode 1 or 4 segments in parallel, using HUFv06_decompressSXn_usingDTable()
*/
size_t HUFv06_readDTableX2 (unsigned short* DTable, const void* src, size_t srcSize);
size_t HUFv06_readDTableX4 (unsigned* DTable, const void* src, size_t srcSize);
size_t HUFv06_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
size_t HUFv06_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
/* single stream variants */
size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
size_t HUFv06_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned short* DTable);
size_t HUFv06_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const unsigned* DTable);
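/* Example of the two-step advanced usage (illustrative sketch; DTable assumed declared
   with HUFv06_CREATE_STATIC_DTABLEX2, and `cSrc` holding table description + streams) :
       size_t const hSize = HUFv06_readDTableX2(DTable, cSrc, cSrcSize);
       if (HUFv06_isError(hSize)) return hSize;
       if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
       return HUFv06_decompress4X2_usingDTable(dst, dstSize,
                                               (const BYTE*)cSrc + hSize, cSrcSize - hSize,
                                               DTable);
   This mirrors what HUFv06_decompress4X2() does internally (see its definition below).
*/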
/* **************************************************************
* Constants
****************************************************************/
#define HUFv06_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUFv06_MAX_TABLELOG. Beyond that value, code does not work */
#define HUFv06_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUFv06_ABSOLUTEMAX_TABLELOG */
#define HUFv06_DEFAULT_TABLELOG HUFv06_MAX_TABLELOG /* tableLog by default, when not specified */
#define HUFv06_MAX_SYMBOL_VALUE 255
#if (HUFv06_MAX_TABLELOG > HUFv06_ABSOLUTEMAX_TABLELOG)
# error "HUFv06_MAX_TABLELOG is too large !"
#endif
/*! HUFv06_readStats() :
Read compact Huffman tree, saved by HUFv06_writeCTable().
`huffWeight` is destination buffer.
@return : size read from `src`
*/
MEM_STATIC size_t HUFv06_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
/* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) { /* special header */
if (iSize >= (242)) { /* RLE */
static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, hwSize);
iSize = 0;
}
else { /* Incompressible */
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
{ U32 n;
for (n=0; n<oSize; n+=2) {
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
} } } }
else { /* header compressed with FSE (normal case) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSEv06_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
if (FSEv06_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUFv06_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
weightTotal = 0;
{ U32 n; for (n=0; n<oSize; n++) {
if (huffWeight[n] >= HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
} }
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
{ U32 const tableLog = BITv06_highbit32(weightTotal) + 1;
if (tableLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
*tableLogPtr = tableLog;
/* determine last weight */
{ U32 const total = 1 << tableLog;
U32 const rest = total - weightTotal;
U32 const verif = 1 << BITv06_highbit32(rest);
U32 const lastWeight = BITv06_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
} }
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
return iSize+1;
}
#if defined (__cplusplus)
}
#endif
#endif /* HUFv06_STATIC_H */
/* ******************************************************************
Huffman decoder, part of New Generation Entropy library
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
# define inline __inline
#else
# define inline /* disable inline */
#endif
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define HUFv06_STATIC_ASSERT(c) { enum { HUFv06_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* *******************************************************
* HUF : Huffman block decompression
*********************************************************/
typedef struct { BYTE byte; BYTE nbBits; } HUFv06_DEltX2; /* single-symbol decoding */
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv06_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
/*-***************************/
/* single-symbol decoding */
/*-***************************/
size_t HUFv06_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUFv06_MAX_SYMBOL_VALUE + 1];
U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
size_t iSize;
U32 nbSymbols = 0;
U32 n;
U32 nextRankStart;
void* const dtPtr = DTable + 1;
HUFv06_DEltX2* const dt = (HUFv06_DEltX2*)dtPtr;
HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUFv06_readStats(huffWeight, HUFv06_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUFv06_isError(iSize)) return iSize;
/* check result */
if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
DTable[0] = (U16)tableLog; /* maybe should separate sizeof allocated DTable, from used size of DTable, in case of re-use */
/* Prepare ranks */
nextRankStart = 0;
for (n=1; n<tableLog+1; n++) {
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
}
/* fill DTable */
for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
const U32 length = (1 << w) >> 1;
U32 i;
HUFv06_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
}
return iSize;
}
static BYTE HUFv06_decodeSymbolX2(BITv06_DStream_t* Dstream, const HUFv06_DEltX2* dt, const U32 dtLog)
{
const size_t val = BITv06_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
const BYTE c = dt[val].byte;
BITv06_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUFv06_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUFv06_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUFv06_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUFv06_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
static inline size_t HUFv06_decodeStreamX2(BYTE* p, BITv06_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv06_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-4)) {
HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
HUFv06_DECODE_SYMBOLX2_1(p, bitDPtr);
HUFv06_DECODE_SYMBOLX2_2(p, bitDPtr);
HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd))
HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUFv06_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
size_t HUFv06_decompress1X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + dstSize;
const U32 dtLog = DTable[0];
const void* dtPtr = DTable;
const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr)+1;
BITv06_DStream_t bitD;
{ size_t const errorCode = BITv06_initDStream(&bitD, cSrc, cSrcSize);
if (HUFv06_isError(errorCode)) return errorCode; }
HUFv06_decodeStreamX2(op, &bitD, oend, dt, dtLog);
/* check */
if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
return dstSize;
}
size_t HUFv06_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUFv06_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUFv06_decompress1X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
size_t HUFv06_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U16* DTable)
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable;
const HUFv06_DEltX2* const dt = ((const HUFv06_DEltX2*)dtPtr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BITv06_DStream_t bitD1;
BITv06_DStream_t bitD2;
BITv06_DStream_t bitD3;
BITv06_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BITv06_initDStream(&bitD1, istart1, length1);
if (HUFv06_isError(errorCode)) return errorCode;
errorCode = BITv06_initDStream(&bitD2, istart2, length2);
if (HUFv06_isError(errorCode)) return errorCode;
errorCode = BITv06_initDStream(&bitD3, istart3, length3);
if (HUFv06_isError(errorCode)) return errorCode;
errorCode = BITv06_initDStream(&bitD4, istart4, length4);
if (HUFv06_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
HUFv06_DECODE_SYMBOLX2_1(op1, &bitD1);
HUFv06_DECODE_SYMBOLX2_1(op2, &bitD2);
HUFv06_DECODE_SYMBOLX2_1(op3, &bitD3);
HUFv06_DECODE_SYMBOLX2_1(op4, &bitD4);
HUFv06_DECODE_SYMBOLX2_2(op1, &bitD1);
HUFv06_DECODE_SYMBOLX2_2(op2, &bitD2);
HUFv06_DECODE_SYMBOLX2_2(op3, &bitD3);
HUFv06_DECODE_SYMBOLX2_2(op4, &bitD4);
HUFv06_DECODE_SYMBOLX2_0(op1, &bitD1);
HUFv06_DECODE_SYMBOLX2_0(op2, &bitD2);
HUFv06_DECODE_SYMBOLX2_0(op3, &bitD3);
HUFv06_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUFv06_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUFv06_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUFv06_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUFv06_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
size_t HUFv06_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv06_CREATE_STATIC_DTABLEX2(DTable, HUFv06_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t const errorCode = HUFv06_readDTableX2 (DTable, cSrc, cSrcSize);
if (HUFv06_isError(errorCode)) return errorCode;
if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
ip += errorCode;
cSrcSize -= errorCode;
return HUFv06_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/* *************************/
/* double-symbols decoding */
/* *************************/
static void HUFv06_fillDTableX4Level2(HUFv06_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUFv06_DEltX4 DElt;
U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1) {
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
{ U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
}}
}
typedef U32 rankVal_t[HUFv06_ABSOLUTEMAX_TABLELOG][HUFv06_ABSOLUTEMAX_TABLELOG + 1];
static void HUFv06_fillDTableX4(HUFv06_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUFv06_ABSOLUTEMAX_TABLELOG + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++) {
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUFv06_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
} else {
HUFv06_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
{ U32 u;
const U32 end = start + length;
for (u = start; u < end; u++) DTable[u] = DElt;
} }
rankVal[weight] += length;
}
}
size_t HUFv06_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUFv06_MAX_SYMBOL_VALUE + 1];
sortedSymbol_t sortedSymbol[HUFv06_MAX_SYMBOL_VALUE + 1];
U32 rankStats[HUFv06_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
U32 rankStart0[HUFv06_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
const U32 memLog = DTable[0];
size_t iSize;
void* dtPtr = DTable;
HUFv06_DEltX4* const dt = ((HUFv06_DEltX4*)dtPtr) + 1;
HUFv06_STATIC_ASSERT(sizeof(HUFv06_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
if (memLog > HUFv06_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUFv06_readStats(weightList, HUFv06_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUFv06_isError(iSize)) return iSize;
/* check result */
if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{ U32 w, nextRankStart = 0;
for (w=1; w<maxW+1; w++) {
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{ U32 s;
for (s=0; s<nbSymbols; s++) {
U32 const w = weightList[s];
U32 const r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{ U32* const rankVal0 = rankVal[0];
{ int const rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
U32 nextRankVal = 0;
U32 w;
for (w=1; w<maxW+1; w++) {
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
} }
{ U32 const minBits = tableLog+1 - maxW;
U32 consumed;
for (consumed = minBits; consumed < memLog - minBits + 1; consumed++) {
U32* const rankValPtr = rankVal[consumed];
U32 w;
for (w = 1; w < maxW+1; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } } }
HUFv06_fillDTableX4(dt, memLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
return iSize;
}
static U32 HUFv06_decodeSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
{
const size_t val = BITv06_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BITv06_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUFv06_decodeLastSymbolX4(void* op, BITv06_DStream_t* DStream, const HUFv06_DEltX4* dt, const U32 dtLog)
{
const size_t val = BITv06_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BITv06_skipBits(DStream, dt[val].nbBits);
else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BITv06_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
} }
return 1;
}
#define HUFv06_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUFv06_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUFv06_MAX_TABLELOG<=12)) \
ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUFv06_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUFv06_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
static inline size_t HUFv06_decodeStreamX4(BYTE* p, BITv06_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv06_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p < pEnd-7)) {
HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
HUFv06_DECODE_SYMBOLX4_1(p, bitDPtr);
HUFv06_DECODE_SYMBOLX4_2(p, bitDPtr);
HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to the end */
while ((BITv06_reloadDStream(bitDPtr) == BITv06_DStream_unfinished) && (p <= pEnd-2))
HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUFv06_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUFv06_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
size_t HUFv06_decompress1X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U32* DTable)
{
const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const U32 dtLog = DTable[0];
const void* const dtPtr = DTable;
const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
/* Init */
BITv06_DStream_t bitD;
{ size_t const errorCode = BITv06_initDStream(&bitD, istart, cSrcSize);
if (HUFv06_isError(errorCode)) return errorCode; }
/* decode */
HUFv06_decodeStreamX4(ostart, &bitD, oend, dt, dtLog);
/* check */
if (!BITv06_endOfDStream(&bitD)) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
size_t HUFv06_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUFv06_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUFv06_decompress1X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
size_t HUFv06_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const U32* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable;
const HUFv06_DEltX4* const dt = ((const HUFv06_DEltX4*)dtPtr) +1;
const U32 dtLog = DTable[0];
size_t errorCode;
/* Init */
BITv06_DStream_t bitD1;
BITv06_DStream_t bitD2;
BITv06_DStream_t bitD3;
BITv06_DStream_t bitD4;
const size_t length1 = MEM_readLE16(istart);
const size_t length2 = MEM_readLE16(istart+2);
const size_t length3 = MEM_readLE16(istart+4);
size_t length4;
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
length4 = cSrcSize - (length1 + length2 + length3 + 6);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
errorCode = BITv06_initDStream(&bitD1, istart1, length1);
if (HUFv06_isError(errorCode)) return errorCode;
errorCode = BITv06_initDStream(&bitD2, istart2, length2);
if (HUFv06_isError(errorCode)) return errorCode;
errorCode = BITv06_initDStream(&bitD3, istart3, length3);
if (HUFv06_isError(errorCode)) return errorCode;
errorCode = BITv06_initDStream(&bitD4, istart4, length4);
if (HUFv06_isError(errorCode)) return errorCode;
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
for ( ; (endSignal==BITv06_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
HUFv06_DECODE_SYMBOLX4_1(op1, &bitD1);
HUFv06_DECODE_SYMBOLX4_1(op2, &bitD2);
HUFv06_DECODE_SYMBOLX4_1(op3, &bitD3);
HUFv06_DECODE_SYMBOLX4_1(op4, &bitD4);
HUFv06_DECODE_SYMBOLX4_2(op1, &bitD1);
HUFv06_DECODE_SYMBOLX4_2(op2, &bitD2);
HUFv06_DECODE_SYMBOLX4_2(op3, &bitD3);
HUFv06_DECODE_SYMBOLX4_2(op4, &bitD4);
HUFv06_DECODE_SYMBOLX4_0(op1, &bitD1);
HUFv06_DECODE_SYMBOLX4_0(op2, &bitD2);
HUFv06_DECODE_SYMBOLX4_0(op3, &bitD3);
HUFv06_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BITv06_reloadDStream(&bitD1) | BITv06_reloadDStream(&bitD2) | BITv06_reloadDStream(&bitD3) | BITv06_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUFv06_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUFv06_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUFv06_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUFv06_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BITv06_endOfDStream(&bitD1) & BITv06_endOfDStream(&bitD2) & BITv06_endOfDStream(&bitD3) & BITv06_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
size_t HUFv06_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv06_CREATE_STATIC_DTABLEX4(DTable, HUFv06_MAX_TABLELOG);
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUFv06_readDTableX4 (DTable, cSrc, cSrcSize);
if (HUFv06_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize;
cSrcSize -= hSize;
return HUFv06_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
}
/* ********************************/
/* Generic decompression selector */
/* ********************************/
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUFv06_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
static const decompressionAlgo decompress[3] = { HUFv06_decompress4X2, HUFv06_decompress4X4, NULL };
U32 Dtime[3]; /* decompression time estimation */
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
/* decoder timing evaluation */
{ U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
U32 const D256 = (U32)(dstSize >> 8);
U32 n; for (n=0; n<3; n++)
Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
}
Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
{ U32 algoNb = 0;
if (Dtime[1] < Dtime[0]) algoNb = 1;
/* if (Dtime[2] < Dtime[algoNb]) algoNb = 2; */ /* current speed of HUFv06_decompress4X6 is not good */
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
}
/* return HUFv06_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */
/* return HUFv06_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */
/* return HUFv06_decompress4X6(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams quad-symbols decoding */
}
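/* Worked example of the selection heuristic above (illustrative numbers, not a benchmark) :
   cSrcSize = 40960, dstSize = 102400  =>  Q = 40960*16/102400 = 6, D256 = 102400>>8 = 400.
   Dtime[0] = 883 + 128*400 = 52083 ; Dtime[1] = 1437 + 74*400 = 31037,
   then with the cache-eviction penalty Dtime[1] = 31037 + (31037>>4) = 32976.
   Since Dtime[1] < Dtime[0], the double-symbols decoder HUFv06_decompress4X4() is selected.
*/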
/*
Common functions of Zstd compression library
Copyright (C) 2015-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net/
*/
/*-****************************************
* Version
******************************************/
/*-****************************************
* ZSTD Error Management
******************************************/
/*! ZSTDv06_isError() :
* tells if a return value is an error code */
unsigned ZSTDv06_isError(size_t code) { return ERR_isError(code); }
/*! ZSTDv06_getErrorName() :
* provides error code string from function result (useful for debugging) */
const char* ZSTDv06_getErrorName(size_t code) { return ERR_getErrorName(code); }
/* **************************************************************
* ZBUFF Error Management
****************************************************************/
unsigned ZBUFFv06_isError(size_t errorCode) { return ERR_isError(errorCode); }
const char* ZBUFFv06_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
/*
zstd - standard compression library
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* HEAPMODE :
* Select how default decompression function ZSTDv06_decompress() will allocate memory,
* on the stack (0), or on the heap (1, requires malloc())
*/
#ifndef ZSTDv06_HEAPMODE
# define ZSTDv06_HEAPMODE 1
#endif
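/* Example (sketch) : the default can be overridden at build time, e.g. by passing
   `-DZSTDv06_HEAPMODE=0` on the compiler command line. With heap mode disabled,
   ZSTDv06_decompress() is expected to place its ZSTDv06_DCtx on the stack, which is a
   large object (see struct ZSTDv06_DCtx_s below), hence the default of 1. */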
/*-*******************************************************
* Compiler specifics
*********************************************************/
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif
/*-*************************************
* Macros
***************************************/
#define ZSTDv06_isError ERR_isError /* for inlining */
#define FSEv06_isError ERR_isError
#define HUFv06_isError ERR_isError
/*-*******************************************************
* Memory operations
**********************************************************/
static void ZSTDv06_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
/*-*************************************************************
* Context management
***************************************************************/
typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock } ZSTDv06_dStage;
struct ZSTDv06_DCtx_s
{
FSEv06_DTable LLTable[FSEv06_DTABLE_SIZE_U32(LLFSELog)];
FSEv06_DTable OffTable[FSEv06_DTABLE_SIZE_U32(OffFSELog)];
FSEv06_DTable MLTable[FSEv06_DTABLE_SIZE_U32(MLFSELog)];
unsigned hufTableX4[HUFv06_DTABLE_SIZE(HufLog)];
const void* previousDstEnd;
const void* base;
const void* vBase;
const void* dictEnd;
size_t expected;
size_t headerSize;
ZSTDv06_frameParams fParams;
blockType_t bType; /* used in ZSTDv06_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
ZSTDv06_dStage stage;
U32 flagRepeatTable;
const BYTE* litPtr;
size_t litSize;
BYTE litBuffer[ZSTDv06_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
}; /* typedef'd to ZSTDv06_DCtx within "zstd_static.h" */
size_t ZSTDv06_sizeofDCtx (void); /* Hidden declaration */
size_t ZSTDv06_sizeofDCtx (void) { return sizeof(ZSTDv06_DCtx); }
size_t ZSTDv06_decompressBegin(ZSTDv06_DCtx* dctx)
{
dctx->expected = ZSTDv06_frameHeaderSize_min;
dctx->stage = ZSTDds_getFrameHeaderSize;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
dctx->vBase = NULL;
dctx->dictEnd = NULL;
dctx->hufTableX4[0] = HufLog;
dctx->flagRepeatTable = 0;
return 0;
}
ZSTDv06_DCtx* ZSTDv06_createDCtx(void)
{
ZSTDv06_DCtx* dctx = (ZSTDv06_DCtx*)malloc(sizeof(ZSTDv06_DCtx));
if (dctx==NULL) return NULL;
ZSTDv06_decompressBegin(dctx);
return dctx;
}
size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx)
{
free(dctx);
return 0; /* reserved as a potential error code in the future */
}
void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dstDCtx, const ZSTDv06_DCtx* srcDCtx)
{
memcpy(dstDCtx, srcDCtx,
sizeof(ZSTDv06_DCtx) - (ZSTDv06_BLOCKSIZE_MAX+WILDCOPY_OVERLENGTH + ZSTDv06_frameHeaderSize_max)); /* no need to copy workspace */
}
/*-*************************************************************
* Decompression section
***************************************************************/
/* Frame format description
Frame Header - [ Block Header - Block ] - Frame End
1) Frame Header
- 4 bytes - Magic Number : ZSTDv06_MAGICNUMBER (defined within zstd_static.h)
- 1 byte - Frame Descriptor
2) Block Header
- 3 bytes, starting with a 2-bits descriptor
Uncompressed, Compressed, Frame End, unused
3) Block
See Block Format Description
4) Frame End
- 3 bytes, compatible with Block Header
*/
/* Frame descriptor
1 byte, using :
bit 0-3 : windowLog - ZSTDv06_WINDOWLOG_ABSOLUTEMIN (see zstd_internal.h)
bit 4 : minmatch 4(0) or 3(1)
bit 5 : reserved (must be zero)
bit 6-7 : Frame content size : unknown, 1 byte, 2 bytes, 8 bytes
Optional : content size (0, 1, 2 or 8 bytes)
0 : unknown
1 : 0-255 bytes
2 : 256 - 65535+256
8 : up to 16 exa
*/
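/* Worked example (illustrative) : a frame descriptor byte of 0x45 (binary 0100 0101)
   decodes as : windowLog = 5 + ZSTDv06_WINDOWLOG_ABSOLUTEMIN, minmatch = 4 (bit 4 == 0),
   reserved bit clear, fcsId = 1, so 1 extra byte of frame content size (0-255) follows.
   This matches the field extraction performed in ZSTDv06_getFrameParams() below. */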
/* Compressed Block, format description
Block = Literal Section - Sequences Section
Prerequisite : size of (compressed) block, maximum size of regenerated data
1) Literal Section
1.1) Header : 1-5 bytes
flags: 2 bits
00 compressed by Huff0
01 unused
10 is Raw (uncompressed)
11 is Rle
Note : using 01 => Huff0 with precomputed table ?
Note : delta map ? => compressed ?
1.1.1) Huff0-compressed literal block : 3-5 bytes
srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
srcSize < 1 KB => 3 bytes (2-2-10-10)
srcSize < 16KB => 4 bytes (2-2-14-14)
else => 5 bytes (2-2-18-18)
big endian convention
1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
size : 5 bits: (IS_RAW<<6) + (0<<4) + size
12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
size&255
20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
size>>8&255
size&255
1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
size : 5 bits: (IS_RLE<<6) + (0<<4) + size
12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
size&255
20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
size>>8&255
size&255
1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
srcSize < 1 KB => 3 bytes (2-2-10-10)
srcSize < 16KB => 4 bytes (2-2-14-14)
else => 5 bytes (2-2-18-18)
big endian convention
1- CTable available (stored into workspace ?)
2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
1.2) Literal block content
1.2.1) Huff0 block, using sizes from header
See Huff0 format
1.2.2) Huff0 block, using prepared table
1.2.3) Raw content
1.2.4) single byte
2) Sequences section
TO DO
*/
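/* Worked example (illustrative) : a raw (uncompressed) run of 20 literals fits the 1-byte
   header form : first byte = (IS_RAW<<6) + (0<<4) + 20, then the 20 literal bytes follow.
   A run of 1000 literals needs the 12-bit form : (IS_RAW<<6) + (2<<4) + (1000>>8) = header
   byte, followed by (1000 & 255) = 232, then the literals.
   The matching parsing logic is in ZSTDv06_decodeLiteralsBlock() below (case IS_RAW). */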
/** ZSTDv06_frameHeaderSize() :
* srcSize must be >= ZSTDv06_frameHeaderSize_min.
* @return : size of the Frame Header */
static size_t ZSTDv06_frameHeaderSize(const void* src, size_t srcSize)
{
if (srcSize < ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong);
{ U32 const fcsId = (((const BYTE*)src)[4]) >> 6;
return ZSTDv06_frameHeaderSize_min + ZSTDv06_fcs_fieldSize[fcsId]; }
}
/** ZSTDv06_getFrameParams() :
* decode Frame Header, or provide expected `srcSize`.
* @return : 0, `fparamsPtr` is correctly filled,
* >0, `srcSize` is too small, result is expected `srcSize`,
* or an error code, which can be tested using ZSTDv06_isError() */
size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
if (srcSize < ZSTDv06_frameHeaderSize_min) return ZSTDv06_frameHeaderSize_min;
if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) return ERROR(prefix_unknown);
/* ensure there is enough `srcSize` to fully read/decode frame header */
{ size_t const fhsize = ZSTDv06_frameHeaderSize(src, srcSize);
if (srcSize < fhsize) return fhsize; }
memset(fparamsPtr, 0, sizeof(*fparamsPtr));
{ BYTE const frameDesc = ip[4];
fparamsPtr->windowLog = (frameDesc & 0xF) + ZSTDv06_WINDOWLOG_ABSOLUTEMIN;
if ((frameDesc & 0x20) != 0) return ERROR(frameParameter_unsupported); /* reserved 1 bit */
switch(frameDesc >> 6) /* fcsId */
{
default: /* impossible */
case 0 : fparamsPtr->frameContentSize = 0; break;
case 1 : fparamsPtr->frameContentSize = ip[5]; break;
case 2 : fparamsPtr->frameContentSize = MEM_readLE16(ip+5)+256; break;
case 3 : fparamsPtr->frameContentSize = MEM_readLE64(ip+5); break;
} }
return 0;
}
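/* Example of interpreting the three possible outcomes (illustrative sketch) :
       ZSTDv06_frameParams fParams;
       size_t const r = ZSTDv06_getFrameParams(&fParams, src, srcSize);
       if (ZSTDv06_isError(r)) return r;    error : not a zstd v0.6 frame, or unsupported
       if (r > 0) return r;                 srcSize too small : r bytes are needed in total
   When r == 0, fParams is correctly filled and can be used.
*/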
/** ZSTDv06_decodeFrameHeader() :
* `srcSize` must be the size provided by ZSTDv06_frameHeaderSize().
* @return : 0 if success, or an error code, which can be tested using ZSTDv06_isError() */
static size_t ZSTDv06_decodeFrameHeader(ZSTDv06_DCtx* zc, const void* src, size_t srcSize)
{
size_t const result = ZSTDv06_getFrameParams(&(zc->fParams), src, srcSize);
if ((MEM_32bits()) && (zc->fParams.windowLog > 25)) return ERROR(frameParameter_unsupported);
return result;
}
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
/*! ZSTDv06_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
static size_t ZSTDv06_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE*)src;
U32 cSize;
if (srcSize < ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
bpPtr->blockType = (blockType_t)((*in) >> 6);
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
static size_t ZSTDv06_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
if (dst==NULL) return ERROR(dstSize_tooSmall);
if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
memcpy(dst, src, srcSize);
return srcSize;
}
/*! ZSTDv06_decodeLiteralsBlock() :
@return : nb of bytes read from src (< srcSize ) */
static size_t ZSTDv06_decodeLiteralsBlock(ZSTDv06_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
const BYTE* const istart = (const BYTE*) src;
/* any compressed block with literals segment must be at least this size */
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
switch(istart[0]>> 6)
{
case IS_HUF:
{ size_t litSize, litCSize, singleStream=0;
U32 lhSize = ((istart[0]) >> 4) & 3;
if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
/* 2 - 2 - 10 - 10 */
lhSize=3;
singleStream = istart[0] & 16;
litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
litCSize = ((istart[1] & 3) << 8) + istart[2];
break;
case 2:
/* 2 - 2 - 14 - 14 */
lhSize=4;
litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
litCSize = ((istart[2] & 63) << 8) + istart[3];
break;
case 3:
/* 2 - 2 - 18 - 18 */
lhSize=5;
litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4];
break;
}
if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
if (HUFv06_isError(singleStream ?
HUFv06_decompress1X2(dctx->litBuffer, litSize, istart+lhSize, litCSize) :
HUFv06_decompress (dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
return ERROR(corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case IS_PCH:
{ size_t litSize, litCSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
if (lhSize != 1) /* only case supported for now : small litSize, single stream */
return ERROR(corruption_detected);
if (!dctx->flagRepeatTable)
return ERROR(dictionary_corrupted);
/* 2 - 2 - 10 - 10 */
lhSize=3;
litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
litCSize = ((istart[1] & 3) << 8) + istart[2];
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
{ size_t const errorCode = HUFv06_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTableX4);
if (HUFv06_isError(errorCode)) return ERROR(corruption_detected);
}
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case IS_RAW:
{ size_t litSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
lhSize=1;
litSize = istart[0] & 31;
break;
case 2:
litSize = ((istart[0] & 15) << 8) + istart[1];
break;
case 3:
litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
break;
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litSize = litSize;
return lhSize+litSize;
}
case IS_RLE:
{ size_t litSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize into [0..3] */
lhSize = 1;
litSize = istart[0] & 31;
break;
case 2:
litSize = ((istart[0] & 15) << 8) + istart[1];
break;
case 3:
litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
break;
}
if (litSize > ZSTDv06_BLOCKSIZE_MAX) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
return ERROR(corruption_detected); /* impossible */
}
}
/*! ZSTDv06_buildSeqTable() :
@return : nb bytes read from src,
or an error code if it fails, testable with ZSTDv06_isError()
*/
static size_t ZSTDv06_buildSeqTable(FSEv06_DTable* DTable, U32 type, U32 max, U32 maxLog,
const void* src, size_t srcSize,
const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
{
switch(type)
{
case FSEv06_ENCODING_RLE :
if (!srcSize) return ERROR(srcSize_wrong);
if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
FSEv06_buildDTable_rle(DTable, *(const BYTE*)src); /* if *src > max, data is corrupted */
return 1;
case FSEv06_ENCODING_RAW :
FSEv06_buildDTable(DTable, defaultNorm, max, defaultLog);
return 0;
case FSEv06_ENCODING_STATIC:
if (!flagRepeatTable) return ERROR(corruption_detected);
return 0;
default : /* impossible */
case FSEv06_ENCODING_DYNAMIC :
{ U32 tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSEv06_readNCount(norm, &max, &tableLog, src, srcSize);
if (FSEv06_isError(headerSize)) return ERROR(corruption_detected);
if (tableLog > maxLog) return ERROR(corruption_detected);
FSEv06_buildDTable(DTable, norm, max, tableLog);
return headerSize;
} }
}
static size_t ZSTDv06_decodeSeqHeaders(int* nbSeqPtr,
FSEv06_DTable* DTableLL, FSEv06_DTable* DTableML, FSEv06_DTable* DTableOffb, U32 flagRepeatTable,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip = istart;
/* check */
if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
/* SeqHead */
{ int nbSeq = *ip++;
if (!nbSeq) { *nbSeqPtr=0; return 1; }
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
if (ip+2 > iend) return ERROR(srcSize_wrong);
nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
} else {
if (ip >= iend) return ERROR(srcSize_wrong);
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
*nbSeqPtr = nbSeq;
}
/* FSE table descriptors */
if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
{ U32 const LLtype = *ip >> 6;
U32 const Offtype = (*ip >> 4) & 3;
U32 const MLtype = (*ip >> 2) & 3;
ip++;
/* Build DTables */
{ size_t const bhSize = ZSTDv06_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
ip += bhSize;
}
{ size_t const bhSize = ZSTDv06_buildSeqTable(DTableOffb, Offtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
ip += bhSize;
}
{ size_t const bhSize = ZSTDv06_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
if (ZSTDv06_isError(bhSize)) return ERROR(corruption_detected);
ip += bhSize;
} }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t matchLength;
size_t offset;
} seq_t;
typedef struct {
BITv06_DStream_t DStream;
FSEv06_DState_t stateLL;
FSEv06_DState_t stateOffb;
FSEv06_DState_t stateML;
size_t prevOffset[ZSTDv06_REP_INIT];
} seqState_t;
static void ZSTDv06_decodeSequence(seq_t* seq, seqState_t* seqState)
{
/* Literal length */
U32 const llCode = FSEv06_peekSymbol(&(seqState->stateLL));
U32 const mlCode = FSEv06_peekSymbol(&(seqState->stateML));
U32 const ofCode = FSEv06_peekSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */
U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
U32 const ofBits = ofCode;
U32 const totalBits = llBits+mlBits+ofBits;
static const U32 LL_base[MaxLL+1] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
0x2000, 0x4000, 0x8000, 0x10000 };
static const U32 ML_base[MaxML+1] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 34, 36, 38, 40, 44, 48, 56, 64, 80, 96, 0x80, 0x100, 0x200, 0x400, 0x800,
0x1000, 0x2000, 0x4000, 0x8000, 0x10000 };
static const U32 OF_base[MaxOff+1] = {
0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, /*fake*/ 1, 1 };
/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
offset = OF_base[ofCode] + BITv06_readBits(&(seqState->DStream), ofBits); /* <= 26 bits */
if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream));
}
if (offset < ZSTDv06_REP_NUM) {
if (llCode == 0 && offset <= 1) offset = 1-offset;
if (offset != 0) {
size_t temp = seqState->prevOffset[offset];
if (offset != 1) {
seqState->prevOffset[2] = seqState->prevOffset[1];
}
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else {
offset = seqState->prevOffset[0];
}
} else {
offset -= ZSTDv06_REP_MOVE;
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
seq->offset = offset;
}
seq->matchLength = ML_base[mlCode] + MINMATCH + ((mlCode>31) ? BITv06_readBits(&(seqState->DStream), mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits>24)) BITv06_reloadDStream(&(seqState->DStream));
seq->litLength = LL_base[llCode] + ((llCode>15) ? BITv06_readBits(&(seqState->DStream), llBits) : 0); /* <= 16 bits */
if (MEM_32bits() ||
(totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv06_reloadDStream(&(seqState->DStream));
/* ANS state update */
FSEv06_updateState(&(seqState->stateLL), &(seqState->DStream)); /* <= 9 bits */
FSEv06_updateState(&(seqState->stateML), &(seqState->DStream)); /* <= 9 bits */
if (MEM_32bits()) BITv06_reloadDStream(&(seqState->DStream)); /* <= 18 bits */
FSEv06_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <= 8 bits */
}
static size_t ZSTDv06_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_8 = oend-8;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 8 from oend */
if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
/* copy Literals */
ZSTDv06_wildcopy(op, *litPtr, sequence.litLength); /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
if (op > oend_8 || sequence.matchLength < MINMATCH) {
while (op < oMatchEnd) *op++ = *match++;
return sequenceLength;
}
} }
/* Requirement: op <= oend_8 */
/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTDv06_copy4(op+4, match);
match -= sub2;
} else {
ZSTDv06_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH)) {
if (op < oend_8) {
ZSTDv06_wildcopy(op, match, oend_8 - op);
match += oend_8 - op;
op = oend_8;
}
while (op < oMatchEnd) *op++ = *match++;
} else {
ZSTDv06_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
return sequenceLength;
}
static size_t ZSTDv06_decompressSequences(
ZSTDv06_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
FSEv06_DTable* DTableLL = dctx->LLTable;
FSEv06_DTable* DTableML = dctx->MLTable;
FSEv06_DTable* DTableOffb = dctx->OffTable;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
int nbSeq;
/* Build Decoding Tables */
{ size_t const seqHSize = ZSTDv06_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->flagRepeatTable, ip, seqSize);
if (ZSTDv06_isError(seqHSize)) return seqHSize;
ip += seqHSize;
dctx->flagRepeatTable = 0;
}
/* Regen sequences */
if (nbSeq) {
seq_t sequence;
seqState_t seqState;
memset(&sequence, 0, sizeof(sequence));
sequence.offset = REPCODE_STARTVALUE;
{ U32 i; for (i=0; i<ZSTDv06_REP_INIT; i++) seqState.prevOffset[i] = REPCODE_STARTVALUE; }
{ size_t const errorCode = BITv06_initDStream(&(seqState.DStream), ip, iend-ip);
if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
FSEv06_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSEv06_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSEv06_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (BITv06_reloadDStream(&(seqState.DStream)) <= BITv06_DStream_completed) && nbSeq ; ) {
nbSeq--;
ZSTDv06_decodeSequence(&sequence, &seqState);
#if 0 /* debug */
static BYTE* start = NULL;
if (start==NULL) start = op;
size_t pos = (size_t)(op-start);
if ((pos >= 5810037) && (pos < 5810400))
printf("Dpos %6u :%5u literals & match %3u bytes at distance %6u \n",
pos, (U32)sequence.litLength, (U32)sequence.matchLength, (U32)sequence.offset);
#endif
{ size_t const oneSeqSize = ZSTDv06_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTDv06_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
} }
/* check if reached exact end */
if (nbSeq) return ERROR(corruption_detected);
}
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
if (litPtr > litEnd) return ERROR(corruption_detected); /* too many literals already used */
if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
static void ZSTDv06_checkContinuity(ZSTDv06_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dst;
dctx->previousDstEnd = dst;
}
}
static size_t ZSTDv06_decompressBlock_internal(ZSTDv06_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
if (srcSize >= ZSTDv06_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
/* Decode literals sub-block */
{ size_t const litCSize = ZSTDv06_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTDv06_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
}
return ZSTDv06_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
}
size_t ZSTDv06_decompressBlock(ZSTDv06_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
ZSTDv06_checkContinuity(dctx, dst);
return ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
}
/*! ZSTDv06_decompressFrame() :
* `dctx` must be properly initialized */
static size_t ZSTDv06_decompressFrame(ZSTDv06_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* const iend = ip + srcSize;
BYTE* const ostart = (BYTE* const)dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstCapacity;
size_t remainingSize = srcSize;
blockProperties_t blockProperties = { bt_compressed, 0 };
/* check */
if (srcSize < ZSTDv06_frameHeaderSize_min+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
/* Frame Header */
{ size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
if (ZSTDv06_isError(frameHeaderSize)) return frameHeaderSize;
if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) return ERROR(srcSize_wrong);
if (ZSTDv06_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}
/* Loop on each block */
while (1) {
size_t decodedSize=0;
size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
ip += ZSTDv06_blockHeaderSize;
remainingSize -= ZSTDv06_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTDv06_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
break;
case bt_raw :
decodedSize = ZSTDv06_copyRawBlock(op, oend-op, ip, cBlockSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet supported */
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
break;
default:
return ERROR(GENERIC); /* impossible */
}
if (cBlockSize == 0) break; /* bt_end */
if (ZSTDv06_isError(decodedSize)) return decodedSize;
op += decodedSize;
ip += cBlockSize;
remainingSize -= cBlockSize;
}
return op-ostart;
}
size_t ZSTDv06_decompress_usingPreparedDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* refDCtx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
ZSTDv06_copyDCtx(dctx, refDCtx);
ZSTDv06_checkContinuity(dctx, dst);
return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
ZSTDv06_decompressBegin_usingDict(dctx, dict, dictSize);
ZSTDv06_checkContinuity(dctx, dst);
return ZSTDv06_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
return ZSTDv06_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
}
size_t ZSTDv06_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
#if defined(ZSTDv06_HEAPMODE) && (ZSTDv06_HEAPMODE==1)
size_t regenSize;
ZSTDv06_DCtx* dctx = ZSTDv06_createDCtx();
if (dctx==NULL) return ERROR(memory_allocation);
regenSize = ZSTDv06_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTDv06_freeDCtx(dctx);
return regenSize;
#else /* stack mode */
ZSTDv06_DCtx dctx;
return ZSTDv06_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
#endif
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
blockProperties_t blockProperties = { bt_compressed, 0 };
/* Frame Header */
{ size_t const frameHeaderSize = ZSTDv06_frameHeaderSize(src, srcSize);
if (ZSTDv06_isError(frameHeaderSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
return;
}
if (MEM_readLE32(src) != ZSTDv06_MAGICNUMBER) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
if (srcSize < frameHeaderSize+ZSTDv06_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}
/* Loop on each block */
while (1) {
size_t const cBlockSize = ZSTDv06_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv06_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
ip += ZSTDv06_blockHeaderSize;
remainingSize -= ZSTDv06_blockHeaderSize;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
if (cBlockSize == 0) break; /* bt_end */
ip += cBlockSize;
remainingSize -= cBlockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * ZSTDv06_BLOCKSIZE_MAX;
}
/*_******************************
* Streaming Decompression API
********************************/
size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx)
{
return dctx->expected;
}
size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
/* Sanity check */
if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
if (dstCapacity) ZSTDv06_checkContinuity(dctx, dst);
/* Decompress : frame header; part 1 */
switch (dctx->stage)
{
case ZSTDds_getFrameHeaderSize :
if (srcSize != ZSTDv06_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
dctx->headerSize = ZSTDv06_frameHeaderSize(src, ZSTDv06_frameHeaderSize_min);
if (ZSTDv06_isError(dctx->headerSize)) return dctx->headerSize;
memcpy(dctx->headerBuffer, src, ZSTDv06_frameHeaderSize_min);
if (dctx->headerSize > ZSTDv06_frameHeaderSize_min) {
dctx->expected = dctx->headerSize - ZSTDv06_frameHeaderSize_min;
dctx->stage = ZSTDds_decodeFrameHeader;
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
/* fall-through */
case ZSTDds_decodeFrameHeader:
{ size_t result;
memcpy(dctx->headerBuffer + ZSTDv06_frameHeaderSize_min, src, dctx->expected);
result = ZSTDv06_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
if (ZSTDv06_isError(result)) return result;
dctx->expected = ZSTDv06_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;
}
case ZSTDds_decodeBlockHeader:
{ blockProperties_t bp;
size_t const cBlockSize = ZSTDv06_getcBlockSize(src, ZSTDv06_blockHeaderSize, &bp);
if (ZSTDv06_isError(cBlockSize)) return cBlockSize;
if (bp.blockType == bt_end) {
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
} else {
dctx->expected = cBlockSize;
dctx->bType = bp.blockType;
dctx->stage = ZSTDds_decompressBlock;
}
return 0;
}
case ZSTDds_decompressBlock:
{ size_t rSize;
switch(dctx->bType)
{
case bt_compressed:
rSize = ZSTDv06_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
break;
case bt_raw :
rSize = ZSTDv06_copyRawBlock(dst, dstCapacity, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC); /* impossible */
}
dctx->stage = ZSTDds_decodeBlockHeader;
dctx->expected = ZSTDv06_blockHeaderSize;
dctx->previousDstEnd = (char*)dst + rSize;
return rSize;
}
default:
return ERROR(GENERIC); /* impossible */
}
}
static void ZSTDv06_refDictContent(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
{
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dict;
dctx->previousDstEnd = (const char*)dict + dictSize;
}
static size_t ZSTDv06_loadEntropy(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
{
size_t hSize, offcodeHeaderSize, matchlengthHeaderSize, litlengthHeaderSize;
hSize = HUFv06_readDTableX4(dctx->hufTableX4, dict, dictSize);
if (HUFv06_isError(hSize)) return ERROR(dictionary_corrupted);
dict = (const char*)dict + hSize;
dictSize -= hSize;
{ short offcodeNCount[MaxOff+1];
U32 offcodeMaxValue=MaxOff, offcodeLog;
offcodeHeaderSize = FSEv06_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dict, dictSize);
if (FSEv06_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
{ size_t const errorCode = FSEv06_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
dict = (const char*)dict + offcodeHeaderSize;
dictSize -= offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
matchlengthHeaderSize = FSEv06_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dict, dictSize);
if (FSEv06_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
{ size_t const errorCode = FSEv06_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
dict = (const char*)dict + matchlengthHeaderSize;
dictSize -= matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
litlengthHeaderSize = FSEv06_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dict, dictSize);
if (FSEv06_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
{ size_t const errorCode = FSEv06_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
if (FSEv06_isError(errorCode)) return ERROR(dictionary_corrupted); }
}
dctx->flagRepeatTable = 1;
return hSize + offcodeHeaderSize + matchlengthHeaderSize + litlengthHeaderSize;
}
static size_t ZSTDv06_decompress_insertDictionary(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
{
size_t eSize;
U32 const magic = MEM_readLE32(dict);
if (magic != ZSTDv06_DICT_MAGIC) {
/* pure content mode */
ZSTDv06_refDictContent(dctx, dict, dictSize);
return 0;
}
/* load entropy tables */
dict = (const char*)dict + 4;
dictSize -= 4;
eSize = ZSTDv06_loadEntropy(dctx, dict, dictSize);
if (ZSTDv06_isError(eSize)) return ERROR(dictionary_corrupted);
/* reference dictionary content */
dict = (const char*)dict + eSize;
dictSize -= eSize;
ZSTDv06_refDictContent(dctx, dict, dictSize);
return 0;
}
size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize)
{
{ size_t const errorCode = ZSTDv06_decompressBegin(dctx);
if (ZSTDv06_isError(errorCode)) return errorCode; }
if (dict && dictSize) {
size_t const errorCode = ZSTDv06_decompress_insertDictionary(dctx, dict, dictSize);
if (ZSTDv06_isError(errorCode)) return ERROR(dictionary_corrupted);
}
return 0;
}
/*
Buffered version of Zstd compression library
Copyright (C) 2015-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net/
*/
/*-***************************************************************************
* Streaming decompression howto
*
* A ZBUFFv06_DCtx object is required to track streaming operations.
* Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
* Use ZBUFFv06_decompressInit() to start a new decompression operation,
* or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
* Note that ZBUFFv06_DCtx objects can be re-init multiple times.
*
* Use ZBUFFv06_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
* or 0 when a frame is completely decoded,
* or an error code, which can be tested using ZBUFFv06_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
* output : ZBUFFv06_recommendedDOutSize == 128 KB. The block size is the internal unit; this size ensures it's always possible to write a full block when decoded.
* input : ZBUFFv06_recommendedDInSize == 128KB + 3;
* just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
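/* A minimal usage sketch of the loop described above (kept out of the build,
 * like the other illustrative blocks in this file). readInput() and
 * writeOutput() are hypothetical caller-provided helpers, not part of this
 * library; any buffer sizes work, the recommended ones merely reduce latency. */
#if 0 /* usage sketch */
static size_t ZBUFFv06_streamingExample(void)
{
    char inBuff[128*1024 + 3];   /* ~ ZBUFFv06_recommendedDInSize() */
    char outBuff[128*1024];      /* ~ ZBUFFv06_recommendedDOutSize() */
    size_t hint = 1;             /* non-zero => frame not finished */
    ZBUFFv06_DCtx* const zbd = ZBUFFv06_createDCtx();
    if (zbd==NULL) return ERROR(memory_allocation);
    ZBUFFv06_decompressInit(zbd);
    while (hint != 0) {
        size_t srcSize = readInput(inBuff, sizeof(inBuff));   /* hypothetical input callback */
        const char* ip = inBuff;
        if (srcSize == 0) { ZBUFFv06_freeDCtx(zbd); return ERROR(srcSize_wrong); }  /* truncated stream */
        while (srcSize > 0) {
            size_t dstCapacity = sizeof(outBuff);
            size_t readSize = srcSize;
            hint = ZBUFFv06_decompressContinue(zbd, outBuff, &dstCapacity, ip, &readSize);
            if (ZBUFFv06_isError(hint)) { ZBUFFv06_freeDCtx(zbd); return hint; }
            writeOutput(outBuff, dstCapacity);                /* hypothetical output callback */
            ip += readSize; srcSize -= readSize;
            if (hint == 0) break;                             /* frame completely decoded */
        }
    }
    ZBUFFv06_freeDCtx(zbd);
    return 0;
}
#endif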
typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv06_dStage;
/* *** Resource management *** */
struct ZBUFFv06_DCtx_s {
ZSTDv06_DCtx* zd;
ZSTDv06_frameParams fParams;
ZBUFFv06_dStage stage;
char* inBuff;
size_t inBuffSize;
size_t inPos;
char* outBuff;
size_t outBuffSize;
size_t outStart;
size_t outEnd;
size_t blockSize;
BYTE headerBuffer[ZSTDv06_FRAMEHEADERSIZE_MAX];
size_t lhSize;
}; /* typedef'd to ZBUFFv06_DCtx within "zstd_buffered.h" */
ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void)
{
ZBUFFv06_DCtx* zbd = (ZBUFFv06_DCtx*)malloc(sizeof(ZBUFFv06_DCtx));
if (zbd==NULL) return NULL;
memset(zbd, 0, sizeof(*zbd));
zbd->zd = ZSTDv06_createDCtx();
zbd->stage = ZBUFFds_init;
return zbd;
}
size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* zbd)
{
if (zbd==NULL) return 0; /* support free on null */
ZSTDv06_freeDCtx(zbd->zd);
free(zbd->inBuff);
free(zbd->outBuff);
free(zbd);
return 0;
}
/* *** Initialization *** */
size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* zbd, const void* dict, size_t dictSize)
{
zbd->stage = ZBUFFds_loadHeader;
zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
return ZSTDv06_decompressBegin_usingDict(zbd->zd, dict, dictSize);
}
size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* zbd)
{
return ZBUFFv06_decompressInitDictionary(zbd, NULL, 0);
}
MEM_STATIC size_t ZBUFFv06_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
size_t length = MIN(dstCapacity, srcSize);
if (length > 0) {
memcpy(dst, src, length);
}
return length;
}
/* *** Decompression *** */
size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* zbd,
void* dst, size_t* dstCapacityPtr,
const void* src, size_t* srcSizePtr)
{
const char* const istart = (const char*)src;
const char* const iend = istart + *srcSizePtr;
const char* ip = istart;
char* const ostart = (char*)dst;
char* const oend = ostart + *dstCapacityPtr;
char* op = ostart;
U32 notDone = 1;
while (notDone) {
switch(zbd->stage)
{
case ZBUFFds_init :
return ERROR(init_missing);
case ZBUFFds_loadHeader :
{ size_t const hSize = ZSTDv06_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
if (hSize != 0) {
size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */
if (ZSTDv06_isError(hSize)) return hSize;
if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */
memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
zbd->lhSize += iend-ip;
*dstCapacityPtr = 0;
return (hSize - zbd->lhSize) + ZSTDv06_blockHeaderSize; /* remaining header bytes + next block header */
}
memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
break;
} }
/* Consume header */
{ size_t const h1Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd); /* == ZSTDv06_frameHeaderSize_min */
size_t const h1Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
if (ZSTDv06_isError(h1Result)) return h1Result;
if (h1Size < zbd->lhSize) { /* long header */
size_t const h2Size = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
size_t const h2Result = ZSTDv06_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
if (ZSTDv06_isError(h2Result)) return h2Result;
} }
/* Frame header instructs buffer sizes */
{ size_t const blockSize = MIN(1 << zbd->fParams.windowLog, ZSTDv06_BLOCKSIZE_MAX);
zbd->blockSize = blockSize;
if (zbd->inBuffSize < blockSize) {
free(zbd->inBuff);
zbd->inBuffSize = blockSize;
zbd->inBuff = (char*)malloc(blockSize);
if (zbd->inBuff == NULL) return ERROR(memory_allocation);
}
{ size_t const neededOutSize = ((size_t)1 << zbd->fParams.windowLog) + blockSize + WILDCOPY_OVERLENGTH * 2;
if (zbd->outBuffSize < neededOutSize) {
free(zbd->outBuff);
zbd->outBuffSize = neededOutSize;
zbd->outBuff = (char*)malloc(neededOutSize);
if (zbd->outBuff == NULL) return ERROR(memory_allocation);
} } }
zbd->stage = ZBUFFds_read;
/* fall-through */
case ZBUFFds_read:
{ size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
if (neededInSize==0) { /* end of frame */
zbd->stage = ZBUFFds_init;
notDone = 0;
break;
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
ip, neededInSize);
if (ZSTDv06_isError(decodedSize)) return decodedSize;
ip += neededInSize;
if (!decodedSize) break; /* this was just a header */
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
break;
}
if (ip==iend) { notDone = 0; break; } /* no more input */
zbd->stage = ZBUFFds_load;
}
/* fall-through */
case ZBUFFds_load:
{ size_t const neededInSize = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
size_t loadedSize;
if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected); /* should never happen */
loadedSize = ZBUFFv06_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
ip += loadedSize;
zbd->inPos += loadedSize;
if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
{ size_t const decodedSize = ZSTDv06_decompressContinue(zbd->zd,
zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
zbd->inBuff, neededInSize);
if (ZSTDv06_isError(decodedSize)) return decodedSize;
zbd->inPos = 0; /* input is consumed */
if (!decodedSize) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
/* break; */ /* ZBUFFds_flush follows */
}
}
/* fall-through */
case ZBUFFds_flush:
{ size_t const toFlushSize = zbd->outEnd - zbd->outStart;
size_t const flushedSize = ZBUFFv06_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
op += flushedSize;
zbd->outStart += flushedSize;
if (flushedSize == toFlushSize) {
zbd->stage = ZBUFFds_read;
if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
zbd->outStart = zbd->outEnd = 0;
break;
}
/* cannot flush everything */
notDone = 0;
break;
}
default: return ERROR(GENERIC); /* impossible */
} }
/* result */
*srcSizePtr = ip-istart;
*dstCapacityPtr = op-ostart;
{ size_t nextSrcSizeHint = ZSTDv06_nextSrcSizeToDecompress(zbd->zd);
if (nextSrcSizeHint > ZSTDv06_blockHeaderSize) nextSrcSizeHint+= ZSTDv06_blockHeaderSize; /* get following block header too */
nextSrcSizeHint -= zbd->inPos; /* already loaded*/
return nextSrcSizeHint;
}
}
/* *************************************
* Tool functions
***************************************/
size_t ZBUFFv06_recommendedDInSize(void) { return ZSTDv06_BLOCKSIZE_MAX + ZSTDv06_blockHeaderSize /* block header size*/ ; }
size_t ZBUFFv06_recommendedDOutSize(void) { return ZSTDv06_BLOCKSIZE_MAX; }
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv06_H
#define ZSTDv06_H
#if defined (__cplusplus)
extern "C" {
#endif
/*====== Dependency ======*/
#include <stddef.h> /* size_t */
/*====== Export for Windows ======*/
/*!
* ZSTDv06_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv06_DLL_EXPORT) && (ZSTDv06_DLL_EXPORT==1)
# define ZSTDLIBv06_API __declspec(dllexport)
#else
# define ZSTDLIBv06_API
#endif
/* *************************************
* Simple functions
***************************************/
/*! ZSTDv06_decompress() :
`compressedSize` : must be the _exact_ size of the compressed blob, otherwise decompression will fail.
`dstCapacity` must be large enough, equal or larger than originalSize.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTDv06_isError()) */
ZSTDLIBv06_API size_t ZSTDv06_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
/**
ZSTDv06_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.6.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv06_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv06_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
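/* Illustrative sketch (not compiled) : combining the two entry points above to
 * size-check a destination buffer before decoding a single v0.6 frame. The
 * helper name is hypothetical; cSize/dBound semantics are exactly those
 * documented above. */
#if 0 /* usage sketch */
static size_t ZSTDv06_decodeOneFrameExample(void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize)
{
    size_t cSize;
    unsigned long long dBound;
    ZSTDv06_findFrameSizeInfoLegacy(src, srcSize, &cSize, &dBound);
    if (ZSTDv06_isError(cSize)) return cSize;                  /* not a valid v0.6 frame */
    if (dBound > dstCapacity) return ERROR(dstSize_tooSmall);  /* dst may be too small for worst case */
    return ZSTDv06_decompress(dst, dstCapacity, src, cSize);
}
#endif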
/* *************************************
* Helper functions
***************************************/
ZSTDLIBv06_API size_t ZSTDv06_compressBound(size_t srcSize); /*!< maximum compressed size (worst case scenario) */
/* Error Management */
ZSTDLIBv06_API unsigned ZSTDv06_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv06_API const char* ZSTDv06_getErrorName(size_t code); /*!< provides readable string for an error code */
/* *************************************
* Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv06_DCtx_s ZSTDv06_DCtx;
ZSTDLIBv06_API ZSTDv06_DCtx* ZSTDv06_createDCtx(void);
ZSTDLIBv06_API size_t ZSTDv06_freeDCtx(ZSTDv06_DCtx* dctx); /*!< @return : errorCode */
/** ZSTDv06_decompressDCtx() :
* Same as ZSTDv06_decompress(), but requires an already allocated ZSTDv06_DCtx (see ZSTDv06_createDCtx()) */
ZSTDLIBv06_API size_t ZSTDv06_decompressDCtx(ZSTDv06_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/*-***********************
* Dictionary API
*************************/
/*! ZSTDv06_decompress_usingDict() :
* Decompression using a pre-defined Dictionary content (see dictBuilder).
* Dictionary must be identical to the one used during compression, otherwise regenerated data will be corrupted.
* Note : dict can be NULL, in which case, it's equivalent to ZSTDv06_decompressDCtx() */
ZSTDLIBv06_API size_t ZSTDv06_decompress_usingDict(ZSTDv06_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
/*-************************
* Advanced Streaming API
***************************/
struct ZSTDv06_frameParams_s { unsigned long long frameContentSize; unsigned windowLog; };
typedef struct ZSTDv06_frameParams_s ZSTDv06_frameParams;
ZSTDLIBv06_API size_t ZSTDv06_getFrameParams(ZSTDv06_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input */
ZSTDLIBv06_API size_t ZSTDv06_decompressBegin_usingDict(ZSTDv06_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv06_API void ZSTDv06_copyDCtx(ZSTDv06_DCtx* dctx, const ZSTDv06_DCtx* preparedDCtx);
ZSTDLIBv06_API size_t ZSTDv06_nextSrcSizeToDecompress(ZSTDv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* *************************************
* ZBUFF API
***************************************/
typedef struct ZBUFFv06_DCtx_s ZBUFFv06_DCtx;
ZSTDLIBv06_API ZBUFFv06_DCtx* ZBUFFv06_createDCtx(void);
ZSTDLIBv06_API size_t ZBUFFv06_freeDCtx(ZBUFFv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZBUFFv06_decompressInit(ZBUFFv06_DCtx* dctx);
ZSTDLIBv06_API size_t ZBUFFv06_decompressInitDictionary(ZBUFFv06_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv06_API size_t ZBUFFv06_decompressContinue(ZBUFFv06_DCtx* dctx,
void* dst, size_t* dstCapacityPtr,
const void* src, size_t* srcSizePtr);
/*-***************************************************************************
* Streaming decompression howto
*
* A ZBUFFv06_DCtx object is required to track streaming operations.
* Use ZBUFFv06_createDCtx() and ZBUFFv06_freeDCtx() to create/release resources.
* Use ZBUFFv06_decompressInit() to start a new decompression operation,
* or ZBUFFv06_decompressInitDictionary() if decompression requires a dictionary.
* Note that ZBUFFv06_DCtx objects can be re-init multiple times.
*
* Use ZBUFFv06_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
* or 0 when a frame is completely decoded,
* or an error code, which can be tested using ZBUFFv06_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFFv06_recommendedDInSize() and ZBUFFv06_recommendedDOutSize()
* output : ZBUFFv06_recommendedDOutSize == 128 KB. The block size is the internal unit; this size ensures it's always possible to write a full block when decoded.
* input : ZBUFFv06_recommendedDInSize == 128KB + 3;
* just follow indications from ZBUFFv06_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
/* *************************************
* Tool functions
***************************************/
ZSTDLIBv06_API unsigned ZBUFFv06_isError(size_t errorCode);
ZSTDLIBv06_API const char* ZBUFFv06_getErrorName(size_t errorCode);
/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
* These sizes are just hints; using them tends to offer better latency */
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDInSize(void);
ZSTDLIBv06_API size_t ZBUFFv06_recommendedDOutSize(void);
/*-*************************************
* Constants
***************************************/
#define ZSTDv06_MAGICNUMBER 0xFD2FB526 /* v0.6 */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTDv06_H */
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
/*- Dependencies -*/
#include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */
#include <stdlib.h> /* malloc, free, qsort */
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
#endif
#include "../common/xxhash.h" /* XXH64_* */
#include "zstd_v07.h"
#define FSEv07_STATIC_LINKING_ONLY /* FSEv07_MIN_TABLELOG */
#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
#define ZSTDv07_STATIC_LINKING_ONLY
#include "../common/error_private.h"
#ifdef ZSTDv07_STATIC_LINKING_ONLY
/* ====================================================================================
* The definitions in this section are considered experimental.
* They should never be used with a dynamic library, as they may change in the future.
* They are provided for advanced usages.
* Use them only in association with static linking.
* ==================================================================================== */
/*--- Constants ---*/
#define ZSTDv07_MAGIC_SKIPPABLE_START 0x184D2A50U
#define ZSTDv07_WINDOWLOG_MAX_32 25
#define ZSTDv07_WINDOWLOG_MAX_64 27
#define ZSTDv07_WINDOWLOG_MAX ((U32)(MEM_32bits() ? ZSTDv07_WINDOWLOG_MAX_32 : ZSTDv07_WINDOWLOG_MAX_64))
#define ZSTDv07_WINDOWLOG_MIN 18
#define ZSTDv07_CHAINLOG_MAX (ZSTDv07_WINDOWLOG_MAX+1)
#define ZSTDv07_CHAINLOG_MIN 4
#define ZSTDv07_HASHLOG_MAX ZSTDv07_WINDOWLOG_MAX
#define ZSTDv07_HASHLOG_MIN 12
#define ZSTDv07_HASHLOG3_MAX 17
#define ZSTDv07_SEARCHLOG_MAX (ZSTDv07_WINDOWLOG_MAX-1)
#define ZSTDv07_SEARCHLOG_MIN 1
#define ZSTDv07_SEARCHLENGTH_MAX 7
#define ZSTDv07_SEARCHLENGTH_MIN 3
#define ZSTDv07_TARGETLENGTH_MIN 4
#define ZSTDv07_TARGETLENGTH_MAX 999
#define ZSTDv07_FRAMEHEADERSIZE_MAX 18 /* for static allocation */
static const size_t ZSTDv07_frameHeaderSize_min = 5;
static const size_t ZSTDv07_frameHeaderSize_max = ZSTDv07_FRAMEHEADERSIZE_MAX;
static const size_t ZSTDv07_skippableHeaderSize = 8; /* magic number + skippable frame length */
/* custom memory allocation functions */
typedef void* (*ZSTDv07_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTDv07_freeFunction) (void* opaque, void* address);
typedef struct { ZSTDv07_allocFunction customAlloc; ZSTDv07_freeFunction customFree; void* opaque; } ZSTDv07_customMem;
/*--- Advanced Decompression functions ---*/
/*! ZSTDv07_estimateDCtxSize() :
* Gives the potential amount of memory allocated to create a ZSTDv07_DCtx */
ZSTDLIBv07_API size_t ZSTDv07_estimateDCtxSize(void);
/*! ZSTDv07_createDCtx_advanced() :
* Create a ZSTD decompression context using external alloc and free functions */
ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem);
/*! ZSTDv07_sizeofDCtx() :
* Gives the amount of memory used by a given ZSTDv07_DCtx */
ZSTDLIBv07_API size_t ZSTDv07_sizeofDCtx(const ZSTDv07_DCtx* dctx);
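/* Illustrative sketch (not compiled) : routing allocations through a
 * ZSTDv07_customMem. The wrapper names are hypothetical; only the customMem
 * layout and ZSTDv07_createDCtx_advanced() come from the declarations above. */
#if 0 /* usage sketch */
static void* ZSTDv07_exampleAlloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
static void  ZSTDv07_exampleFree (void* opaque, void* address) { (void)opaque; free(address); }

static ZSTDv07_DCtx* ZSTDv07_createDCtxWithMalloc(void)
{
    ZSTDv07_customMem const cmem = { ZSTDv07_exampleAlloc, ZSTDv07_exampleFree, NULL };
    return ZSTDv07_createDCtx_advanced(cmem);
}
#endif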
/* ******************************************************************
* Buffer-less streaming functions (synchronous mode)
********************************************************************/
ZSTDLIBv07_API size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv07_API void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* preparedDCtx);
ZSTDLIBv07_API size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/*
Buffer-less streaming decompression (synchronous mode)
A ZSTDv07_DCtx object is required to track streaming operations.
Use ZSTDv07_createDCtx() / ZSTDv07_freeDCtx() to manage it.
A ZSTDv07_DCtx object can be re-used multiple times.
First optional operation is to retrieve frame parameters, using ZSTDv07_getFrameParams(), which doesn't consume the input.
It can provide the minimum size of rolling buffer required to properly decompress data (`windowSize`),
and optionally the final size of uncompressed content.
(Note : content size is an optional info that may not be present. 0 means : content size unknown)
Frame parameters are extracted from the beginning of compressed frame.
The amount of data to read is variable, from ZSTDv07_frameHeaderSize_min to ZSTDv07_frameHeaderSize_max (so if `srcSize` >= ZSTDv07_frameHeaderSize_max, it will always work)
If `srcSize` is too small for operation to succeed, function will return the minimum size it requires to produce a result.
Result : 0 when successful; it means the ZSTDv07_frameParams structure has been filled.
>0 : means there is not enough data in `src`; provides the expected size to successfully decode the header.
or an errorCode, which can be tested using ZSTDv07_isError()
Start decompression, with ZSTDv07_decompressBegin() or ZSTDv07_decompressBegin_usingDict().
Alternatively, you can copy a prepared context, using ZSTDv07_copyDCtx().
Then use ZSTDv07_nextSrcSizeToDecompress() and ZSTDv07_decompressContinue() alternatively.
ZSTDv07_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTDv07_decompressContinue().
ZSTDv07_decompressContinue() requires this exact amount of bytes, or it will fail.
@result of ZSTDv07_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
It can be zero, which is not an error; it just means ZSTDv07_decompressContinue() has decoded some header.
ZSTDv07_decompressContinue() needs previous data blocks during decompression, up to `windowSize`.
They should preferably be located contiguously, prior to current block.
Alternatively, a round buffer of sufficient size is also possible. Sufficient size is determined by frame parameters.
ZSTDv07_decompressContinue() is very sensitive to contiguity;
if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
A frame is fully decoded when ZSTDv07_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
== Special case : skippable frames ==
Skippable frames allow the integration of user-defined data into a flow of concatenated frames.
Skippable frames will be ignored (skipped) by a decompressor. The format of a skippable frame is the following:
a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
For skippable frames ZSTDv07_decompressContinue() always returns 0.
For skippable frames, ZSTDv07_getFrameParams() returns fparamsPtr->windowLog==0, which means that the frame is skippable.
It also returns Frame Size as fparamsPtr->frameContentSize.
*/
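/* Illustrative sketch (not compiled) : the buffer-less loop described above,
 * decoding one frame whose blocks land contiguously in `dst`. The whole
 * compressed frame is assumed present in memory; skippable frames and
 * round-buffer management are left out for brevity. */
#if 0 /* usage sketch */
static size_t ZSTDv07_bufferlessDecodeExample(ZSTDv07_DCtx* dctx,
                                              void* dst, size_t dstCapacity,
                                              const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    size_t remaining = srcSize;
    { size_t const initResult = ZSTDv07_decompressBegin(dctx);
      if (ZSTDv07_isError(initResult)) return initResult; }
    while (1) {
        size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(dctx);
        size_t decodedSize;
        if (neededInSize == 0) break;                          /* frame fully decoded */
        if (neededInSize > remaining) return ERROR(srcSize_wrong);
        decodedSize = ZSTDv07_decompressContinue(dctx,
                          op, dstCapacity - (size_t)(op - (BYTE*)dst),
                          ip, neededInSize);
        if (ZSTDv07_isError(decodedSize)) return decodedSize;
        ip += neededInSize; remaining -= neededInSize;
        op += decodedSize;                                     /* 0 when only a header was decoded */
    }
    return (size_t)(op - (BYTE*)dst);
}
#endif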
/* **************************************
* Block functions
****************************************/
/*! Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~18 bytes, which can be non-negligible for very small blocks (< 100 bytes).
The user is responsible for keeping track of the information required to regenerate the data, such as the compressed and content sizes.
A few rules to respect :
- Compressing and decompressing require a context structure
+ Use ZSTDv07_createCCtx() and ZSTDv07_createDCtx()
- It is necessary to init context before starting
+ compression : ZSTDv07_compressBegin()
+ decompression : ZSTDv07_decompressBegin()
+ variants _usingDict() are also allowed
+ copyCCtx() and copyDCtx() work too
- Block size is limited, it must be <= ZSTDv07_getBlockSizeMax()
+ If you need to compress more, cut data into multiple blocks
+ Consider using the regular ZSTDv07_compress() instead, as frame metadata costs become negligible when source size is large.
- When a block is considered not compressible enough, ZSTDv07_compressBlock() result will be zero.
In which case, nothing is produced into `dst`.
+ User must test for such outcome and deal directly with uncompressed data
+ ZSTDv07_decompressBlock() doesn't accept uncompressed data as input !!!
+ In case of multiple successive blocks, decoder must be informed of uncompressed block existence to follow proper history.
Use ZSTDv07_insertBlock() in such a case.
*/
#define ZSTDv07_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) /* define, for static allocation */
ZSTDLIBv07_API size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert block into `dctx` history. Useful for uncompressed blocks */
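/* Illustrative sketch (not compiled) : applying the block-level rules above.
 * cBlock/rawBlock and their sizes are hypothetical inputs; their framing (block
 * boundaries and the compressed/uncompressed flag) must be transmitted by the
 * caller, since raw blocks carry no metadata. */
#if 0 /* usage sketch */
static size_t ZSTDv07_decodeTwoBlocksExample(ZSTDv07_DCtx* dctx,
                                             void* dst, size_t dstCapacity,
                                             const void* cBlock, size_t cBlockSize,
                                             const void* rawBlock, size_t rawBlockSize)
{
    BYTE* op = (BYTE*)dst;
    { size_t const initResult = ZSTDv07_decompressBegin(dctx);       /* context must be initialized first */
      if (ZSTDv07_isError(initResult)) return initResult; }
    /* block 1 : compressed block */
    { size_t const dSize = ZSTDv07_decompressBlock(dctx, op, dstCapacity, cBlock, cBlockSize);
      if (ZSTDv07_isError(dSize)) return dSize;
      op += dSize; dstCapacity -= dSize; }
    /* block 2 : stored uncompressed by the compressor : copy it manually,
     * then register it so the decoder history stays consistent */
    if (rawBlockSize > dstCapacity) return ERROR(dstSize_tooSmall);
    memcpy(op, rawBlock, rawBlockSize);
    ZSTDv07_insertBlock(dctx, op, rawBlockSize);
    op += rawBlockSize;
    return (size_t)(op - (BYTE*)dst);
}
#endif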
#endif /* ZSTDv07_STATIC_LINKING_ONLY */
/* ******************************************************************
mem.h
low-level memory access routines
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*-****************************************
* Compiler specifics
******************************************/
#if defined(_MSC_VER) /* Visual Studio */
# include <stdlib.h> /* _byteswap_ulong */
# include <intrin.h> /* _byteswap_* */
#endif
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/*-**************************************************************
* Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# if defined(_AIX)
# include <inttypes.h>
# else
# include <stdint.h> /* intptr_t */
# endif
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef signed short S16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
typedef signed long long S64;
#endif
/*-**************************************************************
* Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
* By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
* Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
* The switch below allows selecting a different access method for improved performance.
* Method 0 (default) : use `memcpy()`. Safe and portable.
* Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
* This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
* Method 2 : direct access. This method is portable but violates the C standard.
* It can generate buggy code on targets depending on alignment.
* In some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
* See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
* Prefer these methods in priority order (0 > 1 > 2)
*/
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define MEM_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
# define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
/* violates C standard, by lying on structure alignment.
Only use if no other choice to achieve best performance on target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; U64 u64; size_t st; } __attribute__((packed)) unalign;
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
#else
/* default method, safe and standard.
can sometimes prove slower */
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
memcpy(memPtr, &value, sizeof(value));
}
#endif /* MEM_FORCE_MEMORY_ACCESS */
MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_ulong(in);
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap32(in);
#else
return ((in << 24) & 0xff000000 ) |
((in << 8) & 0x00ff0000 ) |
((in >> 8) & 0x0000ff00 ) |
((in >> 24) & 0x000000ff );
#endif
}
MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
#elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
((in << 40) & 0x00ff000000000000ULL) |
((in << 24) & 0x0000ff0000000000ULL) |
((in << 8) & 0x000000ff00000000ULL) |
((in >> 8) & 0x00000000ff000000ULL) |
((in >> 24) & 0x0000000000ff0000ULL) |
((in >> 40) & 0x000000000000ff00ULL) |
((in >> 56) & 0x00000000000000ffULL);
#endif
}
/*=== Little endian r/w ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read16(memPtr);
else {
const BYTE* p = (const BYTE*)memPtr;
return (U16)(p[0] + (p[1]<<8));
}
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
if (MEM_isLittleEndian()) {
MEM_write16(memPtr, val);
} else {
BYTE* p = (BYTE*)memPtr;
p[0] = (BYTE)val;
p[1] = (BYTE)(val>>8);
}
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read32(memPtr);
else
return MEM_swap32(MEM_read32(memPtr));
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
if (MEM_isLittleEndian())
return MEM_read64(memPtr);
else
return MEM_swap64(MEM_read64(memPtr));
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
#if defined (__cplusplus)
}
#endif
#endif /* MEM_H_MODULE */
/* ******************************************************************
bitstream
Part of FSE library
header file (to include)
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
#if defined (__cplusplus)
extern "C" {
#endif
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
* these functions are defined into a .h to be included.
*/
/*=========================================
* Target specific
=========================================*/
#if defined(__BMI__) && defined(__GNUC__)
# include <immintrin.h> /* support for bextr (experimental) */
#endif
/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BITv07_DStream_t;
typedef enum { BITv07_DStream_unfinished = 0,
BITv07_DStream_endOfBuffer = 1,
BITv07_DStream_completed = 2,
BITv07_DStream_overflow = 3 } BITv07_DStream_status; /* result of BITv07_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD);
MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* bitD);
/*-****************************************
* unsafe API
******************************************/
MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
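/* Illustrative sketch (not compiled) : the canonical read-backward pattern.
 * The stream is initialized on the whole bit-encoded buffer, then readBits()
 * and reloadDStream() alternate until completion. The symbol count and the
 * nbBits value are hypothetical; real decoders take both from FSE/HUF tables. */
#if 0 /* usage sketch */
static size_t BITv07_readBackwardExample(const void* srcBuffer, size_t srcSize, unsigned nbSymbols)
{
    BITv07_DStream_t bitD;
    { size_t const initResult = BITv07_initDStream(&bitD, srcBuffer, srcSize);
      if (ERR_isError(initResult)) return initResult; }
    while ( (BITv07_reloadDStream(&bitD) <= BITv07_DStream_completed) && nbSymbols ) {
        size_t const value = BITv07_readBits(&bitD, 5);   /* nbBits would normally come from a decoding table */
        (void)value;                                      /* decode/consume the value here */
        nbSymbols--;
    }
    if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
    return 0;
}
#endif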
/*-**************************************************************
* Internal functions
****************************************************************/
MEM_STATIC unsigned BITv07_highbit32 (U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
# endif
}
/*-********************************************************
* bitStream decoding
**********************************************************/
/*! BITv07_initDStream() :
* Initialize a BITv07_DStream_t.
* `bitD` : a pointer to an already allocated BITv07_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BITv07_initDStream(BITv07_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
} else {
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);/* fall-through */
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);/* fall-through */
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);/* fall-through */
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; /* fall-through */
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; /* fall-through */
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; /* fall-through */
default: break;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BITv07_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BITv07_lookBits(const BITv07_DStream_t* bitD, U32 nbBits)
{
U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BITv07_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BITv07_lookBitsFast(const BITv07_DStream_t* bitD, U32 nbBits)
{
U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BITv07_skipBits(BITv07_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
MEM_STATIC size_t BITv07_readBits(BITv07_DStream_t* bitD, U32 nbBits)
{
size_t const value = BITv07_lookBits(bitD, nbBits);
BITv07_skipBits(bitD, nbBits);
return value;
}
/*! BITv07_readBitsFast() :
*  unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BITv07_readBitsFast(BITv07_DStream_t* bitD, U32 nbBits)
{
size_t const value = BITv07_lookBitsFast(bitD, nbBits);
BITv07_skipBits(bitD, nbBits);
return value;
}
MEM_STATIC BITv07_DStream_status BITv07_reloadDStream(BITv07_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */
return BITv07_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BITv07_DStream_unfinished;
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BITv07_DStream_endOfBuffer;
return BITv07_DStream_completed;
}
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BITv07_DStream_status result = BITv07_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BITv07_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BITv07_endOfDStream() :
* @return Tells if DStream has exactly reached its end (all bits consumed).
*/
MEM_STATIC unsigned BITv07_endOfDStream(const BITv07_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#if defined (__cplusplus)
}
#endif
#endif /* BITSTREAM_H_MODULE */
/* ******************************************************************
FSE : Finite State Entropy codec
Public Prototypes declaration
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef FSEv07_H
#define FSEv07_H
#if defined (__cplusplus)
extern "C" {
#endif
/*-****************************************
* FSE simple functions
******************************************/
/*! FSEv07_decompress():
Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'dstCapacity'.
@return : size of regenerated data (<= dstCapacity),
or an error code, which can be tested using FSEv07_isError() .
** Important ** : FSEv07_decompress() does not decompress non-compressible nor RLE data !!!
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
size_t FSEv07_decompress(void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize);
/* Error Management */
unsigned FSEv07_isError(size_t code); /* tells if a return value is an error code */
const char* FSEv07_getErrorName(size_t code); /* provides error code string (useful for debugging) */
/*-*****************************************
* FSE detailed API
******************************************/
/*!
FSEv07_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/
/* *** DECOMPRESSION *** */
/*! FSEv07_readNCount():
Read compactly saved 'normalizedCounter' from 'rBuffer'.
@return : size read from 'rBuffer',
or an errorCode, which can be tested using FSEv07_isError().
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
/*! Constructor and Destructor of FSEv07_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSEv07_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
FSEv07_DTable* FSEv07_createDTable(unsigned tableLog);
void FSEv07_freeDTable(FSEv07_DTable* dt);
/*! FSEv07_buildDTable():
Builds 'dt', which must be already allocated, using FSEv07_createDTable().
return : 0, or an errorCode, which can be tested using FSEv07_isError() */
size_t FSEv07_buildDTable (FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! FSEv07_decompress_usingDTable():
Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= `dstCapacity`),
or an errorCode, which can be tested using FSEv07_isError() */
size_t FSEv07_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSEv07_DTable* dt);
/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
If block is uncompressed, use memcpy() instead
If block is a single repeated byte, use memset() instead )
The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSEv07_readNCount() if it was saved using FSEv07_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSEv07_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSEv07_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBuffSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
The next step is to build the decompression tables 'FSEv07_DTable' from 'normalizedCounter'.
This is performed by the function FSEv07_buildDTable().
The space required by 'FSEv07_DTable' must be already allocated using FSEv07_createDTable().
If there is an error, the function will return an error code, which can be tested using FSEv07_isError().
`FSEv07_DTable` can then be used to decompress `cSrc`, with FSEv07_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSEv07_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSEv07_isError(). (ex: dst buffer too small)
*/
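/* Illustrative sketch of the tutorial above (editor's addition, not part of the original
   sources). It assumes `cSrc`/`cSrcSize` point to an FSE-compressed block preceded by its
   NCount header, and `dst`/`dstCapacity` describe an already allocated output buffer :

       short counting[FSEv07_MAX_SYMBOL_VALUE+1];
       unsigned tableLog;
       unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;
       size_t const headerSize = FSEv07_readNCount(counting, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
       if (FSEv07_isError(headerSize)) return headerSize;

       {   FSEv07_DTable* const dt = FSEv07_createDTable(tableLog);   // a production version would also check for NULL
           size_t result = FSEv07_buildDTable(dt, counting, maxSymbolValue, tableLog);
           if (!FSEv07_isError(result))
               result = FSEv07_decompress_usingDTable(dst, dstCapacity,
                                   (const char*)cSrc + headerSize, cSrcSize - headerSize, dt);
           FSEv07_freeDTable(dt);
           return result;   // number of regenerated bytes, or an error code
       }

   FSEv07_decompress(), defined further below, performs the same sequence with a stack-allocated DTable. */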
#ifdef FSEv07_STATIC_LINKING_ONLY
/* *****************************************
* Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSEv07_NCOUNTBOUND 512
#define FSEv07_BLOCKBOUND(size) (size + (size>>7))
/* It is possible to statically allocate FSE CTable/DTable as a table of unsigned using below macros */
#define FSEv07_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
/* *****************************************
* FSE advanced API
*******************************************/
size_t FSEv07_countFast(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
/**< same as FSEv07_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr */
unsigned FSEv07_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSEv07_optimalTableLog(), which uses `minus==2` */
size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits);
/**< build a fake FSEv07_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, unsigned char symbolValue);
/**< build a fake FSEv07_DTable, designed to always generate the same symbolValue */
/* *****************************************
* FSE symbol decompression API
*******************************************/
typedef struct
{
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSEv07_DState_t;
static void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt);
static unsigned char FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
/* *****************************************
* FSE unsafe API
*******************************************/
static unsigned char FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
/* ====== Decompression ====== */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSEv07_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSEv07_decode_t; /* size == U32 */
MEM_STATIC void FSEv07_initDState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD, const FSEv07_DTable* dt)
{
const void* ptr = dt;
const FSEv07_DTableHeader* const DTableH = (const FSEv07_DTableHeader*)ptr;
DStatePtr->state = BITv07_readBits(bitD, DTableH->tableLog);
BITv07_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSEv07_peekSymbol(const FSEv07_DState_t* DStatePtr)
{
FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
return DInfo.symbol;
}
MEM_STATIC void FSEv07_updateState(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
{
FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
size_t const lowBits = BITv07_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
}
MEM_STATIC BYTE FSEv07_decodeSymbol(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
{
FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
BYTE const symbol = DInfo.symbol;
size_t const lowBits = BITv07_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
/*! FSEv07_decodeSymbolFast() :
unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSEv07_decodeSymbolFast(FSEv07_DState_t* DStatePtr, BITv07_DStream_t* bitD)
{
FSEv07_decode_t const DInfo = ((const FSEv07_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
BYTE const symbol = DInfo.symbol;
size_t const lowBits = BITv07_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
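/* Illustrative sketch (editor's addition, not part of the original sources) : decoding
   `nbSymbols` bytes one state at a time, assuming `dt` was built with FSEv07_buildDTable(),
   `dst` is a BYTE buffer, and `cSrc`/`cSrcSize` hold the matching bitstream. The real
   decoder further below interleaves two states to hide the dependency chain, but the
   principle is identical :

       BITv07_DStream_t bitD;
       FSEv07_DState_t  state;
       {   size_t const initError = BITv07_initDStream(&bitD, cSrc, cSrcSize);
           if (FSEv07_isError(initError)) return initError;
       }
       FSEv07_initDState(&state, &bitD, dt);
       {   size_t n;
           for (n=0; n<nbSymbols; n++) {
               dst[n] = FSEv07_decodeSymbol(&state, &bitD);   // reads nbBits and steps the state machine
               BITv07_reloadDStream(&bitD);                    // refill container; real code reloads less often
           }
       }
*/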
#ifndef FSEv07_COMMONDEFS_ONLY
/* **************************************************************
* Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#define FSEv07_MAX_MEMORY_USAGE 14
#define FSEv07_DEFAULT_MEMORY_USAGE 13
/*!FSEv07_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#define FSEv07_MAX_SYMBOL_VALUE 255
/* **************************************************************
* template functions type & suffix
****************************************************************/
#define FSEv07_FUNCTION_TYPE BYTE
#define FSEv07_FUNCTION_EXTENSION
#define FSEv07_DECODE_TYPE FSEv07_decode_t
#endif /* !FSEv07_COMMONDEFS_ONLY */
/* ***************************************************************
* Constants
*****************************************************************/
#define FSEv07_MAX_TABLELOG (FSEv07_MAX_MEMORY_USAGE-2)
#define FSEv07_MAX_TABLESIZE (1U<<FSEv07_MAX_TABLELOG)
#define FSEv07_MAXTABLESIZE_MASK (FSEv07_MAX_TABLESIZE-1)
#define FSEv07_DEFAULT_TABLELOG (FSEv07_DEFAULT_MEMORY_USAGE-2)
#define FSEv07_MIN_TABLELOG 5
#define FSEv07_TABLELOG_ABSOLUTE_MAX 15
#if FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX
# error "FSEv07_MAX_TABLELOG > FSEv07_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
#define FSEv07_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
#endif /* FSEv07_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
}
#endif
#endif /* FSEv07_H */
/* ******************************************************************
Huffman coder, part of New Generation Entropy library
header file
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUFv07_H_298734234
#define HUFv07_H_298734234
#if defined (__cplusplus)
extern "C" {
#endif
/* *** simple functions *** */
/**
HUFv07_decompress() :
Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
into already allocated buffer 'dst', of minimum size 'dstSize'.
`dstSize` : **must** be the ***exact*** size of original (uncompressed) data.
Note : in contrast with FSE, HUFv07_decompress can regenerate
RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
because it knows the size to regenerate.
@return : size of regenerated data (== dstSize),
or an error code, which can be tested using HUFv07_isError()
*/
size_t HUFv07_decompress(void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize);
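/* Illustrative sketch (editor's addition, not part of the original sources) : `originalSize`
   must come from an out-of-band header (e.g. a block header), it is not stored inside the
   Huffman payload itself. Assumes <stdlib.h> for malloc()/free() :

       void* const dst = malloc(originalSize);
       if (dst == NULL) return ERROR(memory_allocation);
       {   size_t const dSize = HUFv07_decompress(dst, originalSize, cSrc, cSrcSize);
           if (HUFv07_isError(dSize)) { free(dst); return dSize; }
           // on success, dSize == originalSize
       }
*/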
/* ****************************************
* Tool functions
******************************************/
#define HUFv07_BLOCKSIZE_MAX (128 * 1024)
/* Error Management */
unsigned HUFv07_isError(size_t code); /**< tells if a return value is an error code */
const char* HUFv07_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
/* *** Advanced function *** */
#ifdef HUFv07_STATIC_LINKING_ONLY
/* *** Constants *** */
#define HUFv07_TABLELOG_ABSOLUTEMAX 16 /* absolute limit of HUFv07_MAX_TABLELOG. Beyond that value, code does not work */
#define HUFv07_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUFv07_TABLELOG_ABSOLUTEMAX */
#define HUFv07_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
#define HUFv07_SYMBOLVALUE_MAX 255
#if (HUFv07_TABLELOG_MAX > HUFv07_TABLELOG_ABSOLUTEMAX)
# error "HUFv07_TABLELOG_MAX is too large !"
#endif
/* ****************************************
* Static allocation
******************************************/
/* HUF buffer bounds */
#define HUFv07_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
/* static allocation of HUF's DTable */
typedef U32 HUFv07_DTable;
#define HUFv07_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
#define HUFv07_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
HUFv07_DTable DTable[HUFv07_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1)*0x1000001) }
#define HUFv07_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
HUFv07_DTable DTable[HUFv07_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog)*0x1000001) }
/* ****************************************
* Advanced decompression functions
******************************************/
size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
size_t HUFv07_decompress4X_hufOnly(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
size_t HUFv07_decompress4X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUFv07_decompress4X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUFv07_decompress1X2_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUFv07_decompress1X4_DCtx(HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
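/* Illustrative sketch (editor's addition, not part of the original sources) : same
   decompression without any heap allocation, using a statically sized single-symbol DTable
   together with the explicit-context variant declared above :

       HUFv07_CREATE_STATIC_DTABLEX2(dTable, HUFv07_TABLELOG_MAX);
       {   size_t const dSize = HUFv07_decompress4X2_DCtx(dTable, dst, dstSize, cSrc, cSrcSize);
           if (HUFv07_isError(dSize)) return dSize;
       }
*/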
/* ****************************************
* HUF detailed API
******************************************/
/*!
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and regenerate 'CTable' using external methods.
*/
/* FSEv07_count() : find it within "fse.h" */
/*! HUFv07_readStats() :
Read compact Huffman tree, saved by HUFv07_writeCTable().
`huffWeight` is destination buffer.
@return : size read from `src` , or an error Code .
Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() . */
size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize);
/*
HUFv07_decompress() does the following:
1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
2. build Huffman table from the saved description, using HUFv07_readDTableXn()
3. decode 1 or 4 segments in parallel using HUFv07_decompressSXn_usingDTable
*/
/** HUFv07_selectDecoder() :
* Tells which decoder is likely to decode faster,
* based on a set of pre-determined metrics.
* @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize);
size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize);
size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
size_t HUFv07_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
size_t HUFv07_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
/* single stream variants */
size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
size_t HUFv07_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
size_t HUFv07_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUFv07_DTable* DTable);
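/* Illustrative sketch (editor's addition, not part of the original sources) of the 3-step
   pipeline described above, assuming `dctx` is a DTable dimensioned for the double-symbols
   decoder (HUFv07_CREATE_STATIC_DTABLEX4 with HUFv07_TABLELOG_MAX), which is large enough
   for either table type :

       U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);   // 0 -> X2, 1 -> X4
       size_t const hSize = algoNb ? HUFv07_readDTableX4(dctx, cSrc, cSrcSize)
                                   : HUFv07_readDTableX2(dctx, cSrc, cSrcSize);
       if (HUFv07_isError(hSize)) return hSize;
       if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
       return algoNb ? HUFv07_decompress4X4_usingDTable(dst, dstSize, (const char*)cSrc + hSize, cSrcSize - hSize, dctx)
                     : HUFv07_decompress4X2_usingDTable(dst, dstSize, (const char*)cSrc + hSize, cSrcSize - hSize, dctx);
*/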
#endif /* HUFv07_STATIC_LINKING_ONLY */
#if defined (__cplusplus)
}
#endif
#endif /* HUFv07_H_298734234 */
/*
Common functions of New Generation Entropy library
Copyright (C) 2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */
/*-****************************************
* FSE Error Management
******************************************/
unsigned FSEv07_isError(size_t code) { return ERR_isError(code); }
const char* FSEv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
/* **************************************************************
* HUF Error Management
****************************************************************/
unsigned HUFv07_isError(size_t code) { return ERR_isError(code); }
const char* HUFv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
static short FSEv07_abs(short a) { return (short)(a<0 ? -a : a); }
size_t FSEv07_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSEv07_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSEv07_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
while ((remaining>1) && (charnum<=*maxSVPtr)) {
if (previous0) {
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF) {
n0+=24;
if (ip < iend-5) {
ip+=2;
bitStream = MEM_readLE32(ip) >> bitCount;
} else {
bitStream >>= 16;
bitCount+=16;
} }
while ((bitStream & 3) == 3) {
n0+=3;
bitStream>>=2;
bitCount+=2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
}
else
bitStream >>= 2;
}
{ short const max = (short)((2*threshold-1)-remaining);
short count;
if ((bitStream & (threshold-1)) < (U32)max) {
count = (short)(bitStream & (threshold-1));
bitCount += nbBits-1;
} else {
count = (short)(bitStream & (2*threshold-1));
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= FSEv07_abs(count);
normalizedCounter[charnum++] = count;
previous0 = !count;
while (remaining < threshold) {
nbBits--;
threshold >>= 1;
}
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
} } /* while ((remaining>1) && (charnum<=*maxSVPtr)) */
if (remaining != 1) return ERROR(GENERIC);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
return ip-istart;
}
/*! HUFv07_readStats() :
Read compact Huffman tree, saved by HUFv07_writeCTable().
`huffWeight` is destination buffer.
@return : size read from `src` , or an error Code .
Note : Needed by HUFv07_readCTable() and HUFv07_readDTableXn() .
*/
size_t HUFv07_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
/* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) { /* special header */
if (iSize >= (242)) { /* RLE */
static U32 l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
oSize = l[iSize-242];
memset(huffWeight, 1, hwSize);
iSize = 0;
}
else { /* Incompressible */
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
{ U32 n;
for (n=0; n<oSize; n+=2) {
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
} } } }
else { /* header compressed with FSE (normal case) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSEv07_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
if (FSEv07_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUFv07_TABLELOG_ABSOLUTEMAX + 1) * sizeof(U32));
weightTotal = 0;
{ U32 n; for (n=0; n<oSize; n++) {
if (huffWeight[n] >= HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
} }
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
{ U32 const tableLog = BITv07_highbit32(weightTotal) + 1;
if (tableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(corruption_detected);
*tableLogPtr = tableLog;
/* determine last weight */
{ U32 const total = 1 << tableLog;
U32 const rest = total - weightTotal;
U32 const verif = 1 << BITv07_highbit32(rest);
U32 const lastWeight = BITv07_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
} }
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
return iSize+1;
}
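/* Worked example (editor's addition, not part of the original sources) :
   suppose the FSE-decoded header yields the 4 weights { 1, 1, 1, 1 }.
   Each weight w accounts for (1<<w)>>1 table cells, so weightTotal = 4.
   tableLog = BITv07_highbit32(4)+1 = 3, total = 1<<3 = 8, rest = 8-4 = 4 = 1<<2,
   so the implied last symbol gets lastWeight = BITv07_highbit32(4)+1 = 3,
   and the cells add up to exactly 2^tableLog as required.
   rankStats[1] = 4 (even and >= 2), so the tree-validity check also passes. */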
/* ******************************************************************
FSE : Finite State Entropy decoder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
#else
# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
# else
# define FORCE_INLINE static
# endif /* __STDC_VERSION__ */
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define FSEv07_isError ERR_isError
#define FSEv07_STATIC_ASSERT(c) { enum { FSEv07_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
* Complex types
****************************************************************/
typedef U32 DTable_max_t[FSEv07_DTABLE_SIZE_U32(FSEv07_MAX_TABLELOG)];
/* **************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSEv07_FUNCTION_EXTENSION
# error "FSEv07_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSEv07_FUNCTION_TYPE
# error "FSEv07_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSEv07_CAT(X,Y) X##Y
#define FSEv07_FUNCTION_NAME(X,Y) FSEv07_CAT(X,Y)
#define FSEv07_TYPE_NAME(X,Y) FSEv07_CAT(X,Y)
/* Function templates */
FSEv07_DTable* FSEv07_createDTable (unsigned tableLog)
{
if (tableLog > FSEv07_TABLELOG_ABSOLUTE_MAX) tableLog = FSEv07_TABLELOG_ABSOLUTE_MAX;
return (FSEv07_DTable*)malloc( FSEv07_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
}
void FSEv07_freeDTable (FSEv07_DTable* dt)
{
free(dt);
}
size_t FSEv07_buildDTable(FSEv07_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
FSEv07_DECODE_TYPE* const tableDecode = (FSEv07_DECODE_TYPE*) (tdPtr);
U16 symbolNext[FSEv07_MAX_SYMBOL_VALUE+1];
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
/* Sanity Checks */
if (maxSymbolValue > FSEv07_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSEv07_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
{ FSEv07_DTableHeader DTableH;
DTableH.tableLog = (U16)tableLog;
DTableH.fastMode = 1;
{ S16 const largeLimit= (S16)(1 << (tableLog-1));
U32 s;
for (s=0; s<maxSV1; s++) {
if (normalizedCounter[s]==-1) {
tableDecode[highThreshold--].symbol = (FSEv07_FUNCTION_TYPE)s;
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
symbolNext[s] = normalizedCounter[s];
} } }
memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
{ U32 const tableMask = tableSize-1;
U32 const step = FSEv07_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
for (i=0; i<normalizedCounter[s]; i++) {
tableDecode[position].symbol = (FSEv07_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
} }
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
/* Build Decoding table */
{ U32 u;
for (u=0; u<tableSize; u++) {
FSEv07_FUNCTION_TYPE const symbol = (FSEv07_FUNCTION_TYPE)(tableDecode[u].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[u].nbBits = (BYTE) (tableLog - BITv07_highbit32 ((U32)nextState) );
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
return 0;
}
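/* Worked example (editor's addition, not part of the original sources) :
   for tableLog = 5, tableSize = 32 and FSEv07_TABLESTEP(32) = 16 + 4 + 3 = 23.
   Since gcd(23, 32) = 1, position = (position + 23) & 31 steps through all 32 cells
   exactly once before returning to 0 (cells above highThreshold are skipped by
   re-stepping), which is why the final (position != 0) check detects an
   inconsistent normalizedCounter. */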
#ifndef FSEv07_COMMONDEFS_ONLY
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
size_t FSEv07_buildDTable_rle (FSEv07_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSEv07_decode_t* const cell = (FSEv07_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
size_t FSEv07_buildDTable_raw (FSEv07_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSEv07_DTableHeader* const DTableH = (FSEv07_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSEv07_decode_t* const dinfo = (FSEv07_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSV1 = tableMask+1;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<maxSV1; s++) {
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE size_t FSEv07_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSEv07_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BITv07_DStream_t bitD;
FSEv07_DState_t state1;
FSEv07_DState_t state2;
/* Init */
{ size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressedSize */
if (FSEv07_isError(errorCode)) return errorCode; }
FSEv07_initDState(&state1, &bitD, dt);
FSEv07_initDState(&state2, &bitD, dt);
#define FSEv07_GETSYMBOL(statePtr) fast ? FSEv07_decodeSymbolFast(statePtr, &bitD) : FSEv07_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BITv07_reloadDStream(&bitD)==BITv07_DStream_unfinished) && (op<olimit) ; op+=4) {
op[0] = FSEv07_GETSYMBOL(&state1);
if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BITv07_reloadDStream(&bitD);
op[1] = FSEv07_GETSYMBOL(&state2);
if (FSEv07_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BITv07_reloadDStream(&bitD) > BITv07_DStream_unfinished) { op+=2; break; } }
op[2] = FSEv07_GETSYMBOL(&state1);
if (FSEv07_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BITv07_reloadDStream(&bitD);
op[3] = FSEv07_GETSYMBOL(&state2);
}
/* tail */
/* note : BITv07_reloadDStream(&bitD) >= BITv07_DStream_endOfBuffer; ends at exactly BITv07_DStream_completed */
while (1) {
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSEv07_GETSYMBOL(&state1);
if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
*op++ = FSEv07_GETSYMBOL(&state2);
break;
}
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSEv07_GETSYMBOL(&state2);
if (BITv07_reloadDStream(&bitD)==BITv07_DStream_overflow) {
*op++ = FSEv07_GETSYMBOL(&state1);
break;
} }
return op-ostart;
}
size_t FSEv07_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSEv07_DTable* dt)
{
const void* ptr = dt;
const FSEv07_DTableHeader* DTableH = (const FSEv07_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSEv07_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
size_t FSEv07_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSEv07_MAX_SYMBOL_VALUE+1];
DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
unsigned tableLog;
unsigned maxSymbolValue = FSEv07_MAX_SYMBOL_VALUE;
if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
/* normal FSE decoding mode */
{ size_t const NCountLength = FSEv07_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSEv07_isError(NCountLength)) return NCountLength;
if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
ip += NCountLength;
cSrcSize -= NCountLength;
}
{ size_t const errorCode = FSEv07_buildDTable (dt, counting, maxSymbolValue, tableLog);
if (FSEv07_isError(errorCode)) return errorCode; }
return FSEv07_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); /* always return, even if it is an error code */
}
#endif /* FSEv07_COMMONDEFS_ONLY */
/* ******************************************************************
Huffman decoder, part of New Generation Entropy library
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
/* inline is defined */
#elif defined(_MSC_VER)
# define inline __inline
#else
# define inline /* disable inline */
#endif
#ifdef _MSC_VER /* Visual Studio */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
/* **************************************************************
* Error Management
****************************************************************/
#define HUFv07_STATIC_ASSERT(c) { enum { HUFv07_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/*-***************************/
/* generic DTableDesc */
/*-***************************/
typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
static DTableDesc HUFv07_getDTableDesc(const HUFv07_DTable* table)
{
DTableDesc dtd;
memcpy(&dtd, table, sizeof(dtd));
return dtd;
}
/*-***************************/
/* single-symbol decoding */
/*-***************************/
typedef struct { BYTE byte; BYTE nbBits; } HUFv07_DEltX2; /* single-symbol decoding */
size_t HUFv07_readDTableX2 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUFv07_SYMBOLVALUE_MAX + 1];
U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
size_t iSize;
void* const dtPtr = DTable + 1;
HUFv07_DEltX2* const dt = (HUFv07_DEltX2*)dtPtr;
HUFv07_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUFv07_DTable));
/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUFv07_readStats(huffWeight, HUFv07_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUFv07_isError(iSize)) return iSize;
/* Table header */
{ DTableDesc dtd = HUFv07_getDTableDesc(DTable);
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
memcpy(DTable, &dtd, sizeof(dtd));
}
/* Prepare ranks */
{ U32 n, nextRankStart = 0;
for (n=1; n<tableLog+1; n++) {
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
} }
/* fill DTable */
{ U32 n;
for (n=0; n<nbSymbols; n++) {
U32 const w = huffWeight[n];
U32 const length = (1 << w) >> 1;
U32 i;
HUFv07_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (i = rankVal[w]; i < rankVal[w] + length; i++)
dt[i] = D;
rankVal[w] += length;
} }
return iSize;
}
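/* Worked example (editor's addition, not part of the original sources) :
   with tableLog = 11, a symbol of weight w = 9 is assigned nbBits = tableLog+1-w = 3
   and fills length = (1<<9)>>1 = 256 = 2^(tableLog-nbBits) consecutive cells of the
   2^11-entry table, so any 11-bit lookup landing in its range returns that symbol while
   BITv07_skipBits() consumes only its 3 code bits. */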
static BYTE HUFv07_decodeSymbolX2(BITv07_DStream_t* Dstream, const HUFv07_DEltX2* dt, const U32 dtLog)
{
size_t const val = BITv07_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
BYTE const c = dt[val].byte;
BITv07_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUFv07_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUFv07_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUFv07_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUFv07_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
static inline size_t HUFv07_decodeStreamX2(BYTE* p, BITv07_DStream_t* const bitDPtr, BYTE* const pEnd, const HUFv07_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-4)) {
HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
HUFv07_DECODE_SYMBOLX2_1(p, bitDPtr);
HUFv07_DECODE_SYMBOLX2_2(p, bitDPtr);
HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd))
HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUFv07_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
static size_t HUFv07_decompress1X2_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + dstSize;
const void* dtPtr = DTable + 1;
const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
BITv07_DStream_t bitD;
DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
{ size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
if (HUFv07_isError(errorCode)) return errorCode; }
HUFv07_decodeStreamX2(op, &bitD, oend, dt, dtLog);
/* check */
if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
return dstSize;
}
size_t HUFv07_decompress1X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
DTableDesc dtd = HUFv07_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUFv07_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUFv07_decompress1X2_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUFv07_readDTableX2 (DCtx, cSrc, cSrcSize);
if (HUFv07_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUFv07_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}
size_t HUFv07_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
return HUFv07_decompress1X2_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
}
static size_t HUFv07_decompress4X2_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable + 1;
const HUFv07_DEltX2* const dt = (const HUFv07_DEltX2*)dtPtr;
/* Init */
BITv07_DStream_t bitD1;
BITv07_DStream_t bitD2;
BITv07_DStream_t bitD3;
BITv07_DStream_t bitD4;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
{ size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
if (HUFv07_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
if (HUFv07_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
if (HUFv07_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
if (HUFv07_isError(errorCode)) return errorCode; }
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
HUFv07_DECODE_SYMBOLX2_1(op1, &bitD1);
HUFv07_DECODE_SYMBOLX2_1(op2, &bitD2);
HUFv07_DECODE_SYMBOLX2_1(op3, &bitD3);
HUFv07_DECODE_SYMBOLX2_1(op4, &bitD4);
HUFv07_DECODE_SYMBOLX2_2(op1, &bitD1);
HUFv07_DECODE_SYMBOLX2_2(op2, &bitD2);
HUFv07_DECODE_SYMBOLX2_2(op3, &bitD3);
HUFv07_DECODE_SYMBOLX2_2(op4, &bitD4);
HUFv07_DECODE_SYMBOLX2_0(op1, &bitD1);
HUFv07_DECODE_SYMBOLX2_0(op2, &bitD2);
HUFv07_DECODE_SYMBOLX2_0(op3, &bitD3);
HUFv07_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 supposed already verified within main loop */
/* finish bitStreams one by one */
HUFv07_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUFv07_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUFv07_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUFv07_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
size_t HUFv07_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
DTableDesc dtd = HUFv07_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUFv07_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUFv07_decompress4X2_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUFv07_readDTableX2 (dctx, cSrc, cSrcSize);
if (HUFv07_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUFv07_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
}
size_t HUFv07_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv07_CREATE_STATIC_DTABLEX2(DTable, HUFv07_TABLELOG_MAX);
return HUFv07_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}
/* *************************/
/* double-symbols decoding */
/* *************************/
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUFv07_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
static void HUFv07_fillDTableX4Level2(HUFv07_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUFv07_DEltX4 DElt;
U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1) {
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
{ U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
}}
}
typedef U32 rankVal_t[HUFv07_TABLELOG_ABSOLUTEMAX][HUFv07_TABLELOG_ABSOLUTEMAX + 1];
static void HUFv07_fillDTableX4(HUFv07_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUFv07_TABLELOG_ABSOLUTEMAX + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++) {
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUFv07_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
} else {
HUFv07_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
{ U32 u;
const U32 end = start + length;
for (u = start; u < end; u++) DTable[u] = DElt;
} }
rankVal[weight] += length;
}
}
size_t HUFv07_readDTableX4 (HUFv07_DTable* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUFv07_SYMBOLVALUE_MAX + 1];
sortedSymbol_t sortedSymbol[HUFv07_SYMBOLVALUE_MAX + 1];
U32 rankStats[HUFv07_TABLELOG_ABSOLUTEMAX + 1] = { 0 };
U32 rankStart0[HUFv07_TABLELOG_ABSOLUTEMAX + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
DTableDesc dtd = HUFv07_getDTableDesc(DTable);
U32 const maxTableLog = dtd.maxTableLog;
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUFv07_DEltX4* const dt = (HUFv07_DEltX4*)dtPtr;
HUFv07_STATIC_ASSERT(sizeof(HUFv07_DEltX4) == sizeof(HUFv07_DTable)); /* if compilation fails here, assertion is false */
if (maxTableLog > HUFv07_TABLELOG_ABSOLUTEMAX) return ERROR(tableLog_tooLarge);
/* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzers complain ... */
iSize = HUFv07_readStats(weightList, HUFv07_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUFv07_isError(iSize)) return iSize;
/* check result */
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{ U32 w, nextRankStart = 0;
for (w=1; w<maxW+1; w++) {
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{ U32 s;
for (s=0; s<nbSymbols; s++) {
U32 const w = weightList[s];
U32 const r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
}
/* Build rankVal */
{ U32* const rankVal0 = rankVal[0];
{ int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
U32 nextRankVal = 0;
U32 w;
for (w=1; w<maxW+1; w++) {
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
} }
{ U32 const minBits = tableLog+1 - maxW;
U32 consumed;
for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
U32* const rankValPtr = rankVal[consumed];
U32 w;
for (w = 1; w < maxW+1; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } } }
HUFv07_fillDTableX4(dt, maxTableLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
dtd.tableLog = (BYTE)maxTableLog;
dtd.tableType = 1;
memcpy(DTable, &dtd, sizeof(dtd));
return iSize;
}
static U32 HUFv07_decodeSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
{
const size_t val = BITv07_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BITv07_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUFv07_decodeLastSymbolX4(void* op, BITv07_DStream_t* DStream, const HUFv07_DEltX4* dt, const U32 dtLog)
{
const size_t val = BITv07_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BITv07_skipBits(DStream, dt[val].nbBits);
else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BITv07_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
} }
return 1;
}
#define HUFv07_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUFv07_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUFv07_TABLELOG_MAX<=12)) \
ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUFv07_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUFv07_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
static inline size_t HUFv07_decodeStreamX4(BYTE* p, BITv07_DStream_t* bitDPtr, BYTE* const pEnd, const HUFv07_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p < pEnd-7)) {
HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
HUFv07_DECODE_SYMBOLX4_1(p, bitDPtr);
HUFv07_DECODE_SYMBOLX4_2(p, bitDPtr);
HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to end : up to 2 symbols at a time */
while ((BITv07_reloadDStream(bitDPtr) == BITv07_DStream_unfinished) && (p <= pEnd-2))
HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUFv07_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUFv07_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
static size_t HUFv07_decompress1X4_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
BITv07_DStream_t bitD;
/* Init */
{ size_t const errorCode = BITv07_initDStream(&bitD, cSrc, cSrcSize);
if (HUFv07_isError(errorCode)) return errorCode;
}
/* decode */
{ BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
HUFv07_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
}
/* check */
if (!BITv07_endOfDStream(&bitD)) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
size_t HUFv07_decompress1X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
DTableDesc dtd = HUFv07_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUFv07_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUFv07_decompress1X4_DCtx (HUFv07_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUFv07_readDTableX4 (DCtx, cSrc, cSrcSize);
if (HUFv07_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUFv07_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}
size_t HUFv07_decompress1X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
return HUFv07_decompress1X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}
static size_t HUFv07_decompress4X4_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable+1;
const HUFv07_DEltX4* const dt = (const HUFv07_DEltX4*)dtPtr;
/* Init */
BITv07_DStream_t bitD1;
BITv07_DStream_t bitD2;
BITv07_DStream_t bitD3;
BITv07_DStream_t bitD4;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
size_t const segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
{ size_t const errorCode = BITv07_initDStream(&bitD1, istart1, length1);
if (HUFv07_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BITv07_initDStream(&bitD2, istart2, length2);
if (HUFv07_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BITv07_initDStream(&bitD3, istart3, length3);
if (HUFv07_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BITv07_initDStream(&bitD4, istart4, length4);
if (HUFv07_isError(errorCode)) return errorCode; }
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
for ( ; (endSignal==BITv07_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
HUFv07_DECODE_SYMBOLX4_1(op1, &bitD1);
HUFv07_DECODE_SYMBOLX4_1(op2, &bitD2);
HUFv07_DECODE_SYMBOLX4_1(op3, &bitD3);
HUFv07_DECODE_SYMBOLX4_1(op4, &bitD4);
HUFv07_DECODE_SYMBOLX4_2(op1, &bitD1);
HUFv07_DECODE_SYMBOLX4_2(op2, &bitD2);
HUFv07_DECODE_SYMBOLX4_2(op3, &bitD3);
HUFv07_DECODE_SYMBOLX4_2(op4, &bitD4);
HUFv07_DECODE_SYMBOLX4_0(op1, &bitD1);
HUFv07_DECODE_SYMBOLX4_0(op2, &bitD2);
HUFv07_DECODE_SYMBOLX4_0(op3, &bitD3);
HUFv07_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BITv07_reloadDStream(&bitD1) | BITv07_reloadDStream(&bitD2) | BITv07_reloadDStream(&bitD3) | BITv07_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is already verified within the main loop */
/* finish bitStreams one by one */
HUFv07_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUFv07_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUFv07_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUFv07_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
{ U32 const endCheck = BITv07_endOfDStream(&bitD1) & BITv07_endOfDStream(&bitD2) & BITv07_endOfDStream(&bitD3) & BITv07_endOfDStream(&bitD4);
if (!endCheck) return ERROR(corruption_detected); }
/* decoded size */
return dstSize;
}
}
size_t HUFv07_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
DTableDesc dtd = HUFv07_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUFv07_decompress4X4_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUFv07_readDTableX4 (dctx, cSrc, cSrcSize);
if (HUFv07_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUFv07_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
}
size_t HUFv07_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUFv07_CREATE_STATIC_DTABLEX4(DTable, HUFv07_TABLELOG_MAX);
return HUFv07_decompress4X4_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}
/* ********************************/
/* Generic decompression selector */
/* ********************************/
size_t HUFv07_decompress1X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
return dtd.tableType ? HUFv07_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
HUFv07_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}
size_t HUFv07_decompress4X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUFv07_DTable* DTable)
{
DTableDesc const dtd = HUFv07_getDTableDesc(DTable);
return dtd.tableType ? HUFv07_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
HUFv07_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
/** HUFv07_selectDecoder() :
* Tells which decoder is likely to decode faster,
* based on a set of pre-determined metrics.
* @return : 0==HUFv07_decompress4X2, 1==HUFv07_decompress4X4 .
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUFv07_selectDecoder (size_t dstSize, size_t cSrcSize)
{
/* decoder timing evaluation */
U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
U32 const D256 = (U32)(dstSize >> 8);
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
return DTime1 < DTime0;
}
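/* Illustrative worked example (not part of the original source) :
   with dstSize = 100 KB and cSrcSize = 40 KB,
     Q      = 40960*16/102400    = 6      -> "38-44%" row of algoTime[][]
     D256   = 102400 >> 8        = 400
     DTime0 = 883  + 128*400     = 52083  (single-symbol estimate)
     DTime1 = 1437 +  74*400     = 31037, + 1/8 penalty = 34916
   34916 < 52083, so HUFv07_selectDecoder() returns 1 and callers such as
   HUFv07_decompress() below pick the double-symbols decoder HUFv07_decompress4X4(). */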
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUFv07_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
static const decompressionAlgo decompress[2] = { HUFv07_decompress4X2, HUFv07_decompress4X4 };
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
}
/* return HUFv07_decompress4X2(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams single-symbol decoding */
/* return HUFv07_decompress4X4(dst, dstSize, cSrc, cSrcSize); */ /* multi-streams double-symbols decoding */
}
size_t HUFv07_decompress4X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
}
}
size_t HUFv07_decompress4X_hufOnly (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */
{ U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUFv07_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUFv07_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
}
}
size_t HUFv07_decompress1X_DCtx (HUFv07_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUFv07_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUFv07_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUFv07_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
}
}
/*
Common functions of Zstd compression library
Copyright (C) 2015-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net/
*/
/*-****************************************
* ZSTD Error Management
******************************************/
/*! ZSTDv07_isError() :
* tells if a return value is an error code */
unsigned ZSTDv07_isError(size_t code) { return ERR_isError(code); }
/*! ZSTDv07_getErrorName() :
* provides error code string from function result (useful for debugging) */
const char* ZSTDv07_getErrorName(size_t code) { return ERR_getErrorName(code); }
/* **************************************************************
* ZBUFF Error Management
****************************************************************/
unsigned ZBUFFv07_isError(size_t errorCode) { return ERR_isError(errorCode); }
const char* ZBUFFv07_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
static void* ZSTDv07_defaultAllocFunction(void* opaque, size_t size)
{
void* address = malloc(size);
(void)opaque;
/* printf("alloc %p, %d opaque=%p \n", address, (int)size, opaque); */
return address;
}
static void ZSTDv07_defaultFreeFunction(void* opaque, void* address)
{
(void)opaque;
/* if (address) printf("free %p opaque=%p \n", address, opaque); */
free(address);
}
/*
zstd_internal - common functions to include
Header File for include
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : https://www.zstd.net
*/
#ifndef ZSTDv07_CCOMMON_H_MODULE
#define ZSTDv07_CCOMMON_H_MODULE
/*-*************************************
* Common macros
***************************************/
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
/*-*************************************
* Common constants
***************************************/
#define ZSTDv07_OPT_NUM (1<<12)
#define ZSTDv07_DICT_MAGIC 0xEC30A437 /* v0.7 */
#define ZSTDv07_REP_NUM 3
#define ZSTDv07_REP_INIT ZSTDv07_REP_NUM
#define ZSTDv07_REP_MOVE (ZSTDv07_REP_NUM-1)
static const U32 repStartValue[ZSTDv07_REP_NUM] = { 1, 4, 8 };
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1
#define ZSTDv07_WINDOWLOG_ABSOLUTEMIN 10
static const size_t ZSTDv07_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static const size_t ZSTDv07_did_fieldSize[4] = { 0, 1, 2, 4 };
#define ZSTDv07_BLOCKHEADERSIZE 3 /* C standard doesn't allow a `static const` variable to be initialized using another `static const` variable */
static const size_t ZSTDv07_blockHeaderSize = ZSTDv07_BLOCKHEADERSIZE;
typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
#define HufLog 12
typedef enum { lbt_huffman, lbt_repeat, lbt_raw, lbt_rle } litBlockType_t;
#define LONGNBSEQ 0x7F00
#define MINMATCH 3
#define EQUAL_READ32 4
#define Litbits 8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML 52
#define MaxLL 35
#define MaxOff 28
#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog 9
#define LLFSELog 9
#define OffFSELog 8
#define FSEv07_ENCODING_RAW 0
#define FSEv07_ENCODING_RLE 1
#define FSEv07_ENCODING_STATIC 2
#define FSEv07_ENCODING_DYNAMIC 3
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
13,14,15,16 };
static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-1,-1,-1,-1 };
static const U32 LL_defaultNormLog = 6;
static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
12,13,14,15,16 };
static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
-1,-1,-1,-1,-1 };
static const U32 ML_defaultNormLog = 6;
static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
static const U32 OF_defaultNormLog = 5;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTDv07_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTDv07_copy8(d,s); d+=8; s+=8; }
/*! ZSTDv07_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
#define WILDCOPY_OVERLENGTH 8
MEM_STATIC void ZSTDv07_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do
COPY8(op, ip)
while (op < oend);
}
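/* Illustrative usage note (assumption, not from the original source) : because the copy
   proceeds in 8-byte steps, destination buffers must keep WILDCOPY_OVERLENGTH bytes of
   slack past the requested end.  For example :
     BYTE out[100 + WILDCOPY_OVERLENGTH];
     ZSTDv07_wildcopy(out, in, 100);
   may legitimately write 104 bytes into out, even though only 100 were requested. */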
/*-*******************************************
* Private interfaces
*********************************************/
typedef struct ZSTDv07_stats_s ZSTDv07_stats_t;
typedef struct {
U32 off;
U32 len;
} ZSTDv07_match_t;
typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTDv07_REP_INIT];
} ZSTDv07_optimal_t;
struct ZSTDv07_stats_s { U32 unused; };
typedef struct {
void* buffer;
U32* offsetStart;
U32* offset;
BYTE* offCodeStart;
BYTE* litStart;
BYTE* lit;
U16* litLengthStart;
U16* litLength;
BYTE* llCodeStart;
U16* matchLengthStart;
U16* matchLength;
BYTE* mlCodeStart;
U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
U32 longLengthPos;
/* opt */
ZSTDv07_optimal_t* priceTable;
ZSTDv07_match_t* matchTable;
U32* matchLengthFreq;
U32* litLengthFreq;
U32* litFreq;
U32* offCodeFreq;
U32 matchLengthSum;
U32 matchSum;
U32 litLengthSum;
U32 litSum;
U32 offCodeSum;
U32 log2matchLengthSum;
U32 log2matchSum;
U32 log2litLengthSum;
U32 log2litSum;
U32 log2offCodeSum;
U32 factor;
U32 cachedPrice;
U32 cachedLitLength;
const BYTE* cachedLiterals;
ZSTDv07_stats_t stats;
} seqStore_t;
void ZSTDv07_seqToCodes(const seqStore_t* seqStorePtr, size_t const nbSeq);
/* custom memory allocation functions */
static const ZSTDv07_customMem defaultCustomMem = { ZSTDv07_defaultAllocFunction, ZSTDv07_defaultFreeFunction, NULL };
#endif /* ZSTDv07_CCOMMON_H_MODULE */
/*
zstd - standard compression library
Copyright (C) 2014-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net
*/
/* ***************************************************************
* Tuning parameters
*****************************************************************/
/*!
* HEAPMODE :
* Select how default decompression function ZSTDv07_decompress() will allocate memory,
* in memory stack (0), or in memory heap (1, requires malloc())
*/
#ifndef ZSTDv07_HEAPMODE
# define ZSTDv07_HEAPMODE 1
#endif
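/* Illustrative note (not part of the original source) : HEAPMODE is meant to be set at
   build time, e.g. a hypothetical compiler flag
     -DZSTDv07_HEAPMODE=0
   selects the stack-allocated path of ZSTDv07_decompress() further below, while the
   default (1) allocates the ZSTDv07_DCtx on the heap through ZSTDv07_createDCtx(). */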
/*-*******************************************************
* Compiler specifics
*********************************************************/
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
#endif
/*-*************************************
* Macros
***************************************/
#define ZSTDv07_isError ERR_isError /* for inlining */
#define FSEv07_isError ERR_isError
#define HUFv07_isError ERR_isError
/*_*******************************************************
* Memory operations
**********************************************************/
static void ZSTDv07_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
/*-*************************************************************
* Context management
***************************************************************/
typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTDv07_dStage;
struct ZSTDv07_DCtx_s
{
FSEv07_DTable LLTable[FSEv07_DTABLE_SIZE_U32(LLFSELog)];
FSEv07_DTable OffTable[FSEv07_DTABLE_SIZE_U32(OffFSELog)];
FSEv07_DTable MLTable[FSEv07_DTABLE_SIZE_U32(MLFSELog)];
HUFv07_DTable hufTable[HUFv07_DTABLE_SIZE(HufLog)]; /* can accommodate HUFv07_decompress4X */
const void* previousDstEnd;
const void* base;
const void* vBase;
const void* dictEnd;
size_t expected;
U32 rep[3];
ZSTDv07_frameParams fParams;
blockType_t bType; /* used in ZSTDv07_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
ZSTDv07_dStage stage;
U32 litEntropy;
U32 fseEntropy;
XXH64_state_t xxhState;
size_t headerSize;
U32 dictID;
const BYTE* litPtr;
ZSTDv07_customMem customMem;
size_t litSize;
BYTE litBuffer[ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH];
BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
}; /* typedef'd to ZSTDv07_DCtx within "zstd_static.h" */
int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx);
size_t ZSTDv07_sizeofDCtx (const ZSTDv07_DCtx* dctx) { return sizeof(*dctx); }
size_t ZSTDv07_estimateDCtxSize(void) { return sizeof(ZSTDv07_DCtx); }
size_t ZSTDv07_decompressBegin(ZSTDv07_DCtx* dctx)
{
dctx->expected = ZSTDv07_frameHeaderSize_min;
dctx->stage = ZSTDds_getFrameHeaderSize;
dctx->previousDstEnd = NULL;
dctx->base = NULL;
dctx->vBase = NULL;
dctx->dictEnd = NULL;
dctx->hufTable[0] = (HUFv07_DTable)((HufLog)*0x1000001);
dctx->litEntropy = dctx->fseEntropy = 0;
dctx->dictID = 0;
{ int i; for (i=0; i<ZSTDv07_REP_NUM; i++) dctx->rep[i] = repStartValue[i]; }
return 0;
}
ZSTDv07_DCtx* ZSTDv07_createDCtx_advanced(ZSTDv07_customMem customMem)
{
ZSTDv07_DCtx* dctx;
if (!customMem.customAlloc && !customMem.customFree)
customMem = defaultCustomMem;
if (!customMem.customAlloc || !customMem.customFree)
return NULL;
dctx = (ZSTDv07_DCtx*) customMem.customAlloc(customMem.opaque, sizeof(ZSTDv07_DCtx));
if (!dctx) return NULL;
memcpy(&dctx->customMem, &customMem, sizeof(ZSTDv07_customMem));
ZSTDv07_decompressBegin(dctx);
return dctx;
}
ZSTDv07_DCtx* ZSTDv07_createDCtx(void)
{
return ZSTDv07_createDCtx_advanced(defaultCustomMem);
}
size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx)
{
if (dctx==NULL) return 0; /* support free on NULL */
dctx->customMem.customFree(dctx->customMem.opaque, dctx);
return 0; /* reserved as a potential error code in the future */
}
void ZSTDv07_copyDCtx(ZSTDv07_DCtx* dstDCtx, const ZSTDv07_DCtx* srcDCtx)
{
memcpy(dstDCtx, srcDCtx,
sizeof(ZSTDv07_DCtx) - (ZSTDv07_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH + ZSTDv07_frameHeaderSize_max)); /* no need to copy workspace */
}
/*-*************************************************************
* Decompression section
***************************************************************/
/* Frame format description
Frame Header - [ Block Header - Block ] - Frame End
1) Frame Header
- 4 bytes - Magic Number : ZSTDv07_MAGICNUMBER (defined within zstd.h)
- 1 byte - Frame Descriptor
2) Block Header
- 3 bytes, starting with a 2-bits descriptor
Uncompressed, Compressed, Frame End, unused
3) Block
See Block Format Description
4) Frame End
- 3 bytes, compatible with Block Header
*/
/* Frame Header :
1 byte - FrameHeaderDescription :
bit 0-1 : dictID (0, 1, 2 or 4 bytes)
bit 2 : checksumFlag
bit 3 : reserved (must be zero)
bit 4 : reserved (unused, can be any value)
bit 5 : Single Segment (if 1, WindowLog byte is not present)
bit 6-7 : FrameContentFieldSize (0, 2, 4, or 8)
if (SkippedWindowLog && !FrameContentFieldsize) FrameContentFieldsize=1;
Optional : WindowLog (0 or 1 byte)
bit 0-2 : octal Fractional (1/8th)
bit 3-7 : Power of 2, with 0 = 1 KB (up to 2 TB)
Optional : dictID (0, 1, 2 or 4 bytes)
Automatic adaptation
0 : no dictID
1 : 1 - 255
2 : 256 - 65535
4 : all other values
Optional : content size (0, 1, 2, 4 or 8 bytes)
0 : unknown (fcfs==0 and swl==0)
1 : 0-255 bytes (fcfs==0 and swl==1)
2 : 256 - 65535+256 (fcfs==1)
4 : 0 - 4GB-1 (fcfs==2)
8 : 0 - 16EB-1 (fcfs==3)
*/
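/* Illustrative worked example (not part of the original source) :
   a Frame Header Descriptor byte of 0x61 (binary 0110 0001) decodes as
     dictIDSizeCode = 0x61 & 3        = 1  -> 1-byte dictID field
     checksumFlag   = (0x61 >> 2) & 1 = 0  -> no content checksum
     directMode     = (0x61 >> 5) & 1 = 1  -> Single Segment, no WindowLog byte
     fcsID          = 0x61 >> 6       = 1  -> 2-byte content size field (value + 256)
   so ZSTDv07_frameHeaderSize() below returns 5 + 0 + 1 + 2 = 8 bytes. */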
/* Compressed Block, format description
Block = Literal Section - Sequences Section
Prerequisite : size of (compressed) block, maximum size of regenerated data
1) Literal Section
1.1) Header : 1-5 bytes
flags: 2 bits
00 compressed by Huff0
01 unused
10 is Raw (uncompressed)
11 is Rle
Note : using 01 => Huff0 with precomputed table ?
Note : delta map ? => compressed ?
1.1.1) Huff0-compressed literal block : 3-5 bytes
srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
srcSize < 1 KB => 3 bytes (2-2-10-10)
srcSize < 16KB => 4 bytes (2-2-14-14)
else => 5 bytes (2-2-18-18)
big endian convention
1.1.2) Raw (uncompressed) literal block header : 1-3 bytes
size : 5 bits: (IS_RAW<<6) + (0<<4) + size
12 bits: (IS_RAW<<6) + (2<<4) + (size>>8)
size&255
20 bits: (IS_RAW<<6) + (3<<4) + (size>>16)
size>>8&255
size&255
1.1.3) Rle (repeated single byte) literal block header : 1-3 bytes
size : 5 bits: (IS_RLE<<6) + (0<<4) + size
12 bits: (IS_RLE<<6) + (2<<4) + (size>>8)
size&255
20 bits: (IS_RLE<<6) + (3<<4) + (size>>16)
size>>8&255
size&255
1.1.4) Huff0-compressed literal block, using precomputed CTables : 3-5 bytes
srcSize < 1 KB => 3 bytes (2-2-10-10) => single stream
srcSize < 1 KB => 3 bytes (2-2-10-10)
srcSize < 16KB => 4 bytes (2-2-14-14)
else => 5 bytes (2-2-18-18)
big endian convention
1- CTable available (stored into workspace ?)
2- Small input (fast heuristic ? Full comparison ? depend on clevel ?)
1.2) Literal block content
1.2.1) Huff0 block, using sizes from header
See Huff0 format
1.2.2) Huff0 block, using prepared table
1.2.3) Raw content
1.2.4) single byte
2) Sequences section
TO DO
*/
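/* Illustrative worked example (not part of the original source) :
   a 3-byte Huff0 literal header { 0x0A, 0x8D, 0x20 } is decoded by
   ZSTDv07_decodeLiteralsBlock() further below (case lbt_huffman, 2-2-10-10 layout) as
     lhSize code  = (0x0A >> 4) & 3 = 0  -> 3-byte header
     singleStream =  0x0A & 16      = 0  -> 4-stream Huff0 block
     litSize      = ((0x0A & 15) << 6) + (0x8D >> 2) = 640 + 35 = 675
     litCSize     = ((0x8D & 3) << 8) + 0x20         = 256 + 32 = 288
   i.e. 675 literal bytes are regenerated from 288 compressed bytes. */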
/** ZSTDv07_frameHeaderSize() :
* srcSize must be >= ZSTDv07_frameHeaderSize_min.
* @return : size of the Frame Header */
static size_t ZSTDv07_frameHeaderSize(const void* src, size_t srcSize)
{
if (srcSize < ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong);
{ BYTE const fhd = ((const BYTE*)src)[4];
U32 const dictID= fhd & 3;
U32 const directMode = (fhd >> 5) & 1;
U32 const fcsId = fhd >> 6;
return ZSTDv07_frameHeaderSize_min + !directMode + ZSTDv07_did_fieldSize[dictID] + ZSTDv07_fcs_fieldSize[fcsId]
+ (directMode && !ZSTDv07_fcs_fieldSize[fcsId]);
}
}
/** ZSTDv07_getFrameParams() :
* decode Frame Header, or require larger `srcSize`.
* @return : 0, `fparamsPtr` is correctly filled,
* >0, `srcSize` is too small, result is expected `srcSize`,
* or an error code, which can be tested using ZSTDv07_isError() */
size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
if (srcSize < ZSTDv07_frameHeaderSize_min) return ZSTDv07_frameHeaderSize_min;
memset(fparamsPtr, 0, sizeof(*fparamsPtr));
if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
if (srcSize < ZSTDv07_skippableHeaderSize) return ZSTDv07_skippableHeaderSize; /* magic number + skippable frame length */
fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
return 0;
}
return ERROR(prefix_unknown);
}
/* ensure there is enough `srcSize` to fully read/decode frame header */
{ size_t const fhsize = ZSTDv07_frameHeaderSize(src, srcSize);
if (srcSize < fhsize) return fhsize; }
{ BYTE const fhdByte = ip[4];
size_t pos = 5;
U32 const dictIDSizeCode = fhdByte&3;
U32 const checksumFlag = (fhdByte>>2)&1;
U32 const directMode = (fhdByte>>5)&1;
U32 const fcsID = fhdByte>>6;
U32 const windowSizeMax = 1U << ZSTDv07_WINDOWLOG_MAX;
U32 windowSize = 0;
U32 dictID = 0;
U64 frameContentSize = 0;
if ((fhdByte & 0x08) != 0) /* reserved bits, which must be zero */
return ERROR(frameParameter_unsupported);
if (!directMode) {
BYTE const wlByte = ip[pos++];
U32 const windowLog = (wlByte >> 3) + ZSTDv07_WINDOWLOG_ABSOLUTEMIN;
if (windowLog > ZSTDv07_WINDOWLOG_MAX)
return ERROR(frameParameter_unsupported);
windowSize = (1U << windowLog);
windowSize += (windowSize >> 3) * (wlByte&7);
}
switch(dictIDSizeCode)
{
default: /* impossible */
case 0 : break;
case 1 : dictID = ip[pos]; pos++; break;
case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
}
switch(fcsID)
{
default: /* impossible */
case 0 : if (directMode) frameContentSize = ip[pos]; break;
case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
}
if (!windowSize) windowSize = (U32)frameContentSize;
if (windowSize > windowSizeMax)
return ERROR(frameParameter_unsupported);
fparamsPtr->frameContentSize = frameContentSize;
fparamsPtr->windowSize = windowSize;
fparamsPtr->dictID = dictID;
fparamsPtr->checksumFlag = checksumFlag;
}
return 0;
}
/** ZSTDv07_getDecompressedSize() :
* compatible with legacy mode
* @return : decompressed size if known, 0 otherwise
note : 0 can mean any of the following :
- decompressed size is not provided within frame header
- frame header unknown / not supported
- frame header not completely provided (`srcSize` too small) */
unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize)
{
ZSTDv07_frameParams fparams;
size_t const frResult = ZSTDv07_getFrameParams(&fparams, src, srcSize);
if (frResult!=0) return 0;
return fparams.frameContentSize;
}
/** ZSTDv07_decodeFrameHeader() :
* `srcSize` must be the size provided by ZSTDv07_frameHeaderSize().
* @return : 0 if success, or an error code, which can be tested using ZSTDv07_isError() */
static size_t ZSTDv07_decodeFrameHeader(ZSTDv07_DCtx* dctx, const void* src, size_t srcSize)
{
size_t const result = ZSTDv07_getFrameParams(&(dctx->fParams), src, srcSize);
if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
return result;
}
typedef struct
{
blockType_t blockType;
U32 origSize;
} blockProperties_t;
/*! ZSTDv07_getcBlockSize() :
* Provides the size of compressed block from block header `src` */
static size_t ZSTDv07_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
{
const BYTE* const in = (const BYTE*)src;  /* raw view of the 3-byte block header */
U32 cSize;
if (srcSize < ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
bpPtr->blockType = (blockType_t)((*in) >> 6);
cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
if (bpPtr->blockType == bt_end) return 0;
if (bpPtr->blockType == bt_rle) return 1;
return cSize;
}
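/* Illustrative worked example (not part of the original source) : a block header of
   { 0x00, 0x01, 0x2C } is read by ZSTDv07_getcBlockSize() above as
     blockType = 0x00 >> 6                               = bt_compressed
     cSize     = 0x2C + (0x01 << 8) + ((0x00 & 7) << 16) = 300
   i.e. a compressed block of 300 bytes follows the 3-byte header. */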
static size_t ZSTDv07_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
if (srcSize > 0) {
memcpy(dst, src, srcSize);
}
return srcSize;
}
/*! ZSTDv07_decodeLiteralsBlock() :
@return : nb of bytes read from src (< srcSize) */
static size_t ZSTDv07_decodeLiteralsBlock(ZSTDv07_DCtx* dctx,
const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
{
const BYTE* const istart = (const BYTE*) src;
if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
switch((litBlockType_t)(istart[0]>> 6))
{
case lbt_huffman:
{ size_t litSize, litCSize, singleStream=0;
U32 lhSize = (istart[0] >> 4) & 3;
if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for lhSize, + cSize (+nbSeq) */
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize is within [0..3] */
/* 2 - 2 - 10 - 10 */
lhSize=3;
singleStream = istart[0] & 16;
litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
litCSize = ((istart[1] & 3) << 8) + istart[2];
break;
case 2:
/* 2 - 2 - 14 - 14 */
lhSize=4;
litSize = ((istart[0] & 15) << 10) + (istart[1] << 2) + (istart[2] >> 6);
litCSize = ((istart[2] & 63) << 8) + istart[3];
break;
case 3:
/* 2 - 2 - 18 - 18 */
lhSize=5;
litSize = ((istart[0] & 15) << 14) + (istart[1] << 6) + (istart[2] >> 2);
litCSize = ((istart[2] & 3) << 16) + (istart[3] << 8) + istart[4];
break;
}
if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
if (HUFv07_isError(singleStream ?
HUFv07_decompress1X2_DCtx(dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) :
HUFv07_decompress4X_hufOnly (dctx->hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) ))
return ERROR(corruption_detected);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
dctx->litEntropy = 1;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case lbt_repeat:
{ size_t litSize, litCSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
if (lhSize != 1) /* only case supported for now : small litSize, single stream */
return ERROR(corruption_detected);
if (dctx->litEntropy==0)
return ERROR(dictionary_corrupted);
/* 2 - 2 - 10 - 10 */
lhSize=3;
litSize = ((istart[0] & 15) << 6) + (istart[1] >> 2);
litCSize = ((istart[1] & 3) << 8) + istart[2];
if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
{ size_t const errorCode = HUFv07_decompress1X4_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->hufTable);
if (HUFv07_isError(errorCode)) return ERROR(corruption_detected);
}
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return litCSize + lhSize;
}
case lbt_raw:
{ size_t litSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize is within [0..3] */
lhSize=1;
litSize = istart[0] & 31;
break;
case 2:
litSize = ((istart[0] & 15) << 8) + istart[1];
break;
case 3:
litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
break;
}
if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
memcpy(dctx->litBuffer, istart+lhSize, litSize);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
return lhSize+litSize;
}
/* direct reference into compressed stream */
dctx->litPtr = istart+lhSize;
dctx->litSize = litSize;
return lhSize+litSize;
}
case lbt_rle:
{ size_t litSize;
U32 lhSize = ((istart[0]) >> 4) & 3;
switch(lhSize)
{
case 0: case 1: default: /* note : default is impossible, since lhSize is within [0..3] */
lhSize = 1;
litSize = istart[0] & 31;
break;
case 2:
litSize = ((istart[0] & 15) << 8) + istart[1];
break;
case 3:
litSize = ((istart[0] & 15) << 16) + (istart[1] << 8) + istart[2];
if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
break;
}
if (litSize > ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected);
memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
dctx->litPtr = dctx->litBuffer;
dctx->litSize = litSize;
return lhSize+1;
}
default:
return ERROR(corruption_detected); /* impossible */
}
}
/*! ZSTDv07_buildSeqTable() :
@return : nb bytes read from src,
or an error code if it fails, testable with ZSTDv07_isError()
*/
static size_t ZSTDv07_buildSeqTable(FSEv07_DTable* DTable, U32 type, U32 max, U32 maxLog,
const void* src, size_t srcSize,
const S16* defaultNorm, U32 defaultLog, U32 flagRepeatTable)
{
switch(type)
{
case FSEv07_ENCODING_RLE :
if (!srcSize) return ERROR(srcSize_wrong);
if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
FSEv07_buildDTable_rle(DTable, *(const BYTE*)src); /* if *src > max, data is corrupted */
return 1;
case FSEv07_ENCODING_RAW :
FSEv07_buildDTable(DTable, defaultNorm, max, defaultLog);
return 0;
case FSEv07_ENCODING_STATIC:
if (!flagRepeatTable) return ERROR(corruption_detected);
return 0;
default : /* impossible */
case FSEv07_ENCODING_DYNAMIC :
{ U32 tableLog;
S16 norm[MaxSeq+1];
size_t const headerSize = FSEv07_readNCount(norm, &max, &tableLog, src, srcSize);
if (FSEv07_isError(headerSize)) return ERROR(corruption_detected);
if (tableLog > maxLog) return ERROR(corruption_detected);
FSEv07_buildDTable(DTable, norm, max, tableLog);
return headerSize;
} }
}
static size_t ZSTDv07_decodeSeqHeaders(int* nbSeqPtr,
FSEv07_DTable* DTableLL, FSEv07_DTable* DTableML, FSEv07_DTable* DTableOffb, U32 flagRepeatTable,
const void* src, size_t srcSize)
{
const BYTE* const istart = (const BYTE*)src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip = istart;
/* check */
if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
/* SeqHead */
{ int nbSeq = *ip++;
if (!nbSeq) { *nbSeqPtr=0; return 1; }
if (nbSeq > 0x7F) {
if (nbSeq == 0xFF) {
if (ip+2 > iend) return ERROR(srcSize_wrong);
nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
} else {
if (ip >= iend) return ERROR(srcSize_wrong);
nbSeq = ((nbSeq-0x80)<<8) + *ip++;
}
}
*nbSeqPtr = nbSeq;
}
/* FSE table descriptors */
if (ip + 4 > iend) return ERROR(srcSize_wrong); /* min : header byte + all 3 are "raw", hence no header, but at least xxLog bits per type */
{ U32 const LLtype = *ip >> 6;
U32 const OFtype = (*ip >> 4) & 3;
U32 const MLtype = (*ip >> 2) & 3;
ip++;
/* Build DTables */
{ size_t const llhSize = ZSTDv07_buildSeqTable(DTableLL, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultNorm, LL_defaultNormLog, flagRepeatTable);
if (ZSTDv07_isError(llhSize)) return ERROR(corruption_detected);
ip += llhSize;
}
{ size_t const ofhSize = ZSTDv07_buildSeqTable(DTableOffb, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultNorm, OF_defaultNormLog, flagRepeatTable);
if (ZSTDv07_isError(ofhSize)) return ERROR(corruption_detected);
ip += ofhSize;
}
{ size_t const mlhSize = ZSTDv07_buildSeqTable(DTableML, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultNorm, ML_defaultNormLog, flagRepeatTable);
if (ZSTDv07_isError(mlhSize)) return ERROR(corruption_detected);
ip += mlhSize;
} }
return ip-istart;
}
typedef struct {
size_t litLength;
size_t matchLength;
size_t offset;
} seq_t;
typedef struct {
BITv07_DStream_t DStream;
FSEv07_DState_t stateLL;
FSEv07_DState_t stateOffb;
FSEv07_DState_t stateML;
size_t prevOffset[ZSTDv07_REP_INIT];
} seqState_t;
static seq_t ZSTDv07_decodeSequence(seqState_t* seqState)
{
seq_t seq;
U32 const llCode = FSEv07_peekSymbol(&(seqState->stateLL));
U32 const mlCode = FSEv07_peekSymbol(&(seqState->stateML));
U32 const ofCode = FSEv07_peekSymbol(&(seqState->stateOffb)); /* <= maxOff, by table construction */
U32 const llBits = LL_bits[llCode];
U32 const mlBits = ML_bits[mlCode];
U32 const ofBits = ofCode;
U32 const totalBits = llBits+mlBits+ofBits;
static const U32 LL_base[MaxLL+1] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
0x2000, 0x4000, 0x8000, 0x10000 };
static const U32 ML_base[MaxML+1] = {
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
static const U32 OF_base[MaxOff+1] = {
0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
/* sequence */
{ size_t offset;
if (!ofCode)
offset = 0;
else {
offset = OF_base[ofCode] + BITv07_readBits(&(seqState->DStream), ofBits); /* <= (ZSTDv07_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream));
}
if (ofCode <= 1) {
if ((llCode == 0) & (offset <= 1)) offset = 1-offset;
if (offset) {
size_t const temp = seqState->prevOffset[offset];
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} else {
offset = seqState->prevOffset[0];
}
} else {
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
}
seq.offset = offset;
}
seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BITv07_readBits(&(seqState->DStream), mlBits) : 0); /* <= 16 bits */
if (MEM_32bits() && (mlBits+llBits>24)) BITv07_reloadDStream(&(seqState->DStream));
seq.litLength = LL_base[llCode] + ((llCode>15) ? BITv07_readBits(&(seqState->DStream), llBits) : 0); /* <= 16 bits */
if (MEM_32bits() ||
(totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BITv07_reloadDStream(&(seqState->DStream));
/* ANS state update */
FSEv07_updateState(&(seqState->stateLL), &(seqState->DStream)); /* <= 9 bits */
FSEv07_updateState(&(seqState->stateML), &(seqState->DStream)); /* <= 9 bits */
if (MEM_32bits()) BITv07_reloadDStream(&(seqState->DStream)); /* <= 18 bits */
FSEv07_updateState(&(seqState->stateOffb), &(seqState->DStream)); /* <= 8 bits */
return seq;
}
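/* Illustrative note (not part of the original source) : in ZSTDv07_decodeSequence()
   above, offset codes 0 and 1 designate repeat offsets : a decoded value of 0, 1 or 2
   selects prevOffset[0], prevOffset[1] or prevOffset[2] (rotating the history when a
   non-zero slot is chosen).  When litLength == 0, values 0 and 1 are swapped
   (the `offset = 1-offset` line), since repeating the immediately previous offset with
   no literals in between would be a wasteful encoding. */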
static
size_t ZSTDv07_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
BYTE* const oend_w = oend-WILDCOPY_OVERLENGTH;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* check */
if ((oLitEnd>oend_w) | (oMatchEnd>oend)) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
/* copy Literals */
ZSTDv07_wildcopy(op, *litPtr, sequence.litLength); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - base)) {
/* offset beyond prefix */
if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
match = dictEnd - (base-match);
if (match + sequence.matchLength <= dictEnd) {
memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = base;
if (op > oend_w || sequence.matchLength < MINMATCH) {
while (op < oMatchEnd) *op++ = *match++;
return sequenceLength;
}
} }
/* Requirement: op <= oend_w */
/* match within prefix */
if (sequence.offset < 8) {
/* close range match, overlap */
static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
int const sub2 = dec64table[sequence.offset];
op[0] = match[0];
op[1] = match[1];
op[2] = match[2];
op[3] = match[3];
match += dec32table[sequence.offset];
ZSTDv07_copy4(op+4, match);
match -= sub2;
} else {
ZSTDv07_copy8(op, match);
}
op += 8; match += 8;
if (oMatchEnd > oend-(16-MINMATCH)) {
if (op < oend_w) {
ZSTDv07_wildcopy(op, match, oend_w - op);
match += oend_w - op;
op = oend_w;
}
while (op < oMatchEnd) *op++ = *match++;
} else {
ZSTDv07_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
}
return sequenceLength;
}
static size_t ZSTDv07_decompressSequences(
ZSTDv07_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + maxDstSize;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
FSEv07_DTable* DTableLL = dctx->LLTable;
FSEv07_DTable* DTableML = dctx->MLTable;
FSEv07_DTable* DTableOffb = dctx->OffTable;
const BYTE* const base = (const BYTE*) (dctx->base);
const BYTE* const vBase = (const BYTE*) (dctx->vBase);
const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
int nbSeq;
/* Build Decoding Tables */
{ size_t const seqHSize = ZSTDv07_decodeSeqHeaders(&nbSeq, DTableLL, DTableML, DTableOffb, dctx->fseEntropy, ip, seqSize);
if (ZSTDv07_isError(seqHSize)) return seqHSize;
ip += seqHSize;
}
/* Regen sequences */
if (nbSeq) {
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) seqState.prevOffset[i] = dctx->rep[i]; }
{ size_t const errorCode = BITv07_initDStream(&(seqState.DStream), ip, iend-ip);
if (ERR_isError(errorCode)) return ERROR(corruption_detected); }
FSEv07_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
FSEv07_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
FSEv07_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);
for ( ; (BITv07_reloadDStream(&(seqState.DStream)) <= BITv07_DStream_completed) && nbSeq ; ) {
nbSeq--;
{ seq_t const sequence = ZSTDv07_decodeSequence(&seqState);
size_t const oneSeqSize = ZSTDv07_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
if (ZSTDv07_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
} }
/* check if reached exact end */
if (nbSeq) return ERROR(corruption_detected);
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTDv07_REP_INIT; i++) dctx->rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
{ size_t const lastLLSize = litEnd - litPtr;
/* if (litPtr > litEnd) return ERROR(corruption_detected); */ /* too many literals already used */
if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
if (lastLLSize > 0) {
memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
static void ZSTDv07_checkContinuity(ZSTDv07_DCtx* dctx, const void* dst)
{
if (dst != dctx->previousDstEnd) { /* not contiguous */
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dst;
dctx->previousDstEnd = dst;
}
}
static size_t ZSTDv07_decompressBlock_internal(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{ /* blockType == blockCompressed */
const BYTE* ip = (const BYTE*)src;
if (srcSize >= ZSTDv07_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong);
/* Decode literals sub-block */
{ size_t const litCSize = ZSTDv07_decodeLiteralsBlock(dctx, src, srcSize);
if (ZSTDv07_isError(litCSize)) return litCSize;
ip += litCSize;
srcSize -= litCSize;
}
return ZSTDv07_decompressSequences(dctx, dst, dstCapacity, ip, srcSize);
}
size_t ZSTDv07_decompressBlock(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
size_t dSize;
ZSTDv07_checkContinuity(dctx, dst);
dSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
dctx->previousDstEnd = (char*)dst + dSize;
return dSize;
}
/** ZSTDv07_insertBlock() :
insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
ZSTDLIBv07_API size_t ZSTDv07_insertBlock(ZSTDv07_DCtx* dctx, const void* blockStart, size_t blockSize)
{
ZSTDv07_checkContinuity(dctx, blockStart);
dctx->previousDstEnd = (const char*)blockStart + blockSize;
return blockSize;
}
static size_t ZSTDv07_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
{
if (length > dstCapacity) return ERROR(dstSize_tooSmall);
if (length > 0) {
memset(dst, byte, length);
}
return length;
}
/*! ZSTDv07_decompressFrame() :
* `dctx` must be properly initialized */
static size_t ZSTDv07_decompressFrame(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstCapacity;
BYTE* op = ostart;
size_t remainingSize = srcSize;
/* check */
if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
/* Frame Header */
{ size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
if (ZSTDv07_isError(frameHeaderSize)) return frameHeaderSize;
if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) return ERROR(srcSize_wrong);
if (ZSTDv07_decodeFrameHeader(dctx, src, frameHeaderSize)) return ERROR(corruption_detected);
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}
/* Loop on each block */
while (1) {
size_t decodedSize;
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, iend-ip, &blockProperties);
if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
ip += ZSTDv07_blockHeaderSize;
remainingSize -= ZSTDv07_blockHeaderSize;
if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
switch(blockProperties.blockType)
{
case bt_compressed:
decodedSize = ZSTDv07_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize);
break;
case bt_raw :
decodedSize = ZSTDv07_copyRawBlock(op, oend-op, ip, cBlockSize);
break;
case bt_rle :
decodedSize = ZSTDv07_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
break;
case bt_end :
/* end of frame */
if (remainingSize) return ERROR(srcSize_wrong);
decodedSize = 0;
break;
default:
return ERROR(GENERIC); /* impossible */
}
if (blockProperties.blockType == bt_end) break; /* bt_end */
if (ZSTDv07_isError(decodedSize)) return decodedSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize);
op += decodedSize;
ip += cBlockSize;
remainingSize -= cBlockSize;
}
return op-ostart;
}
/*! ZSTDv07_decompress_usingPreparedDCtx() :
* Same as ZSTDv07_decompress_usingDict, but using a reference context `preparedDCtx`, where dictionary has been loaded.
* It avoids reloading the dictionary each time.
* `preparedDCtx` must have been properly initialized using ZSTDv07_decompressBegin_usingDict().
* Requires 2 contexts : 1 for reference (preparedDCtx), which will not be modified, and 1 to run the decompression operation (dctx) */
static size_t ZSTDv07_decompress_usingPreparedDCtx(ZSTDv07_DCtx* dctx, const ZSTDv07_DCtx* refDCtx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize)
{
ZSTDv07_copyDCtx(dctx, refDCtx);
ZSTDv07_checkContinuity(dctx, dst);
return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize)
{
ZSTDv07_decompressBegin_usingDict(dctx, dict, dictSize);
ZSTDv07_checkContinuity(dctx, dst);
return ZSTDv07_decompressFrame(dctx, dst, dstCapacity, src, srcSize);
}
size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
return ZSTDv07_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
}
size_t ZSTDv07_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
#if defined(ZSTDv07_HEAPMODE) && (ZSTDv07_HEAPMODE==1)
size_t regenSize;
ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
if (dctx==NULL) return ERROR(memory_allocation);
regenSize = ZSTDv07_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
ZSTDv07_freeDCtx(dctx);
return regenSize;
#else /* stack mode */
ZSTDv07_DCtx dctx;
return ZSTDv07_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
#endif
}
/* ZSTD_errorFrameSizeInfoLegacy() :
assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
*cSize = ret;
*dBound = ZSTD_CONTENTSIZE_ERROR;
}
void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
const BYTE* ip = (const BYTE*)src;
size_t remainingSize = srcSize;
size_t nbBlocks = 0;
/* check */
if (srcSize < ZSTDv07_frameHeaderSize_min+ZSTDv07_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
/* Frame Header */
{ size_t const frameHeaderSize = ZSTDv07_frameHeaderSize(src, srcSize);
if (ZSTDv07_isError(frameHeaderSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, frameHeaderSize);
return;
}
if (MEM_readLE32(src) != ZSTDv07_MAGICNUMBER) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
return;
}
if (srcSize < frameHeaderSize+ZSTDv07_blockHeaderSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
ip += frameHeaderSize; remainingSize -= frameHeaderSize;
}
/* Loop on each block */
while (1) {
blockProperties_t blockProperties;
size_t const cBlockSize = ZSTDv07_getcBlockSize(ip, remainingSize, &blockProperties);
if (ZSTDv07_isError(cBlockSize)) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
return;
}
ip += ZSTDv07_blockHeaderSize;
remainingSize -= ZSTDv07_blockHeaderSize;
if (blockProperties.blockType == bt_end) break;
if (cBlockSize > remainingSize) {
ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
return;
}
ip += cBlockSize;
remainingSize -= cBlockSize;
nbBlocks++;
}
*cSize = ip - (const BYTE*)src;
*dBound = nbBlocks * ZSTDv07_BLOCKSIZE_ABSOLUTEMAX;
}
/*_******************************
* Streaming Decompression API
********************************/
size_t ZSTDv07_nextSrcSizeToDecompress(ZSTDv07_DCtx* dctx)
{
return dctx->expected;
}
int ZSTDv07_isSkipFrame(ZSTDv07_DCtx* dctx)
{
return dctx->stage == ZSTDds_skipFrame;
}
/** ZSTDv07_decompressContinue() :
* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
* or an error code, which can be tested using ZSTDv07_isError() */
size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
/* Sanity check */
if (srcSize != dctx->expected) return ERROR(srcSize_wrong);
if (dstCapacity) ZSTDv07_checkContinuity(dctx, dst);
switch (dctx->stage)
{
case ZSTDds_getFrameHeaderSize :
if (srcSize != ZSTDv07_frameHeaderSize_min) return ERROR(srcSize_wrong); /* impossible */
if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTDv07_MAGIC_SKIPPABLE_START) {
memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
dctx->expected = ZSTDv07_skippableHeaderSize - ZSTDv07_frameHeaderSize_min; /* magic number + skippable frame length */
dctx->stage = ZSTDds_decodeSkippableHeader;
return 0;
}
dctx->headerSize = ZSTDv07_frameHeaderSize(src, ZSTDv07_frameHeaderSize_min);
if (ZSTDv07_isError(dctx->headerSize)) return dctx->headerSize;
memcpy(dctx->headerBuffer, src, ZSTDv07_frameHeaderSize_min);
if (dctx->headerSize > ZSTDv07_frameHeaderSize_min) {
dctx->expected = dctx->headerSize - ZSTDv07_frameHeaderSize_min;
dctx->stage = ZSTDds_decodeFrameHeader;
return 0;
}
dctx->expected = 0; /* not necessary to copy more */
/* fall-through */
case ZSTDds_decodeFrameHeader:
{ size_t result;
memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
result = ZSTDv07_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize);
if (ZSTDv07_isError(result)) return result;
dctx->expected = ZSTDv07_blockHeaderSize;
dctx->stage = ZSTDds_decodeBlockHeader;
return 0;
}
case ZSTDds_decodeBlockHeader:
{ blockProperties_t bp;
size_t const cBlockSize = ZSTDv07_getcBlockSize(src, ZSTDv07_blockHeaderSize, &bp);
if (ZSTDv07_isError(cBlockSize)) return cBlockSize;
if (bp.blockType == bt_end) {
if (dctx->fParams.checksumFlag) {
U64 const h64 = XXH64_digest(&dctx->xxhState);
U32 const h32 = (U32)(h64>>11) & ((1<<22)-1);
const BYTE* const ip = (const BYTE*)src;
U32 const check32 = ip[2] + (ip[1] << 8) + ((ip[0] & 0x3F) << 16);
if (check32 != h32) return ERROR(checksum_wrong);
}
dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
} else {
dctx->expected = cBlockSize;
dctx->bType = bp.blockType;
dctx->stage = ZSTDds_decompressBlock;
}
return 0;
}
case ZSTDds_decompressBlock:
{ size_t rSize;
switch(dctx->bType)
{
case bt_compressed:
rSize = ZSTDv07_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize);
break;
case bt_raw :
rSize = ZSTDv07_copyRawBlock(dst, dstCapacity, src, srcSize);
break;
case bt_rle :
return ERROR(GENERIC); /* not yet handled */
break;
case bt_end : /* should never happen (filtered at phase 1) */
rSize = 0;
break;
default:
return ERROR(GENERIC); /* impossible */
}
dctx->stage = ZSTDds_decodeBlockHeader;
dctx->expected = ZSTDv07_blockHeaderSize;
dctx->previousDstEnd = (char*)dst + rSize;
if (ZSTDv07_isError(rSize)) return rSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
return rSize;
}
case ZSTDds_decodeSkippableHeader:
{ memcpy(dctx->headerBuffer + ZSTDv07_frameHeaderSize_min, src, dctx->expected);
dctx->expected = MEM_readLE32(dctx->headerBuffer + 4);
dctx->stage = ZSTDds_skipFrame;
return 0;
}
case ZSTDds_skipFrame:
{ dctx->expected = 0;
dctx->stage = ZSTDds_getFrameHeaderSize;
return 0;
}
default:
return ERROR(GENERIC); /* impossible */
}
}
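/* Illustrative usage sketch (editor's note, not part of the original sources) :
   the buffer-less streaming contract implemented above, as consumed by the ZBUFFv07
   layer further below. Each call must receive exactly the number of bytes announced
   by ZSTDv07_nextSrcSizeToDecompress(). `inBuf`, `outPtr`, `outRemaining` and
   readNextChunk() are hypothetical caller-side names.

    ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx();
    if (dctx != NULL) {
        ZSTDv07_decompressBegin(dctx);
        while (1) {
            size_t const toRead = ZSTDv07_nextSrcSizeToDecompress(dctx);
            if (toRead == 0) break;                       frame fully decoded
            readNextChunk(inBuf, toRead);                 fill inBuf with exactly toRead bytes
            {   size_t const produced = ZSTDv07_decompressContinue(dctx,
                                            outPtr, outRemaining, inBuf, toRead);
                if (ZSTDv07_isError(produced)) break;     decoding error
                outPtr += produced;                       output must stay contiguous
                outRemaining -= produced;
            }
        }
        ZSTDv07_freeDCtx(dctx);
    }
*/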
static size_t ZSTDv07_refDictContent(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
{
dctx->dictEnd = dctx->previousDstEnd;
dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
dctx->base = dict;
dctx->previousDstEnd = (const char*)dict + dictSize;
return 0;
}
static size_t ZSTDv07_loadEntropy(ZSTDv07_DCtx* dctx, const void* const dict, size_t const dictSize)
{
const BYTE* dictPtr = (const BYTE*)dict;
const BYTE* const dictEnd = dictPtr + dictSize;
{ size_t const hSize = HUFv07_readDTableX4(dctx->hufTable, dict, dictSize);
if (HUFv07_isError(hSize)) return ERROR(dictionary_corrupted);
dictPtr += hSize;
}
{ short offcodeNCount[MaxOff+1];
U32 offcodeMaxValue=MaxOff, offcodeLog;
size_t const offcodeHeaderSize = FSEv07_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
if (FSEv07_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
{ size_t const errorCode = FSEv07_buildDTable(dctx->OffTable, offcodeNCount, offcodeMaxValue, offcodeLog);
if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
dictPtr += offcodeHeaderSize;
}
{ short matchlengthNCount[MaxML+1];
unsigned matchlengthMaxValue = MaxML, matchlengthLog;
size_t const matchlengthHeaderSize = FSEv07_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
if (FSEv07_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
{ size_t const errorCode = FSEv07_buildDTable(dctx->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog);
if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
dictPtr += matchlengthHeaderSize;
}
{ short litlengthNCount[MaxLL+1];
unsigned litlengthMaxValue = MaxLL, litlengthLog;
size_t const litlengthHeaderSize = FSEv07_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
if (FSEv07_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
{ size_t const errorCode = FSEv07_buildDTable(dctx->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog);
if (FSEv07_isError(errorCode)) return ERROR(dictionary_corrupted); }
dictPtr += litlengthHeaderSize;
}
if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
dctx->rep[0] = MEM_readLE32(dictPtr+0); if (dctx->rep[0] == 0 || dctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
dctx->rep[1] = MEM_readLE32(dictPtr+4); if (dctx->rep[1] == 0 || dctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
dctx->rep[2] = MEM_readLE32(dictPtr+8); if (dctx->rep[2] == 0 || dctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
dictPtr += 12;
dctx->litEntropy = dctx->fseEntropy = 1;
return dictPtr - (const BYTE*)dict;
}
static size_t ZSTDv07_decompress_insertDictionary(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
{
if (dictSize < 8) return ZSTDv07_refDictContent(dctx, dict, dictSize);
{ U32 const magic = MEM_readLE32(dict);
if (magic != ZSTDv07_DICT_MAGIC) {
return ZSTDv07_refDictContent(dctx, dict, dictSize); /* pure content mode */
} }
dctx->dictID = MEM_readLE32((const char*)dict + 4);
/* load entropy tables */
dict = (const char*)dict + 8;
dictSize -= 8;
{ size_t const eSize = ZSTDv07_loadEntropy(dctx, dict, dictSize);
if (ZSTDv07_isError(eSize)) return ERROR(dictionary_corrupted);
dict = (const char*)dict + eSize;
dictSize -= eSize;
}
/* reference dictionary content */
return ZSTDv07_refDictContent(dctx, dict, dictSize);
}
size_t ZSTDv07_decompressBegin_usingDict(ZSTDv07_DCtx* dctx, const void* dict, size_t dictSize)
{
{ size_t const errorCode = ZSTDv07_decompressBegin(dctx);
if (ZSTDv07_isError(errorCode)) return errorCode; }
if (dict && dictSize) {
size_t const errorCode = ZSTDv07_decompress_insertDictionary(dctx, dict, dictSize);
if (ZSTDv07_isError(errorCode)) return ERROR(dictionary_corrupted);
}
return 0;
}
struct ZSTDv07_DDict_s {
void* dict;
size_t dictSize;
ZSTDv07_DCtx* refContext;
}; /* typedef'd to ZSTDv07_DDict within "zstd_v07.h" */
static ZSTDv07_DDict* ZSTDv07_createDDict_advanced(const void* dict, size_t dictSize, ZSTDv07_customMem customMem)
{
if (!customMem.customAlloc && !customMem.customFree)
customMem = defaultCustomMem;
if (!customMem.customAlloc || !customMem.customFree)
return NULL;
{ ZSTDv07_DDict* const ddict = (ZSTDv07_DDict*) customMem.customAlloc(customMem.opaque, sizeof(*ddict));
void* const dictContent = customMem.customAlloc(customMem.opaque, dictSize);
ZSTDv07_DCtx* const dctx = ZSTDv07_createDCtx_advanced(customMem);
if (!dictContent || !ddict || !dctx) {
customMem.customFree(customMem.opaque, dictContent);
customMem.customFree(customMem.opaque, ddict);
customMem.customFree(customMem.opaque, dctx);
return NULL;
}
memcpy(dictContent, dict, dictSize);
{ size_t const errorCode = ZSTDv07_decompressBegin_usingDict(dctx, dictContent, dictSize);
if (ZSTDv07_isError(errorCode)) {
customMem.customFree(customMem.opaque, dictContent);
customMem.customFree(customMem.opaque, ddict);
customMem.customFree(customMem.opaque, dctx);
return NULL;
} }
ddict->dict = dictContent;
ddict->dictSize = dictSize;
ddict->refContext = dctx;
return ddict;
}
}
/*! ZSTDv07_createDDict() :
* Create a digested dictionary, ready to start decompression without startup delay.
* `dict` can be released after `ZSTDv07_DDict` creation */
ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize)
{
ZSTDv07_customMem const allocator = { NULL, NULL, NULL };
return ZSTDv07_createDDict_advanced(dict, dictSize, allocator);
}
size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict)
{
ZSTDv07_freeFunction const cFree = ddict->refContext->customMem.customFree;
void* const opaque = ddict->refContext->customMem.opaque;
ZSTDv07_freeDCtx(ddict->refContext);
cFree(opaque, ddict->dict);
cFree(opaque, ddict);
return 0;
}
/*! ZSTDv07_decompress_usingDDict() :
* Decompression using a pre-digested Dictionary
* Use dictionary without significant overhead. */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTDv07_DDict* ddict)
{
return ZSTDv07_decompress_usingPreparedDCtx(dctx, ddict->refContext,
dst, dstCapacity,
src, srcSize);
}
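/* Illustrative usage sketch (editor's note, not part of the original sources) :
   typical ZSTDv07_DDict lifecycle, paying the dictionary loading cost only once.
   `dictBuf`, `dictSize`, `srcBuf`, `srcSize`, `dstBuf` and `dstCap` are hypothetical
   caller-provided buffers.

    ZSTDv07_DCtx*  const dctx  = ZSTDv07_createDCtx();
    ZSTDv07_DDict* const ddict = ZSTDv07_createDDict(dictBuf, dictSize);
    if (dctx != NULL && ddict != NULL) {
        size_t const dSize = ZSTDv07_decompress_usingDDict(dctx, dstBuf, dstCap,
                                                           srcBuf, srcSize, ddict);
        if (ZSTDv07_isError(dSize)) { report ZSTDv07_getErrorName(dSize) }
    }
    if (ddict != NULL) ZSTDv07_freeDDict(ddict);
    if (dctx  != NULL) ZSTDv07_freeDCtx(dctx);
*/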
/*
Buffered version of Zstd compression library
Copyright (C) 2015-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- zstd homepage : http://www.zstd.net/
*/
/*-***************************************************************************
* Streaming decompression howto
*
* A ZBUFFv07_DCtx object is required to track streaming operations.
* Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
* Use ZBUFFv07_decompressInit() to start a new decompression operation,
* or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
* Note that ZBUFFv07_DCtx objects can be re-init multiple times.
*
* Use ZBUFFv07_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of @dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change @dst.
* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
* or 0 when a frame is completely decoded,
* or an error code, which can be tested using ZBUFFv07_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
* output : ZBUFFv07_recommendedDOutSize == 128 KB; a block is the internal decoding unit, so this size guarantees there is always room to write a fully decoded block.
* input : ZBUFFv07_recommendedDInSize == 128 KB + 3;
* just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
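/* Illustrative usage sketch (editor's note, not part of the original sources) :
   a minimal ZBUFFv07 decompression loop over caller-provided buffers
   `inBuf`/`inSize` (compressed data) and `outBuf`/`outCap` (scratch output);
   these names are hypothetical.

    ZBUFFv07_DCtx* const zbd = ZBUFFv07_createDCtx();
    if (zbd != NULL) {
        size_t inPos = 0;
        ZBUFFv07_decompressInit(zbd);
        while (inPos < inSize) {
            size_t readSize  = inSize - inPos;    input presented on this call
            size_t writeSize = outCap;            room offered on this call
            size_t const hint = ZBUFFv07_decompressContinue(zbd,
                                        outBuf, &writeSize,
                                        (const char*)inBuf + inPos, &readSize);
            if (ZBUFFv07_isError(hint)) break;    decoding error
            inPos += readSize;                    readSize now holds nb of bytes consumed
            ( consume the writeSize bytes produced into outBuf here )
            if (hint == 0) break;                 frame completely decoded
        }
        ZBUFFv07_freeDCtx(zbd);
    }
*/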
typedef enum { ZBUFFds_init, ZBUFFds_loadHeader,
ZBUFFds_read, ZBUFFds_load, ZBUFFds_flush } ZBUFFv07_dStage;
/* *** Resource management *** */
struct ZBUFFv07_DCtx_s {
ZSTDv07_DCtx* zd;
ZSTDv07_frameParams fParams;
ZBUFFv07_dStage stage;
char* inBuff;
size_t inBuffSize;
size_t inPos;
char* outBuff;
size_t outBuffSize;
size_t outStart;
size_t outEnd;
size_t blockSize;
BYTE headerBuffer[ZSTDv07_FRAMEHEADERSIZE_MAX];
size_t lhSize;
ZSTDv07_customMem customMem;
}; /* typedef'd to ZBUFFv07_DCtx within "zstd_buffered.h" */
ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem);
ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void)
{
return ZBUFFv07_createDCtx_advanced(defaultCustomMem);
}
ZBUFFv07_DCtx* ZBUFFv07_createDCtx_advanced(ZSTDv07_customMem customMem)
{
ZBUFFv07_DCtx* zbd;
if (!customMem.customAlloc && !customMem.customFree)
customMem = defaultCustomMem;
if (!customMem.customAlloc || !customMem.customFree)
return NULL;
zbd = (ZBUFFv07_DCtx*)customMem.customAlloc(customMem.opaque, sizeof(ZBUFFv07_DCtx));
if (zbd==NULL) return NULL;
memset(zbd, 0, sizeof(ZBUFFv07_DCtx));
memcpy(&zbd->customMem, &customMem, sizeof(ZSTDv07_customMem));
zbd->zd = ZSTDv07_createDCtx_advanced(customMem);
if (zbd->zd == NULL) { ZBUFFv07_freeDCtx(zbd); return NULL; }
zbd->stage = ZBUFFds_init;
return zbd;
}
size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* zbd)
{
if (zbd==NULL) return 0; /* support free on null */
ZSTDv07_freeDCtx(zbd->zd);
if (zbd->inBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
if (zbd->outBuff) zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
zbd->customMem.customFree(zbd->customMem.opaque, zbd);
return 0;
}
/* *** Initialization *** */
size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* zbd, const void* dict, size_t dictSize)
{
zbd->stage = ZBUFFds_loadHeader;
zbd->lhSize = zbd->inPos = zbd->outStart = zbd->outEnd = 0;
return ZSTDv07_decompressBegin_usingDict(zbd->zd, dict, dictSize);
}
size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* zbd)
{
return ZBUFFv07_decompressInitDictionary(zbd, NULL, 0);
}
/* internal util function */
MEM_STATIC size_t ZBUFFv07_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
size_t const length = MIN(dstCapacity, srcSize);
if (length > 0) {
memcpy(dst, src, length);
}
return length;
}
/* *** Decompression *** */
size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* zbd,
void* dst, size_t* dstCapacityPtr,
const void* src, size_t* srcSizePtr)
{
const char* const istart = (const char*)src;
const char* const iend = istart + *srcSizePtr;
const char* ip = istart;
char* const ostart = (char*)dst;
char* const oend = ostart + *dstCapacityPtr;
char* op = ostart;
U32 notDone = 1;
while (notDone) {
switch(zbd->stage)
{
case ZBUFFds_init :
return ERROR(init_missing);
case ZBUFFds_loadHeader :
{ size_t const hSize = ZSTDv07_getFrameParams(&(zbd->fParams), zbd->headerBuffer, zbd->lhSize);
if (ZSTDv07_isError(hSize)) return hSize;
if (hSize != 0) {
size_t const toLoad = hSize - zbd->lhSize; /* if hSize!=0, hSize > zbd->lhSize */
if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */
memcpy(zbd->headerBuffer + zbd->lhSize, ip, iend-ip);
zbd->lhSize += iend-ip;
*dstCapacityPtr = 0;
return (hSize - zbd->lhSize) + ZSTDv07_blockHeaderSize; /* remaining header bytes + next block header */
}
memcpy(zbd->headerBuffer + zbd->lhSize, ip, toLoad); zbd->lhSize = hSize; ip += toLoad;
break;
} }
/* Consume header */
{ size_t const h1Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd); /* == ZSTDv07_frameHeaderSize_min */
size_t const h1Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer, h1Size);
if (ZSTDv07_isError(h1Result)) return h1Result;
if (h1Size < zbd->lhSize) { /* long header */
size_t const h2Size = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
size_t const h2Result = ZSTDv07_decompressContinue(zbd->zd, NULL, 0, zbd->headerBuffer+h1Size, h2Size);
if (ZSTDv07_isError(h2Result)) return h2Result;
} }
zbd->fParams.windowSize = MAX(zbd->fParams.windowSize, 1U << ZSTDv07_WINDOWLOG_ABSOLUTEMIN);
/* Frame header dictates buffer sizes */
{ size_t const blockSize = MIN(zbd->fParams.windowSize, ZSTDv07_BLOCKSIZE_ABSOLUTEMAX);
zbd->blockSize = blockSize;
if (zbd->inBuffSize < blockSize) {
zbd->customMem.customFree(zbd->customMem.opaque, zbd->inBuff);
zbd->inBuffSize = blockSize;
zbd->inBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, blockSize);
if (zbd->inBuff == NULL) return ERROR(memory_allocation);
}
{ size_t const neededOutSize = zbd->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2;
if (zbd->outBuffSize < neededOutSize) {
zbd->customMem.customFree(zbd->customMem.opaque, zbd->outBuff);
zbd->outBuffSize = neededOutSize;
zbd->outBuff = (char*)zbd->customMem.customAlloc(zbd->customMem.opaque, neededOutSize);
if (zbd->outBuff == NULL) return ERROR(memory_allocation);
} } }
zbd->stage = ZBUFFds_read;
/* pass-through */
/* fall-through */
case ZBUFFds_read:
{ size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
if (neededInSize==0) { /* end of frame */
zbd->stage = ZBUFFds_init;
notDone = 0;
break;
}
if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
zbd->outBuff + zbd->outStart, (isSkipFrame ? 0 : zbd->outBuffSize - zbd->outStart),
ip, neededInSize);
if (ZSTDv07_isError(decodedSize)) return decodedSize;
ip += neededInSize;
if (!decodedSize && !isSkipFrame) break; /* this was just a header */
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
break;
}
if (ip==iend) { notDone = 0; break; } /* no more input */
zbd->stage = ZBUFFds_load;
}
/* fall-through */
case ZBUFFds_load:
{ size_t const neededInSize = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
size_t const toLoad = neededInSize - zbd->inPos; /* should always be <= remaining space within inBuff */
size_t loadedSize;
if (toLoad > zbd->inBuffSize - zbd->inPos) return ERROR(corruption_detected); /* should never happen */
loadedSize = ZBUFFv07_limitCopy(zbd->inBuff + zbd->inPos, toLoad, ip, iend-ip);
ip += loadedSize;
zbd->inPos += loadedSize;
if (loadedSize < toLoad) { notDone = 0; break; } /* not enough input, wait for more */
/* decode loaded input */
{ const int isSkipFrame = ZSTDv07_isSkipFrame(zbd->zd);
size_t const decodedSize = ZSTDv07_decompressContinue(zbd->zd,
zbd->outBuff + zbd->outStart, zbd->outBuffSize - zbd->outStart,
zbd->inBuff, neededInSize);
if (ZSTDv07_isError(decodedSize)) return decodedSize;
zbd->inPos = 0; /* input is consumed */
if (!decodedSize && !isSkipFrame) { zbd->stage = ZBUFFds_read; break; } /* this was just a header */
zbd->outEnd = zbd->outStart + decodedSize;
zbd->stage = ZBUFFds_flush;
/* break; */
/* pass-through */
}
}
/* fall-through */
case ZBUFFds_flush:
{ size_t const toFlushSize = zbd->outEnd - zbd->outStart;
size_t const flushedSize = ZBUFFv07_limitCopy(op, oend-op, zbd->outBuff + zbd->outStart, toFlushSize);
op += flushedSize;
zbd->outStart += flushedSize;
if (flushedSize == toFlushSize) {
zbd->stage = ZBUFFds_read;
if (zbd->outStart + zbd->blockSize > zbd->outBuffSize)
zbd->outStart = zbd->outEnd = 0;
break;
}
/* cannot flush everything */
notDone = 0;
break;
}
default: return ERROR(GENERIC); /* impossible */
} }
/* result */
*srcSizePtr = ip-istart;
*dstCapacityPtr = op-ostart;
{ size_t nextSrcSizeHint = ZSTDv07_nextSrcSizeToDecompress(zbd->zd);
nextSrcSizeHint -= zbd->inPos; /* already loaded*/
return nextSrcSizeHint;
}
}
/* *************************************
* Tool functions
***************************************/
size_t ZBUFFv07_recommendedDInSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX + ZSTDv07_blockHeaderSize /* block header size*/ ; }
size_t ZBUFFv07_recommendedDOutSize(void) { return ZSTDv07_BLOCKSIZE_ABSOLUTEMAX; }
/*
* Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under both the BSD-style license (found in the
* LICENSE file in the root directory of this source tree) and the GPLv2 (found
* in the COPYING file in the root directory of this source tree).
* You may select, at your option, one of the above-listed licenses.
*/
#ifndef ZSTDv07_H_235446
#define ZSTDv07_H_235446
#if defined (__cplusplus)
extern "C" {
#endif
/*====== Dependency ======*/
#include <stddef.h> /* size_t */
/*====== Export for Windows ======*/
/*!
* ZSTDv07_DLL_EXPORT :
* Enable exporting of functions when building a Windows DLL
*/
#if defined(_WIN32) && defined(ZSTDv07_DLL_EXPORT) && (ZSTDv07_DLL_EXPORT==1)
# define ZSTDLIBv07_API __declspec(dllexport)
#else
# define ZSTDLIBv07_API
#endif
/* *************************************
* Simple API
***************************************/
/*! ZSTDv07_getDecompressedSize() :
* @return : decompressed size if known, 0 otherwise.
note 1 : if `0`, follow up with ZSTDv07_getFrameParams() to know precise failure cause.
note 2 : decompressed size could be wrong or intentionally modified !
always ensure results fit within application's authorized limits */
unsigned long long ZSTDv07_getDecompressedSize(const void* src, size_t srcSize);
/*! ZSTDv07_decompress() :
`compressedSize` : must be _exact_ size of compressed input, otherwise decompression will fail.
`dstCapacity` must be equal or larger than originalSize.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTDv07_isError()) */
ZSTDLIBv07_API size_t ZSTDv07_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
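/* Illustrative usage sketch (editor's note, not part of the original sources) :
   one-shot decompression of a complete frame `srcBuf`/`srcSize`, sizing the
   destination from the frame header. MAX_ALLOWED_SIZE is a hypothetical
   application-defined limit; the buffer names are hypothetical as well.

    unsigned long long const rSize = ZSTDv07_getDecompressedSize(srcBuf, srcSize);
    if (rSize == 0 || rSize > MAX_ALLOWED_SIZE) { reject : size unknown or too large }
    else {
        void* const dstBuf = malloc((size_t)rSize);
        if (dstBuf != NULL) {
            size_t const dSize = ZSTDv07_decompress(dstBuf, (size_t)rSize, srcBuf, srcSize);
            if (ZSTDv07_isError(dSize)) { report ZSTDv07_getErrorName(dSize) }
            free(dstBuf);
        }
    }
*/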
/**
ZSTDv07_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.7.x format
srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src'
cSize (output parameter) : the number of bytes that would be read to decompress this frame
or an error code if it fails (which can be tested using ZSTDv07_isError())
dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame
or ZSTD_CONTENTSIZE_ERROR if an error occurs
note : assumes `cSize` and `dBound` are _not_ NULL.
*/
void ZSTDv07_findFrameSizeInfoLegacy(const void *src, size_t srcSize,
size_t* cSize, unsigned long long* dBound);
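/* Illustrative usage sketch (editor's note, not part of the original sources) :
   measuring a v0.7 frame inside a larger buffer, e.g. to skip over it;
   `srcBuf`/`srcSize` are hypothetical caller-provided names.

    size_t cSize;
    unsigned long long dBound;
    ZSTDv07_findFrameSizeInfoLegacy(srcBuf, srcSize, &cSize, &dBound);
    if (ZSTDv07_isError(cSize)) { not a valid v0.7 frame }
    else {
        cSize  == nb of compressed bytes occupied by the frame,
        dBound == upper bound for its decompressed size;
        the next frame, if any, starts at (const char*)srcBuf + cSize.
    }
*/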
/*====== Helper functions ======*/
ZSTDLIBv07_API unsigned ZSTDv07_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
ZSTDLIBv07_API const char* ZSTDv07_getErrorName(size_t code); /*!< provides readable string from an error code */
/*-*************************************
* Explicit memory management
***************************************/
/** Decompression context */
typedef struct ZSTDv07_DCtx_s ZSTDv07_DCtx;
ZSTDLIBv07_API ZSTDv07_DCtx* ZSTDv07_createDCtx(void);
ZSTDLIBv07_API size_t ZSTDv07_freeDCtx(ZSTDv07_DCtx* dctx); /*!< @return : errorCode */
/** ZSTDv07_decompressDCtx() :
* Same as ZSTDv07_decompress(), requires an allocated ZSTDv07_DCtx (see ZSTDv07_createDCtx()) */
ZSTDLIBv07_API size_t ZSTDv07_decompressDCtx(ZSTDv07_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/*-************************
* Simple dictionary API
***************************/
/*! ZSTDv07_decompress_usingDict() :
* Decompression using a pre-defined Dictionary content (see dictBuilder).
* Dictionary must be identical to the one used during compression.
* Note : This function loads the dictionary, resulting in a significant startup delay */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDict(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
/*-**************************
* Advanced Dictionary API
****************************/
/*! ZSTDv07_createDDict() :
* Create a digested dictionary, ready to start decompression operation without startup delay.
* `dict` can be released after creation */
typedef struct ZSTDv07_DDict_s ZSTDv07_DDict;
ZSTDLIBv07_API ZSTDv07_DDict* ZSTDv07_createDDict(const void* dict, size_t dictSize);
ZSTDLIBv07_API size_t ZSTDv07_freeDDict(ZSTDv07_DDict* ddict);
/*! ZSTDv07_decompress_usingDDict() :
* Decompression using a pre-digested Dictionary
* Faster startup than ZSTDv07_decompress_usingDict(), recommended when same dictionary is used multiple times. */
ZSTDLIBv07_API size_t ZSTDv07_decompress_usingDDict(ZSTDv07_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTDv07_DDict* ddict);
typedef struct {
unsigned long long frameContentSize;
unsigned windowSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTDv07_frameParams;
ZSTDLIBv07_API size_t ZSTDv07_getFrameParams(ZSTDv07_frameParams* fparamsPtr, const void* src, size_t srcSize); /**< doesn't consume input */
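/* Illustrative usage sketch (editor's note, not part of the original sources) :
   peeking at a frame header without decoding the frame;
   `srcBuf`/`srcSize` are hypothetical caller-provided names.

    ZSTDv07_frameParams fp;
    size_t const ret = ZSTDv07_getFrameParams(&fp, srcBuf, srcSize);
    if (ZSTDv07_isError(ret)) { not a valid v0.7 frame header }
    else if (ret > 0) { srcSize too small : provide at least `ret` bytes and retry }
    else { fp.frameContentSize, fp.windowSize, fp.dictID and fp.checksumFlag are filled }
*/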
/* *************************************
* Streaming functions
***************************************/
typedef struct ZBUFFv07_DCtx_s ZBUFFv07_DCtx;
ZSTDLIBv07_API ZBUFFv07_DCtx* ZBUFFv07_createDCtx(void);
ZSTDLIBv07_API size_t ZBUFFv07_freeDCtx(ZBUFFv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZBUFFv07_decompressInit(ZBUFFv07_DCtx* dctx);
ZSTDLIBv07_API size_t ZBUFFv07_decompressInitDictionary(ZBUFFv07_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIBv07_API size_t ZBUFFv07_decompressContinue(ZBUFFv07_DCtx* dctx,
void* dst, size_t* dstCapacityPtr,
const void* src, size_t* srcSizePtr);
/*-***************************************************************************
* Streaming decompression howto
*
* A ZBUFFv07_DCtx object is required to track streaming operations.
* Use ZBUFFv07_createDCtx() and ZBUFFv07_freeDCtx() to create/release resources.
* Use ZBUFFv07_decompressInit() to start a new decompression operation,
* or ZBUFFv07_decompressInitDictionary() if decompression requires a dictionary.
* Note that ZBUFFv07_DCtx objects can be re-init multiple times.
*
* Use ZBUFFv07_decompressContinue() repetitively to consume your input.
* *srcSizePtr and *dstCapacityPtr can be any size.
* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr.
* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again.
* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`.
* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to help latency),
* or 0 when a frame is completely decoded,
* or an error code, which can be tested using ZBUFFv07_isError().
*
* Hint : recommended buffer sizes (not compulsory) : ZBUFFv07_recommendedDInSize() and ZBUFFv07_recommendedDOutSize()
* output : ZBUFFv07_recommendedDOutSize == 128 KB; a block is the internal decoding unit, so this size guarantees there is always room to write a fully decoded block.
* input : ZBUFFv07_recommendedDInSize == 128 KB + 3;
* just follow indications from ZBUFFv07_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 .
* *******************************************************************************/
/* *************************************
* Tool functions
***************************************/
ZSTDLIBv07_API unsigned ZBUFFv07_isError(size_t errorCode);
ZSTDLIBv07_API const char* ZBUFFv07_getErrorName(size_t errorCode);
/** Functions below provide recommended buffer sizes for Compression or Decompression operations.
* These sizes are just hints, they tend to offer better latency */
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDInSize(void);
ZSTDLIBv07_API size_t ZBUFFv07_recommendedDOutSize(void);
/*-*************************************
* Constants
***************************************/
#define ZSTDv07_MAGICNUMBER 0xFD2FB527 /* v0.7 */
#if defined (__cplusplus)
}
#endif
#endif /* ZSTDv07_H_235446 */
#ifndef ZSTDLIB_VISIBLE
/* Backwards compatibility with old macro name */
# ifdef ZSTDLIB_VISIBILITY
# define ZSTDLIB_VISIBLE ZSTDLIB_VISIBILITY
# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default")))
# else
# define ZSTDLIB_VISIBLE
# endif
#endif
# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
/* Deprecation warnings :
* Should these warnings be a problem, it is generally possible to disable them,
* typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
* Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
*/
#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */
#else
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
# define ZSTD_DEPRECATED(message) [[deprecated(message)]]
# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message)))
# elif defined(__GNUC__) && (__GNUC__ >= 3)
# define ZSTD_DEPRECATED(message) __attribute__((deprecated))
# elif defined(_MSC_VER)
# define ZSTD_DEPRECATED(message) __declspec(deprecated(message))
# else
# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
# define ZSTD_DEPRECATED(message)
# endif
#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
/* ZSTD_compressBound() :
* maximum compressed size in worst case single-pass scenario.
* When invoking `ZSTD_compress()` or any other one-pass compression function,
* it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
* as it eliminates one potential failure scenario,
* aka not enough room in dst buffer to write the compressed frame.
* Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE .
* In which case, ZSTD_compressBound() will return an error code
* which can be tested using ZSTD_isError().
*
* ZSTD_COMPRESSBOUND() :
* same as ZSTD_compressBound(), but as a macro.
* It can be used to produce constants, which can be useful for static allocation,
* for example to size a static array on stack.
* Will produce constant value 0 if srcSize too large.
*/
#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U)
#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
/* ZSTD_isError() :
* Most ZSTD_* functions returning a size_t value can be tested for error,
* using ZSTD_isError().
* @return 1 if error, 0 otherwise
*/
ZSTD_c_experimentalParam12=1009,
ZSTD_c_experimentalParam13=1010,
ZSTD_c_experimentalParam14=1011,
ZSTD_c_experimentalParam15=1012,
ZSTD_c_experimentalParam16=1013,
ZSTD_c_experimentalParam17=1014,
ZSTD_c_experimentalParam18=1015,
ZSTD_c_experimentalParam19=1016
/***********************************************
* Advanced decompression API (Requires v1.4.0+)
************************************************/
/*! ZSTD_decompressStream() :
* Streaming decompression function.
* Call repetitively to consume full input updating it as necessary.
* Function will update both input and output `pos` fields exposing current state via these fields:
* - `input.pos < input.size`, some input remaining and caller should provide remaining input
* on the next call.
* - `output.pos < output.size`, decoder has flushed everything it could for now.
* - `output.pos == output.size`, potentially some data left unflushed within the internal buffers,
* call ZSTD_decompressStream() again to flush remaining data to output.
* Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
*
* @return : 0 when a frame is completely decoded and fully flushed,
* or an error code, which can be tested using ZSTD_isError(),
* or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
*/
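/* Illustrative usage sketch (editor's note, not part of the original sources) :
   a minimal ZSTD_decompressStream() loop, assuming the whole compressed input is
   already in memory (`srcBuf`/`srcSize`) and `outCap` >= ZSTD_DStreamOutSize(),
   so each call is guaranteed to make forward progress. Buffer names are hypothetical.

    ZSTD_DCtx* const dctx = ZSTD_createDCtx();
    if (dctx != NULL) {
        ZSTD_inBuffer input = { srcBuf, srcSize, 0 };
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            size_t const ret = ZSTD_decompressStream(dctx, &output, &input);
            if (ZSTD_isError(ret)) break;
            ( consume the output.pos bytes written into outBuf here )
        }
        ZSTD_freeDCtx(dctx);
    }
*/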
/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+
* Provides the dictID of the dictionary loaded into `cdict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict);
/*! ZSTD_getDictID_fromDDict() : Requires v1.4.0+
* Provides the dictID of the dictionary loaded into `ddict`.
* If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
* Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
* Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
* until parameters are reset, a new dictionary is loaded, or the dictionary
* is explicitly invalidated by loading a NULL dictionary.
/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
* Create an internal DDict from dict buffer, to be used to decompress all future frames.
* The dictionary remains valid for all future frames, until explicitly invalidated, or
* a new dictionary is loaded.
*
* If called with ZSTD_d_refMultipleDDicts enabled, repeated calls of this function
* will store the DDict references in a table, and the DDict used for decompression
* will be determined at decompression time, as per the dict ID in the frame.
* The memory for the table is allocated on the first call to refDDict, and can be
* freed with ZSTD_freeDCtx().
*
* If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary
* will be managed, and referencing a dictionary effectively "discards" any previous one.
*
/* This can be overridden externally to hide static symbols. */
#ifndef ZSTDLIB_STATIC_API
# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
# define ZSTDLIB_STATIC_API __declspec(dllexport) ZSTDLIB_VISIBLE
# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
# define ZSTDLIB_STATIC_API __declspec(dllimport) ZSTDLIB_VISIBLE
# else
# define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE
# endif
#endif
typedef enum {
/* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final
* decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable
* or ZSTD_ps_disable forces the feature on or off.
*/
ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */
ZSTD_ps_enable = 1, /* Force-enable the feature */
ZSTD_ps_disable = 2 /* Do not use the feature */
} ZSTD_paramSwitch_e;
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
/*! ZSTD_decompressionMargin() :
* Zstd supports in-place decompression, where the input and output buffers overlap.
* In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
* and the input buffer must be at the end of the output buffer.
*
* _______________________ Output Buffer ________________________
* | |
* | ____ Input Buffer ____|
* | | |
* v v v
* |---------------------------------------|-----------|----------|
* ^ ^ ^
* |___________________ Output_Size ___________________|_ Margin _|
*
* NOTE: See also ZSTD_DECOMPRESSION_MARGIN().
* NOTE: This applies only to single-pass decompression through ZSTD_decompress() or
* ZSTD_decompressDCtx().
* NOTE: This function supports multi-frame input.
*
* @param src The compressed frame(s)
* @param srcSize The size of the compressed frame(s)
* @returns The decompression margin or an error that can be checked with ZSTD_isError().
*/
ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
/*! ZSTD_DECOMPRESSION_MARGIN() :
* Similar to ZSTD_decompressionMargin(), but instead of computing the margin from
* the compressed frame, compute it from the original size and the blockSizeLog.
* See ZSTD_decompressionMargin() for details.
*
* WARNING: This macro does not support multi-frame input, the input must be a single
* zstd frame. If you need that support use the function, or implement it yourself.
*
* @param originalSize The original uncompressed size of the data.
* @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX).
* Unless you explicitly set the windowLog smaller than
* ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.
*/
#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \
ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \
4 /* checksum */ + \
((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
(blockSize) /* One block of margin */ \
))
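/* Illustrative usage sketch (editor's note, not part of the original sources) :
   sizing a single buffer for in-place decompression, when the decompressed size
   `originalSize` and the compressed frame size `compressedSize` are known in advance.
   The compressed frame must be copied to the end of the shared buffer, per the
   layout documented above. Variable names are hypothetical; error handling is elided.

    size_t const margin  = ZSTD_DECOMPRESSION_MARGIN(originalSize, ZSTD_BLOCKSIZE_MAX);
    size_t const bufSize = originalSize + margin;
    char*  const buffer  = malloc(bufSize);
    if (buffer != NULL) {
        memcpy(buffer + bufSize - compressedSize, compressedFrame, compressedSize);
        {   size_t const dSize = ZSTD_decompress(buffer, originalSize,
                                    buffer + bufSize - compressedSize, compressedSize);
            if (ZSTD_isError(dSize)) { report ZSTD_getErrorName(dSize) }
        }
        free(buffer);
    }
*/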
/*! ZSTD_sequenceBound() :
* `srcSize` : size of the input buffer
* @return : upper-bound for the number of sequences that can be generated
* from a buffer of srcSize bytes
*
* note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).
*/
ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
ZSTDLIB_STATIC_API size_t
ZSTD_generateSequences( ZSTD_CCtx* zc,
ZSTD_Sequence* outSeqs, size_t outSeqsSize,
const void* src, size_t srcSize);
* Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
* @src contains the entire input (not just the literals).
* If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
* @return : final compressed size, or a ZSTD error code.
*/
ZSTDLIB_STATIC_API size_t
ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize);
/*! ZSTD_writeSkippableFrame() :
* Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
*
* Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
* ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
* As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
* the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
*
* Returns an error if destination buffer is not large enough, if the source size is not representable
* with a 4-byte unsigned int, or if the parameter magicVariant is greater than 15 (and therefore invalid).
*
* @return : number of bytes written or a ZSTD error.
*/
ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
const void* src, size_t srcSize, unsigned magicVariant);
/*! ZSTD_readSkippableFrame() :
* Retrieves the content of a zstd skippable frame starting at `src`, and writes it to `dst` buffer.
*
* The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
* i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
* in the magicVariant.
*
* Returns an error if destination buffer is not large enough, or if the frame is not skippable.
*
* @return : number of bytes written or a ZSTD error.
*/
ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
const void* src, size_t srcSize);
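/* Illustrative usage sketch (editor's note, not part of the original sources) :
   embedding application metadata in a skippable frame, then reading it back.
   `meta`/`metaSize`, `scratch`/`scratchCap` and `metaOut`/`metaOutCap` are
   hypothetical caller-provided buffers.

    size_t const written = ZSTD_writeSkippableFrame(scratch, scratchCap,
                                                    meta, metaSize, 0);     magicVariant 0
    if (!ZSTD_isError(written)) {
        unsigned variant;
        size_t const readSize = ZSTD_readSkippableFrame(metaOut, metaOutCap, &variant,
                                                        scratch, written);
        on success, readSize == metaSize and variant == 0
    }
*/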
/*! ZSTD_isSkippableFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. */
ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
* In this case, get total size by adding ZSTD_estimate?DictSize
* Note 2 : only single-threaded compression is supported.
* ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
* Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
* Size estimates assume that no external sequence producer is registered.
*/
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod);
ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod);
ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */
ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize);
ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */
ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
/*! Thread pool :
* These prototypes make it possible to share a thread pool among multiple compression contexts.
* This can limit resources for applications with multiple threads where each one uses
* a threaded compression mode (via ZSTD_c_nbWorkers parameter).
* ZSTD_createThreadPool creates a new thread pool with a given number of threads.
* Note that the lifetime of such pool must exist while being used.
* ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value
* to use an internal thread pool).
* ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer.
*/
ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads);
ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool);
ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced(
const void* dict, size_t dictSize,
ZSTD_dictLoadMethod_e dictLoadMethod,
ZSTD_dictContentType_e dictContentType,
ZSTD_customMem customMem);
ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel);
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
/*! ZSTD_CCtx_setCParams() :
* Set all parameters provided within @cparams into the working @cctx.
* Note : if modifying parameters during compression (MT mode only),
* note that changes to the .windowLog parameter will be ignored.
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
* This prototype will generate compilation warnings. */
ZSTD_DEPRECATED("use ZSTD_compress2")
ZSTDLIB_STATIC_API
size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params);
* This prototype will generate compilation warnings. */
ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
ZSTDLIB_STATIC_API
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
/* Controlled with ZSTD_paramSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never compress literals.
* Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals
* may still be emitted if huffman is not beneficial to use.)
*
* By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
* literals compression based on the compression parameters - specifically,
* negative compression levels do not use literal compression.
* Tells the compressor that input data presented with ZSTD_inBuffer
* will ALWAYS be the same between calls.
* Technically, the @src pointer must never be changed,
* and the @pos field can only be updated by zstd.
* However, it's possible to increase the @size field,
* allowing scenarios where more data can be appended after compressions starts.
* These conditions are checked by the compressor,
* and compression will fail if they are not respected.
* Also, data in the ZSTD_inBuffer within the range [src, src + pos)
* MUST not be modified during compression or it will result in data corruption.
* WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
* not be modified during compression or it will result in data corruption.
* This is because zstd needs to reference data in the ZSTD_inBuffer to find
/* ZSTD_c_useBlockSplitter
* Controlled with ZSTD_paramSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never use block splitter.
* Set to ZSTD_ps_enable to always use block splitter.
*
* By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
* block splitting based on the compression parameters.
*/
#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13
/* ZSTD_c_useRowMatchFinder
* Controlled with ZSTD_paramSwitch_e enum.
* Default is ZSTD_ps_auto.
* Set to ZSTD_ps_disable to never use row-based matchfinder.
* Set to ZSTD_ps_enable to force usage of row-based matchfinder.
*
* By default, in ZSTD_ps_auto, the library will decide at runtime whether to use
* the row-based matchfinder based on support for SIMD instructions and the window log.
* Note that this only pertains to compression strategies: greedy, lazy, and lazy2
*/
#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14
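/* Usage sketch (illustrative only): forcing the row-based matchfinder together with
* a strategy it applies to (greedy, lazy, or lazy2). Assumes ZSTD_STATIC_LINKING_ONLY
* and <assert.h>; buffers are caller-provided.
*
*     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, ZSTD_lazy2);
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable);
*     {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
*         assert(!ZSTD_isError(cSize));   // error handling abbreviated
*     }
*     ZSTD_freeCCtx(cctx);
*/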
/* ZSTD_c_deterministicRefPrefix
* Default is 0 == disabled. Set to 1 to enable.
*
* Zstd produces different results for prefix compression when the prefix is
* directly adjacent to the data about to be compressed vs. when it isn't.
* This is because zstd detects that the two buffers are contiguous and it can
* use a more efficient match finding algorithm. However, this produces different
* results than when the two buffers are non-contiguous. This flag forces zstd
* to always load the prefix in non-contiguous mode, even if it happens to be
* adjacent to the data, to guarantee determinism.
*
* If you really care about determinism when using a dictionary or prefix,
* like when doing delta compression, you should select this option. It comes
* at a speed penalty of about ~2.5% if the dictionary and data happened to be
* contiguous, and is free if they weren't contiguous. We don't expect that
* intentionally making the dictionary and data contiguous will be worth the
* cost to memcpy() the data.
*/
#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
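/* Usage sketch (illustrative only): delta compression against a prefix with
* deterministic output regardless of whether the prefix happens to sit right before
* the input in memory. Note that a prefix reference is single-use: it must be set
* again before each compression. prevVersion/newVersion and the output buffer are
* hypothetical caller-provided variables.
*
*     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_deterministicRefPrefix, 1);
*     ZSTD_CCtx_refPrefix(cctx, prevVersion, prevVersionSize);   // referenced, not copied
*     {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, newVersion, newVersionSize);
*         assert(!ZSTD_isError(cSize));   // error handling abbreviated
*     }
*     ZSTD_freeCCtx(cctx);
*/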
/* ZSTD_c_prefetchCDictTables
* Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto.
*
* In some situations, zstd uses CDict tables in-place rather than copying them
* into the working context. (See docs on ZSTD_dictAttachPref_e above for details).
* In such situations, compression speed is seriously impacted when CDict tables are
* "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables
* when they are used in-place.
*
* For sufficiently small inputs, the cost of the prefetch will outweigh the benefit.
* For sufficiently large inputs, zstd will by default memcpy() CDict tables
* into the working context, so there is no need to prefetch. This parameter is
* targeted at a middle range of input sizes, where a prefetch is cheap enough to be
* useful but memcpy() is too expensive. The exact range of input sizes where this
* makes sense is best determined by careful experimentation.
*
* Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable,
* but in the future zstd may conditionally enable this feature via an auto-detection
* heuristic for cold CDicts.
* Use ZSTD_ps_disable to opt out of prefetching under any circumstances.
*/
#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16
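/* Usage sketch (illustrative only): opting in to CDict table prefetching for a
* workload of many small inputs compressed with the same dictionary, where the CDict
* tables are likely used in place and may be cold. dictBuffer and the input/output
* buffers are caller-provided; error handling is abbreviated.
*
*     ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuffer, dictSize, 3);
*     ZSTD_CCtx*  const cctx  = ZSTD_createCCtx();
*     ZSTD_CCtx_refCDict(cctx, cdict);
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_prefetchCDictTables, ZSTD_ps_enable);
*     {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
*         assert(!ZSTD_isError(cSize));
*     }
*     ZSTD_freeCCtx(cctx);
*     ZSTD_freeCDict(cdict);
*/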
/* ZSTD_c_enableSeqProducerFallback
* Allowed values are 0 (disable) and 1 (enable). The default setting is 0.
*
* Controls whether zstd will fall back to an internal sequence producer if an
* external sequence producer is registered and returns an error code. This fallback
* is block-by-block: the internal sequence producer will only be called for blocks
* where the external sequence producer returns an error code. Fallback parsing will
* follow any other cParam settings, such as compression level, the same as in a
* normal (fully-internal) compression operation.
*
* The user is strongly encouraged to read the full Block-Level Sequence Producer API
* documentation (below) before setting this parameter. */
#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17
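/* Usage sketch (illustrative only): enabling fallback on an existing ZSTD_CCtx* cctx,
* so that a registered external sequence producer which errors on some blocks does not
* fail the whole compression. See the registration sketch further below for how a
* producer is attached to the CCtx.
*
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
*/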
/* ZSTD_c_maxBlockSize
* Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
* The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
*
* This parameter can be used to set an upper bound on the block size
* that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper
* bounds greater than ZSTD_BLOCKSIZE_MAX or lower than 1KB (doing so would make
* compressBound() inaccurate). Currently it is only meant to be used for testing.
*
*/
#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18
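/* Usage sketch (illustrative only, testing-oriented as noted above): capping the
* block size at 32 KB on an existing ZSTD_CCtx* cctx. The value must lie within
* [1KB, ZSTD_BLOCKSIZE_MAX]; 0 restores the default.
*
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 32 * 1024);
*/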
/* ZSTD_c_searchForExternalRepcodes
* This parameter affects how zstd parses external sequences, such as sequences
* provided through the compressSequences() API or from an external block-level
* sequence producer.
*
* If set to ZSTD_ps_enable, the library will check for repeated offsets in
* external sequences, even if those repcodes are not explicitly indicated in
* the "rep" field. Note that this is the only way to exploit repcode matches
* while using compressSequences() or an external sequence producer, since zstd
* currently ignores the "rep" field of external sequences.
*
* If set to ZSTD_ps_disable, the library will not exploit repeated offsets in
* external sequences, regardless of whether the "rep" field has been set. This
* reduces sequence compression overhead by about 25% while sacrificing some
* compression ratio.
*
* The default value is ZSTD_ps_auto, for which the library will enable/disable
* based on compression level.
*
* Note: for now, this param only has an effect if ZSTD_c_blockDelimiters is
* set to ZSTD_sf_explicitBlockDelimiters. That may change in the future.
*/
#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19
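/* Usage sketch (illustrative only): asking the library to search for repcodes when
* compressing externally provided sequences. inSeqs/nbSeqs are assumed to describe a
* valid parse of src using explicit block delimiters and are caller-provided; error
* handling is abbreviated to an assert.
*
*     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchForExternalRepcodes, ZSTD_ps_enable);
*     {   size_t const cSize = ZSTD_compressSequences(cctx, dst, dstCapacity,
*                                                     inSeqs, nbSeqs, src, srcSize);
*         assert(!ZSTD_isError(cSize));
*     }
*     ZSTD_freeCCtx(cctx);
*/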
ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void);
ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType);
ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType);
/* ZSTD_d_refMultipleDDicts
* Experimental parameter.
* Default is 0 == disabled. Set to 1 to enable
*
* If enabled and dctx is allocated on the heap, then additional memory will be allocated
* to store references to multiple ZSTD_DDict. That is, multiple calls of ZSTD_DCtx_refDDict()
* using a given ZSTD_DCtx, rather than overwriting the previous DDict reference, will instead
* store all references. At decompression time, the appropriate DDict is selected
* from the set based on the dictID announced in the frame.
*
* Usage is simply calling ZSTD_DCtx_refDDict() with multiple DDicts.
*
* Param takes values of type ZSTD_refMultipleDDicts_e
*
* WARNING: Enabling this parameter and calling ZSTD_DCtx_refDDict() will trigger memory
* allocation for the hash table. ZSTD_freeDCtx() also frees this memory.
* Memory is allocated as per ZSTD_DCtx::customMem.
*
* Although the library allocates memory for this table, the user is still responsible for
* memory management of the underlying ZSTD_DDict objects themselves.
*/
#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
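/* Usage sketch (illustrative only): holding several DDicts in one DCtx so the right
* one is selected from the frame's dictID at decompression time. ddict1/ddict2 are
* caller-created with ZSTD_createDDict(); error handling is abbreviated.
*
*     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
*     ZSTD_DCtx_setParameter(dctx, ZSTD_d_refMultipleDDicts, 1);   // enable
*     ZSTD_DCtx_refDDict(dctx, ddict1);
*     ZSTD_DCtx_refDDict(dctx, ddict2);   // both references are retained
*     {   size_t const dSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
*         assert(!ZSTD_isError(dSize));
*     }
*     ZSTD_freeDCtx(dctx);   // also frees the internal reference table
*/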
/* ZSTD_d_disableHuffmanAssembly
* Set to 1 to disable the Huffman assembly implementation.
* The default value is 0, which allows zstd to use the Huffman assembly
* implementation if available.
*
* This parameter can be used to disable Huffman assembly at runtime.
* If you want to disable it at compile time you can define the macro
* ZSTD_DISABLE_ASM.
*/
#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5
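/* Usage sketch (illustrative only): disabling the Huffman assembly path at runtime
* on an existing ZSTD_DCtx* dctx, e.g. to benchmark the portable C decoder.
*
*     ZSTD_DCtx_setParameter(dctx, ZSTD_d_disableHuffmanAssembly, 1);
*/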
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
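/* Usage sketch (illustrative only, not normative): a minimal bufferless streaming
* compression loop. The caller owns all buffers; each dst area passed to
* ZSTD_compressContinue() must be able to hold ZSTD_compressBound() of that chunk,
* and previously submitted input must stay accessible and unmodified up to the
* window size. chunkPtrs/chunkSizes/nbChunks and dstBuffer are hypothetical
* caller-provided variables; error handling is abbreviated to asserts.
*
*     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
*     unsigned char* op = (unsigned char*)dstBuffer;
*     size_t n, r;
*     r = ZSTD_compressBegin(cctx, 3);                    // start a frame at level 3
*     assert(!ZSTD_isError(r));
*     for (n = 0; n < nbChunks; n++) {
*         r = ZSTD_compressContinue(cctx, op, ZSTD_compressBound(chunkSizes[n]),
*                                   chunkPtrs[n], chunkSizes[n]);
*         assert(!ZSTD_isError(r));
*         op += r;
*     }
*     r = ZSTD_compressEnd(cctx, op, ZSTD_compressBound(0), NULL, 0);   // last block + epilogue
*     assert(!ZSTD_isError(r));
*     op += r;
*     ZSTD_freeCCtx(cctx);
*/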
ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
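/* Usage sketch (illustrative only, not normative): a minimal bufferless streaming
* decompression loop driven by ZSTD_nextSrcSizeToDecompress(). Each call to
* ZSTD_decompressContinue() must receive exactly the number of bytes announced.
* ip/iend point into a caller-provided compressed buffer, op/oend into a
* caller-provided output buffer large enough for the frame; error handling is
* abbreviated to asserts.
*
*     ZSTD_frameHeader zfh;
*     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
*     size_t r = ZSTD_getFrameHeader(&zfh, ip, (size_t)(iend - ip));
*     assert(r == 0);                                  // 0 : header fully decoded
*     r = ZSTD_decompressBegin(dctx);
*     assert(!ZSTD_isError(r));
*     {   size_t next;
*         while ((next = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
*             size_t const produced = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, next);
*             assert(!ZSTD_isError(produced));
*             ip += next;
*             op += produced;                          // may be 0 (e.g. for block headers)
*         }
*     }
*     ZSTD_freeDCtx(dctx);
*/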
ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API *********************
*
* *** OVERVIEW ***
* The Block-Level Sequence Producer API allows users to provide their own custom
* sequence producer which libzstd invokes to process each block. The produced list
* of sequences (literals and matches) is then post-processed by libzstd to produce
* valid compressed blocks.
*
* This block-level offload API is a more granular complement of the existing
* frame-level offload API compressSequences() (introduced in v1.5.1). It offers
* an easier migration story for applications already integrated with libzstd: the
* user application continues to invoke the same compression functions
* ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits
* from the specific advantages of the external sequence producer. For example,
* the sequence producer could be tuned to take advantage of known characteristics
* of the input, to offer better speed / ratio, or could leverage hardware
* acceleration not available within libzstd itself.
*
* See contrib/externalSequenceProducer for an example program employing the
* Block-Level Sequence Producer API.
*
* *** USAGE ***
* The user is responsible for implementing a function of type
* ZSTD_sequenceProducer_F. For each block, zstd will pass the following
* arguments to the user-provided function:
*
* - sequenceProducerState: a pointer to a user-managed state for the sequence
* producer.
*
* - outSeqs, outSeqsCapacity: an output buffer for the sequence producer.
* outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory
* backing outSeqs is managed by the CCtx.
*
* - src, srcSize: an input buffer for the sequence producer to parse.
* srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX.
*
* - dict, dictSize: a history buffer, which may be empty, which the sequence
* producer may reference as it parses the src buffer. Currently, zstd will
* always pass dictSize == 0 into external sequence producers, but this will
* change in the future.
*
* - compressionLevel: a signed integer representing the zstd compression level
* set by the user for the current operation. The sequence producer may choose
* to use this information to change its compression strategy and speed/ratio
* tradeoff. Note: the compression level does not reflect zstd parameters set
* through the advanced API.
*
* - windowSize: a size_t representing the maximum allowed offset for external
* sequences. Note that sequence offsets are sometimes allowed to exceed the
* windowSize if a dictionary is present, see doc/zstd_compression_format.md
* for details.
*
* The user-provided function shall return a size_t representing the number of
* sequences written to outSeqs. This return value will be treated as an error
* code if it is greater than outSeqsCapacity. The return value must be non-zero
* if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided
* for convenience, but any value greater than outSeqsCapacity will be treated as
* an error code.
*
* If the user-provided function does not return an error code, the sequences
* written to outSeqs must be a valid parse of the src buffer. Data corruption may
* occur if the parse is not valid. A parse is defined to be valid if the
* following conditions hold:
* - The sum of matchLengths and literalLengths must equal srcSize.
* - All sequences in the parse, except for the final sequence, must have
* matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have
* matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0.
* - All offsets must respect the windowSize parameter as specified in
* doc/zstd_compression_format.md.
* - If the final sequence has matchLength == 0, it must also have offset == 0.
*
* zstd will only validate these conditions (and fail compression if they do not
* hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence
* validation has a performance cost.
*
* If the user-provided function returns an error, zstd will either fall back
* to an internal sequence producer or fail the compression operation. The user can
* choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback
* cParam. Fallback compression will follow any other cParam settings, such as
* compression level, the same as in a normal compression operation.
*
* The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F
* function by calling
* ZSTD_registerSequenceProducer(cctx,
* sequenceProducerState,
* sequenceProducer)
* This setting will persist until the next parameter reset of the CCtx.
*
* The sequenceProducerState must be initialized by the user before calling
* ZSTD_registerSequenceProducer(). The user is responsible for destroying the
* sequenceProducerState.
*
* *** LIMITATIONS ***
* This API is compatible with all zstd compression APIs which respect advanced parameters.
* However, there are three limitations:
*
* First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported.
* COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level
* external sequence producer.
* - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some
* cases (see its documentation for details). Users must explicitly set
* ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external
* sequence producer is registered.
* - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default
* whenever the window size (controlled by ZSTD_c_windowLog) is below 128MB, but that's subject to change. Users should
* check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence
* Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog).
*
* Second, history buffers are not currently supported. Concretely, zstd will always pass
* dictSize == 0 to the external sequence producer (for now). This has two implications:
* - Dictionaries are not currently supported. Compression will *not* fail if the user
* references a dictionary, but the dictionary won't have any effect.
* - Stream history is not currently supported. All advanced compression APIs, including
* streaming APIs, work with external sequence producers, but each block is treated as
* an independent chunk without history from previous blocks.
*
* Third, multi-threading within a single compression is not currently supported. In other words,
* COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered.
* Multi-threading across compressions is fine: simply create one CCtx per thread.
*
* Long-term, we plan to overcome all three limitations. There is no technical blocker to
* overcoming them. It is purely a question of engineering effort.
*/
typedef size_t ZSTD_sequenceProducer_F (
void* sequenceProducerState,
ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
const void* src, size_t srcSize,
const void* dict, size_t dictSize,
int compressionLevel,
size_t windowSize
);
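/* Illustrative sketch (not part of the library): a trivial external sequence producer
* which emits the whole block as a single literals-only sequence. This satisfies the
* validity rules above (litLength + matchLength == srcSize; the final sequence has
* matchLength == 0 and offset == 0), although it brings no matching benefit by itself.
* The function name is hypothetical.
*
*     static size_t simpleSequenceProducer(
*         void* sequenceProducerState,
*         ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
*         const void* src, size_t srcSize,
*         const void* dict, size_t dictSize,
*         int compressionLevel,
*         size_t windowSize)
*     {
*         (void)sequenceProducerState; (void)src; (void)dict; (void)dictSize;
*         (void)compressionLevel; (void)windowSize;
*         if (srcSize == 0) return 0;
*         if (outSeqsCapacity < 1) return ZSTD_SEQUENCE_PRODUCER_ERROR;
*         outSeqs[0].offset      = 0;
*         outSeqs[0].litLength   = (unsigned)srcSize;   // srcSize <= ZSTD_BLOCKSIZE_MAX
*         outSeqs[0].matchLength = 0;
*         outSeqs[0].rep         = 0;
*         return 1;   // one sequence written
*     }
*/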
/*! ZSTD_registerSequenceProducer() :
* Instruct zstd to use a block-level external sequence producer function.
*
* The sequenceProducerState must be initialized by the caller, and the caller is
* responsible for managing its lifetime. This parameter is sticky across
* compressions. It will remain set until the user explicitly resets compression
* parameters.
*
* Sequence producer registration is considered to be an "advanced parameter",
* part of the "advanced API". This means it will only have an effect on compression
* APIs which respect advanced parameters, such as compress2() and compressStream2().
* Older compression APIs such as compressCCtx(), which predate the introduction of
* "advanced parameters", will ignore any external sequence producer setting.
*
* The sequence producer can be "cleared" by registering a NULL function pointer. This
* removes all limitations described above in the "LIMITATIONS" section of the API docs.
*
* The user is strongly encouraged to read the full API documentation (above) before
* calling this function. */
ZSTDLIB_STATIC_API void
ZSTD_registerSequenceProducer(
ZSTD_CCtx* cctx,
void* sequenceProducerState,
ZSTD_sequenceProducer_F* sequenceProducer
);
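/* Usage sketch (illustrative only): registering the hypothetical simpleSequenceProducer
* from the sketch above, with fallback enabled so a producer error does not fail the
* compression. Buffers are caller-provided; error handling is abbreviated.
*
*     ZSTD_CCtx* const cctx = ZSTD_createCCtx();
*     ZSTD_registerSequenceProducer(cctx, NULL, simpleSequenceProducer);
*     ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
*     {   size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
*         assert(!ZSTD_isError(cSize));
*     }
*     ZSTD_registerSequenceProducer(cctx, NULL, NULL);   // clear the producer
*     ZSTD_freeCCtx(cctx);
*/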
/* Hashset for storing references to multiple ZSTD_DDict within ZSTD_DCtx */
typedef struct {
const ZSTD_DDict** ddictPtrTable;
size_t ddictPtrTableSize;
size_t ddictPtrCount;
} ZSTD_DDictHashSet;
#ifndef ZSTD_DECODER_INTERNAL_BUFFER
# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16)
#endif
#define ZSTD_LBMIN 64
#define ZSTD_LBMAX (128 << 10)
/* extra buffer, compensates when dst is not large enough to store litBuffer */
#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX)
typedef enum {
ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */
ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */
ZSTD_split = 2 /* Split between litExtraBuffer and dst */
} ZSTD_litLocation_e;
BYTE* litBuffer;
const BYTE* litBufferEnd;
ZSTD_litLocation_e litBufferLocation;
BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */
/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */
static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize,
const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately)
{
if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH)
{
/* room for litbuffer to fit without read faulting */
dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize;
dctx->litBufferLocation = ZSTD_in_dst;
}
else if (litSize > ZSTD_LITBUFFEREXTRASIZE)
{
/* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
if (splitImmediately) {
/* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE;
}
else {
/* initially this will be stored entirely in dst during huffman decoding, it will partially be shifted to litExtraBuffer after */
dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize;
dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize;
}
dctx->litBufferLocation = ZSTD_split;
}
else
{
/* fits entirely within litExtraBuffer, so no split is necessary */
dctx->litBuffer = dctx->litExtraBuffer;
dctx->litBufferEnd = dctx->litBuffer + litSize;
dctx->litBufferLocation = ZSTD_not_in_dst;
}
}
* Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored
* in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current
* block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being
* stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
*
}
if (dctx->litBufferLocation == ZSTD_split)
{
ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE);
dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH;
dctx->litBufferEnd -= WILDCOPY_OVERLENGTH;
ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize);
if (dctx->litBufferLocation == ZSTD_split)
{
ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE);
}
else
{
ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize);
}
ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, "");
ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1);
if (dctx->litBufferLocation == ZSTD_split)
{
ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE);
ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE);
}
else
{
ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize);
}
op = oend_w;
op += oend_w - op;
}
/* Handle the leftovers. */
while (op < oend) *op++ = *ip++;
}
/* ZSTD_safecopyDstBeforeSrc():
* This version allows overlap with dst before src, or handles the non-overlap case with dst after src
* Kept separate from more common ZSTD_safecopy case to avoid performance impact to the safecopy common case */
static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) {
ptrdiff_t const diff = op - ip;
BYTE* const oend = op + length;
if (length < 8 || diff > -8) {
/* Handle short lengths, close overlaps, and dst not before src. */
while (op < oend) *op++ = *ip++;
return;
}
if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) {
ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap);
ip += oend - WILDCOPY_OVERLENGTH - op;
op += oend - WILDCOPY_OVERLENGTH - op;
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
}
}
ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst);
return sequenceLength;
}
/* ZSTD_execSequenceEndSplitLitBuffer():
* This version is intended for cases where the litBuffer is still split. It is kept separate to avoid a performance impact on the common case.
*/
FORCE_NOINLINE
size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
/* bounds checks : careful of address space overflow in 32-bit mode */
RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer");
RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer");
assert(op < op + sequenceLength);
assert(oLitEnd < op + sequenceLength);
/* copy literals */
RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer");
ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength);
op = oLitEnd;
*litPtr = iLitEnd;
/* copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix */
RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, "");
match = dictEnd - (prefixStart - match);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
}
}
BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
const BYTE* const iLitEnd = *litPtr + sequence.litLength;
const BYTE* match = oLitEnd - sequence.offset;
assert(op != NULL /* Precondition */);
assert(oend_w < oend /* No underflow */);
#if defined(__aarch64__)
/* prefetch sequence starting from match that will be used for copy later */
PREFETCH_L1(match);
#endif
/* Handle edge cases in a slow path:
* - Read beyond end of literals
* - Match end is within WILDCOPY_OVERLENGTH of oend
* - 32-bit mode and the match length overflows
*/
if (UNLIKELY(
iLitEnd > litLimit ||
oMatchEnd > oend_w ||
(MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH)))
return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd);
/* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */
assert(op <= oLitEnd /* No overflow */);
assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */);
assert(oMatchEnd <= oend /* No underflow */);
assert(iLitEnd <= litLimit /* Literal length is in bounds */);
assert(oLitEnd <= oend_w /* Can wildcopy literals */);
assert(oMatchEnd <= oend_w /* Can wildcopy matches */);
/* Copy Literals:
* Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9.
* We likely don't need the full 32-byte wildcopy.
*/
assert(WILDCOPY_OVERLENGTH >= 16);
ZSTD_copy16(op, (*litPtr));
if (UNLIKELY(sequence.litLength > 16)) {
ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap);
}
op = oLitEnd;
*litPtr = iLitEnd; /* update for next sequence */
/* Copy Match */
if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
/* offset beyond prefix -> go into extDict */
RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, "");
match = dictEnd + (match - prefixStart);
if (match + sequence.matchLength <= dictEnd) {
ZSTD_memmove(oLitEnd, match, sequence.matchLength);
return sequenceLength;
}
/* span extDict & currentPrefixSegment */
{ size_t const length1 = dictEnd - match;
ZSTD_memmove(oLitEnd, match, length1);
op = oLitEnd + length1;
sequence.matchLength -= length1;
match = prefixStart;
}
}
/* Match within prefix of 1 or more bytes */
assert(op <= oMatchEnd);
assert(oMatchEnd <= oend_w);
assert(match >= prefixStart);
assert(sequence.matchLength >= 1);
/* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy
* without overlap checking.
*/
if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) {
/* We bet on a full wildcopy for matches, since we expect matches to be
* longer than literals (in general). In silesia, ~10% of matches are longer
* than 16 bytes.
*/
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap);
return sequenceLength;
}
assert(sequence.offset < WILDCOPY_VECLEN);
/* Copy 8 bytes and spread the offset to be >= 8. */
ZSTD_overlapCopy8(&op, &match, sequence.offset);
/* If the match length is > 8 bytes, then continue with the wildcopy. */
if (sequence.matchLength > 8) {
assert(op < oMatchEnd);
ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst);
}
return sequenceLength;
}
HINT_INLINE
size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit,
const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd)
{
BYTE* const oLitEnd = op + sequence.litLength;
size_t const sequenceLength = sequence.litLength + sequence.matchLength;
BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state];
ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state];
ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state];
U32 const llBase = llDInfo.baseValue;
U32 const mlBase = mlDInfo.baseValue;
U32 const ofBase = ofDInfo.baseValue;
BYTE const llBits = llDInfo.nbAdditionalBits;
BYTE const mlBits = mlDInfo.nbAdditionalBits;
BYTE const ofBits = ofDInfo.nbAdditionalBits;
BYTE const totalBits = llBits+mlBits+ofBits;
/*
* ZSTD_seqSymbol is a 64-bit wide structure, so on aarch64 it can be loaded
* in a single operation and its fields extracted by simply shifting or
* bit-extracting.
* GCC doesn't recognize this and generates unnecessary ldr/ldrb/ldrh
* operations that cause a performance drop. This can be avoided by using this
* ZSTD_memcpy hack.
*/
#if defined(__aarch64__) && (defined(__GNUC__) && !defined(__clang__))
ZSTD_seqSymbol llDInfoS, mlDInfoS, ofDInfoS;
ZSTD_seqSymbol* const llDInfo = &llDInfoS;
ZSTD_seqSymbol* const mlDInfo = &mlDInfoS;
ZSTD_seqSymbol* const ofDInfo = &ofDInfoS;
ZSTD_memcpy(llDInfo, seqState->stateLL.table + seqState->stateLL.state, sizeof(ZSTD_seqSymbol));
ZSTD_memcpy(mlDInfo, seqState->stateML.table + seqState->stateML.state, sizeof(ZSTD_seqSymbol));
ZSTD_memcpy(ofDInfo, seqState->stateOffb.table + seqState->stateOffb.state, sizeof(ZSTD_seqSymbol));
#else
const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state;
const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state;
const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state;
#endif
seq.matchLength = mlDInfo->baseValue;
seq.litLength = llDInfo->baseValue;
{ U32 const ofBase = ofDInfo->baseValue;
BYTE const llBits = llDInfo->nbAdditionalBits;
BYTE const mlBits = mlDInfo->nbAdditionalBits;
BYTE const ofBits = ofDInfo->nbAdditionalBits;
BYTE const totalBits = llBits+mlBits+ofBits;
/* sequence */
{ size_t offset;
if (ofBits > 1) {
ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
assert(ofBits <= MaxOff);
if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
BIT_reloadDStream(&seqState->DStream);
if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
U16 const llNext = llDInfo->nextState;
U16 const mlNext = mlDInfo->nextState;
U16 const ofNext = ofDInfo->nextState;
U32 const llnbBits = llDInfo->nbBits;
U32 const mlnbBits = mlDInfo->nbBits;
U32 const ofnbBits = ofDInfo->nbBits;
assert(llBits <= MaxLLBits);
assert(mlBits <= MaxMLBits);
assert(ofBits <= MaxOff);
/*
* gcc has better branch and block analyzers, so marking likeliness is
* sometimes only valuable for clang, where it yields around 3-4% better
* performance.
*/
/* sequence */
{ size_t offset;
#if defined(__clang__)
if (LIKELY(ofBits > 1)) {
#else
if (ofBits > 1) {
#endif
ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 > LONG_OFFSETS_MAX_EXTRA_BITS_32);
ZSTD_STATIC_ASSERT(STREAM_ACCUMULATOR_MIN_32 - LONG_OFFSETS_MAX_EXTRA_BITS_32 >= MaxMLBits);
if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
/* Always read extra bits, this keeps the logic simple,
* avoids branches, and avoids accidentally reading 0 bits.
*/
U32 const extraBits = LONG_OFFSETS_MAX_EXTRA_BITS_32;
offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
BIT_reloadDStream(&seqState->DStream);
offset += BIT_readBitsFast(&seqState->DStream, extraBits);
} else {
offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
}
seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset;
} else {
U32 const ll0 = (llBase == 0);
if (LIKELY((ofBits == 0))) {
if (LIKELY(!ll0))
offset = seqState->prevOffset[0];
else {
offset = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
U32 const ll0 = (llDInfo->baseValue == 0);
if (LIKELY((ofBits == 0))) {
offset = seqState->prevOffset[ll0];
seqState->prevOffset[1] = seqState->prevOffset[!ll0];
}
} else {
offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1);
{ size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
seqState->prevOffset[1] = seqState->prevOffset[0];
seqState->prevOffset[0] = offset = temp;
} } }
seq.offset = offset;
}
seq.matchLength = mlBase;
if (mlBits > 0)
seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
#if defined(__clang__)
if (UNLIKELY(mlBits > 0))
#else
if (mlBits > 0)
#endif
seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/);
if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
BIT_reloadDStream(&seqState->DStream);
if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
BIT_reloadDStream(&seqState->DStream);
/* Ensure there are enough bits to read the rest of data in 64-bit mode. */
ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
seq.litLength = llBase;
if (llBits > 0)
seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
if (MEM_32bits())
BIT_reloadDStream(&seqState->DStream);
#if defined(__clang__)
if (UNLIKELY(llBits > 0))
#else
if (llBits > 0)
#endif
seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/);
if (prefetch == ZSTD_p_prefetch) {
size_t const pos = seqState->pos + seq.litLength;
const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : no memory access will occur, offset is only used for prefetching */
seqState->pos = pos + seq.matchLength;
}
DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
(U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
/* ANS state update
* gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo().
* clang-9.2.0 does 7% worse with ZSTD_updateFseState().
* Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the
* better option, so it is the default for other compilers. But, if you
* measure that it is worse, please put up a pull request.
*/
{
#if defined(__GNUC__) && !defined(__clang__)
const int kUseUpdateFseState = 1;
#else
const int kUseUpdateFseState = 0;
#endif
if (kUseUpdateFseState) {
ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
} else {
ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */
}
ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */
if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */
/* decompress without overrunning litPtr begins */
{
seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
/* Align the decompression loop to 32 + 16 bytes.
*
* zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression
* speed swings based on the alignment of the decompression loop. This
* performance swing is caused by parts of the decompression loop falling
* out of the DSB. The entire decompression loop should fit in the DSB,
* when it can't we get much worse performance. You can measure if you've
* hit the good case or the bad case with this perf command for some
* compressed file test.zst:
*
* perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \
* -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst
*
* If you see most cycles served out of the MITE you've hit the bad case.
* If you see most cycles served out of the DSB you've hit the good case.
* If it is pretty even then you may be in an okay case.
*
* This issue has been reproduced on the following CPUs:
* - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9
* Use Instruments->Counters to get DSB/MITE cycles.
* I never got performance swings, but I was able to
* go from the good case of mostly DSB to half of the
* cycles served from MITE.
* - Coffeelake: Intel i9-9900k
* - Coffeelake: Intel i7-9700k
*
* I haven't been able to reproduce the instability or DSB misses on any
* of the following CPUS:
* - Haswell
* - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz
* - Skylake
*
* Alignment is done for each of the three major decompression loops:
* - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer
* - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer
* - ZSTD_decompressSequences_body
* Alignment choices are made to minimize large swings on bad cases and influence on performance
* from changes external to this code, rather than to overoptimize on the current commit.
*
* If you are seeing performance instability, this script can help test.
* It tests on 4 commits in zstd where I saw performance change.
*
* https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4
*/
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 4");
__asm__(".p2align 6");
# if __GNUC__ >= 7
/* good for gcc-7, gcc-9, and gcc-11 */
__asm__("nop");
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 4");
# if __GNUC__ == 8 || __GNUC__ == 10
/* good for gcc-8 and gcc-10 */
__asm__("nop");
__asm__(".p2align 3");
# endif
# endif
#endif
/* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */
for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) {
size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (UNLIKELY(!--nbSeq))
break;
BIT_reloadDStream(&(seqState.DStream));
sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
}
/* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */
if (nbSeq > 0) {
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
{
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequence.litLength -= leftoverLit;
op += leftoverLit;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
{
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (--nbSeq)
BIT_reloadDStream(&(seqState.DStream));
}
}
}
if (nbSeq > 0) /* there is remaining lit from extra buffer */
{
#if defined(__GNUC__) && defined(__x86_64__)
__asm__(".p2align 6");
__asm__("nop");
# if __GNUC__ != 7
/* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */
__asm__(".p2align 4");
__asm__("nop");
__asm__(".p2align 3");
# elif __GNUC__ >= 11
__asm__(".p2align 3");
# else
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 3");
# endif
#endif
for (; ; ) {
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase);
#endif
if (UNLIKELY(ZSTD_isError(oneSeqSize)))
return oneSeqSize;
DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
op += oneSeqSize;
if (UNLIKELY(!--nbSeq))
break;
BIT_reloadDStream(&(seqState.DStream));
}
}
/* check if reached exact end */
DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq);
RETURN_ERROR_IF(nbSeq, corruption_detected, "");
RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, "");
/* save reps for next block */
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
}
/* last literal segment */
if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */
{
size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
}
{ size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memcpy(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
FORCE_INLINE_TEMPLATE size_t
DONT_VECTORIZE
ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer;
BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize;
const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart);
const BYTE* const vBase = (const BYTE*)(dctx->virtualStart);
const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd);
DEBUGLOG(5, "ZSTD_decompressSequences_body: nbSeq = %d", nbSeq);
(void)frame;
/* Regen sequences */
if (nbSeq) {
seqState_t seqState;
dctx->fseEntropy = 1;
{ U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
RETURN_ERROR_IF(
ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)),
corruption_detected, "");
ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
assert(dst != NULL);
ZSTD_STATIC_ASSERT(
BIT_DStream_unfinished < BIT_DStream_completed &&
BIT_DStream_endOfBuffer < BIT_DStream_completed &&
BIT_DStream_completed < BIT_DStream_overflow);
#if defined(__GNUC__) && defined(__x86_64__)
__asm__(".p2align 6");
__asm__("nop");
# if __GNUC__ >= 7
__asm__(".p2align 5");
__asm__("nop");
__asm__(".p2align 3");
# else
__asm__(".p2align 4");
__asm__("nop");
__asm__(".p2align 3");
# endif
static size_t
ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
FORCE_INLINE_TEMPLATE size_t
ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
const BYTE* const prefixStart, const BYTE* const dictEnd)
{
prefetchPos += sequence.litLength;
{ const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : memory address is only used for prefetching, not for dereferencing */
PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
}
return prefetchPos + sequence.matchLength;
}
/* This decoding function employs prefetching
* to reduce latency impact of cache misses.
* It's generally employed when a block contains a significant portion of long-distance matches,
* or when coupled with a "cold" dictionary */
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize;
seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb] = sequence;
/* decompress without stomping litBuffer */
for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) {
seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
size_t oneSeqSize;
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd)
{
/* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
{
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit;
op += leftoverLit;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
else
{
/* lit buffer is either wholly contained in first or second split, or not split at all*/
oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd);
sequences[seqNb & STORED_SEQS_MASK] = sequence;
op += oneSeqSize;
}
seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]);
if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd)
{
const size_t leftoverLit = dctx->litBufferEnd - litPtr;
if (leftoverLit)
{
RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer");
ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit);
sequence->litLength -= leftoverLit;
op += leftoverLit;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
dctx->litBufferLocation = ZSTD_not_in_dst;
{
size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
}
else
{
size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ?
ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) :
ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd);
#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE)
assert(!ZSTD_isError(oneSeqSize));
if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart);
#endif
if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
op += oneSeqSize;
}
/* last literal segment */
if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */
{
size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
litPtr = dctx->litExtraBuffer;
litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE;
}
{ size_t const lastLLSize = litBufferEnd - litPtr;
RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, "");
if (op != NULL) {
ZSTD_memmove(op, litPtr, lastLLSize);
op += lastLLSize;
}
}
return op-ostart;
}
static BMI2_TARGET_ATTRIBUTE size_t
DONT_VECTORIZE
ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx,
void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
static size_t
ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
const void* seqStart, size_t seqSize, int nbSeq,
const ZSTD_longOffset_e isLongOffset,
const int frame)
{
DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer");
#if DYNAMIC_BMI2
if (ZSTD_DCtx_get_bmi2(dctx)) {
return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#endif
return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame);
}
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
/* ZSTD_getOffsetInfo() :
* reports the share of long offsets within the offset table,
* compared to maximum possible of (1<<OffFSELog),
* as well as the maximum number of additional bits required.
*/
typedef struct {
unsigned longOffsetShare;
unsigned maxNbAdditionalBits;
} ZSTD_OffsetInfo;
static ZSTD_OffsetInfo
ZSTD_getOffsetInfo(const ZSTD_seqSymbol* offTable, int nbSeq)
{
ZSTD_OffsetInfo info = {0, 0};
/* If nbSeq == 0, then the offTable is uninitialized, but we have
* no sequences, so both values should be 0.
*/
if (nbSeq != 0) {
const void* ptr = offTable;
U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
const ZSTD_seqSymbol* table = offTable + 1;
U32 const max = 1 << tableLog;
U32 u;
DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
assert(max <= (1 << OffFSELog)); /* max not too large */
for (u=0; u<max; u++) {
info.maxNbAdditionalBits = MAX(info.maxNbAdditionalBits, table[u].nbAdditionalBits);
if (table[u].nbAdditionalBits > 22) info.longOffsetShare += 1;
}
}
return info;
}
/**
* @returns The maximum offset we can decode in one read of our bitstream, without
* reloading more bits in the middle of the offset bits read. Any offsets larger
* than this must use the long offset decoder.
*/
static size_t ZSTD_maxShortOffset(void)
{
if (MEM_64bits()) {
/* We can decode any offset without reloading bits.
* This might change if the max window size grows.
*/
ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
return (size_t)-1;
} else {
/* The maximum offBase is (1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1.
* This offBase would require STREAM_ACCUMULATOR_MIN extra bits.
* Then we have to subtract ZSTD_REP_NUM to get the maximum possible offset.
*/
size_t const maxOffbase = ((size_t)1 << (STREAM_ACCUMULATOR_MIN + 1)) - 1;
size_t const maxOffset = maxOffbase - ZSTD_REP_NUM;
assert(ZSTD_highbit32((U32)maxOffbase) == STREAM_ACCUMULATOR_MIN);
return maxOffset;
}
}
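/* Illustrative arithmetic (not part of the library): assuming STREAM_ACCUMULATOR_MIN is 25
* on a 32-bit build and ZSTD_REP_NUM is 3, maxOffbase = (1 << 26) - 1 = 67108863 and the
* maximum short offset is 67108863 - 3 = 67108860; any larger offset needs the long-offset path. */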
/* Note : the wording of the specification
* allows a compressed block to be sized exactly ZSTD_BLOCKSIZE_MAX.
* This generally does not happen, as it makes little sense,
* since an uncompressed block would feature the same size and have no decompression cost.
* Also note that decoders from reference libzstd versions before v1.5.4
* would consider this edge case an error.
* As a consequence, avoid generating compressed blocks of size ZSTD_BLOCKSIZE_MAX
* for broader compatibility with the deployed ecosystem of zstd decoders */
RETURN_ERROR_IF(srcSize > ZSTD_BLOCKSIZE_MAX, srcSize_wrong, "");
{ size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming);
DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : cSize=%u, nbLiterals=%zu", (U32)litCSize, dctx->litSize);
/* Compute the maximum block size, which must also work when !frame and fParams are unset.
* Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
*/
size_t const blockSizeMax = MIN(dstCapacity, (frame ? dctx->fParams.blockSizeMax : ZSTD_BLOCKSIZE_MAX));
size_t const totalHistorySize = ZSTD_totalHistorySize((BYTE*)dst + blockSizeMax, (BYTE const*)dctx->virtualStart);
/* isLongOffset must be true if there are long offsets.
* Offsets are long if they are larger than ZSTD_maxShortOffset().
* We don't expect that to be the case in 64-bit mode.
*
* We check here to see if our history is large enough to allow long offsets.
* If it isn't, then we can't possibly have (valid) long offsets. If the offset
* is invalid, then it is okay to read it incorrectly.
*
* If isLongOffset is true, then we will later check our decoding table to see
* if it is even possible to generate long offsets.
*/
ZSTD_longOffset_e isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (totalHistorySize > ZSTD_maxShortOffset()));
#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \
!defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG)
/* If we could potentially have long offsets, or we might want to use the prefetch decoder,
* compute information about the share of long offsets, and the maximum nbAdditionalBits.
* NOTE: could probably use a larger nbSeq limit
*/
if (isLongOffset || (!usePrefetchDecoder && (totalHistorySize > (1u << 24)) && (nbSeq > 8))) {
ZSTD_OffsetInfo const info = ZSTD_getOffsetInfo(dctx->OFTptr, nbSeq);
if (isLongOffset && info.maxNbAdditionalBits <= STREAM_ACCUMULATOR_MIN) {
/* If isLongOffset, but the maximum number of additional bits that we see in our table is small
* enough, then we know it is impossible to have too long an offset in this block, so we can
* use the regular offset decoder.
*/
isLongOffset = ZSTD_lo_isRegularOffset;
}
if (!usePrefetchDecoder) {
U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
usePrefetchDecoder = (info.longOffsetShare >= minShare);
}
}
#endif
if (dctx->litBufferLocation == ZSTD_split)
return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
else
return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame);
ZSTD_checkContinuity(dctx, dst, dstCapacity);
dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming);
#define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4
#define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float.
* Currently, that means a 0.75 load factor.
* So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded
* the load factor of the ddict hash set.
*/
#define DDICT_HASHSET_TABLE_BASE_SIZE 64
#define DDICT_HASHSET_RESIZE_FACTOR 2
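/* Worked example (illustrative only): with a 64-slot table, 15 dictionaries give
* 15 * 4 / 64 == 0, so no resize is triggered on insert, while 48 dictionaries give
* 48 * 4 / 64 == 3 != 0, so the table is expanded (by DDICT_HASHSET_RESIZE_FACTOR,
* i.e. to 128 slots) before the new entry is placed. */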
/* Hash function to determine starting position of dict insertion within the table
* Returns an index between [0, hashSet->ddictPtrTableSize)
*/
static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) {
const U64 hash = XXH64(&dictID, sizeof(U32), 0);
/* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */
return hash & (hashSet->ddictPtrTableSize - 1);
}
/* Adds DDict to a hashset without resizing it.
* If inserting a DDict with a dictID that already exists in the set, replaces the one in the set.
* Returns 0 if successful, or a zstd error code if something went wrong.
*/
static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) {
const U32 dictID = ZSTD_getDictID_fromDDict(ddict);
size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!");
DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
while (hashSet->ddictPtrTable[idx] != NULL) {
/* Replace existing ddict if inserting ddict with same dictID */
if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) {
DEBUGLOG(4, "DictID already exists, replacing rather than adding");
hashSet->ddictPtrTable[idx] = ddict;
return 0;
}
idx &= idxRangeMask;
idx++;
}
DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
hashSet->ddictPtrTable[idx] = ddict;
hashSet->ddictPtrCount++;
return 0;
}
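/* Illustrative probe sequence (hypothetical dictIDs): if two dictIDs both hash to slot 12
* of a 64-slot table, the first insertion takes slot 12 and the second walks forward to
* slot 13; a dictID equal to an existing entry simply overwrites it in place. */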
/* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and
* rehashes all values, allocates new table, frees old table.
* Returns 0 on success, otherwise a zstd error code.
*/
static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR;
const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem);
const ZSTD_DDict** oldTable = hashSet->ddictPtrTable;
size_t oldTableSize = hashSet->ddictPtrTableSize;
size_t i;
DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize);
RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!");
hashSet->ddictPtrTable = newTable;
hashSet->ddictPtrTableSize = newTableSize;
hashSet->ddictPtrCount = 0;
for (i = 0; i < oldTableSize; ++i) {
if (oldTable[i] != NULL) {
FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), "");
}
}
ZSTD_customFree((void*)oldTable, customMem);
DEBUGLOG(4, "Finished re-hash");
return 0;
}
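/* Note: every surviving entry must be re-emplaced because the index mask changes with the
* table size (e.g. from 63 to 127 when growing from 64 to 128 slots), so old slot positions
* are generally no longer valid after expansion. */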
/* Fetches a DDict with the given dictID
* Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL.
*/
static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) {
size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID);
const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1;
DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx);
for (;;) {
size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]);
if (currDictID == dictID || currDictID == 0) {
/* currDictID == 0 implies a NULL ddict entry */
break;
} else {
idx &= idxRangeMask; /* Goes to start of table when we reach the end */
idx++;
}
}
DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx);
return hashSet->ddictPtrTable[idx];
}
/* Allocates space for and returns a ddict hash set
* The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with.
* Returns NULL if allocation failed.
*/
static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) {
ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem);
DEBUGLOG(4, "Allocating new hash set");
if (!ret)
return NULL;
ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem);
if (!ret->ddictPtrTable) {
ZSTD_customFree(ret, customMem);
return NULL;
}
ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE;
ret->ddictPtrCount = 0;
return ret;
}
/* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself.
* Note: The ZSTD_DDict* within the table are NOT freed.
*/
static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) {
DEBUGLOG(4, "Freeing ddict hash set");
if (hashSet && hashSet->ddictPtrTable) {
ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem);
}
if (hashSet) {
ZSTD_customFree(hashSet, customMem);
}
}
/* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set.
* Returns 0 on success, or a ZSTD error.
*/
static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) {
DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize);
if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) {
FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), "");
}
FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), "");
return 0;
}
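/* Typical call path (sketch, not a complete example): once ZSTD_d_refMultipleDDicts is
* enabled, each referenced DDict is funneled into ZSTD_DDictHashSet_addDDict(), and
* ZSTD_DCtx_selectFrameDDict() later looks the right one up by the frame's dictID. */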
}
/* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on
* the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then
* accordingly sets the ddict to be used to decompress the frame.
*
* If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is.
*
* ZSTD_d_refMultipleDDicts must be enabled for this function to be called.
*/
static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) {
assert(dctx->refMultipleDDicts && dctx->ddictSet);
DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame");
if (dctx->ddict) {
const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID);
if (frameDDict) {
DEBUGLOG(4, "DDict found!");
ZSTD_clearDict(dctx);
dctx->dictID = dctx->fParams.dictID;
dctx->ddict = frameDDict;
dctx->dictUses = ZSTD_use_indefinitely;
}
}
}
/*! ZSTD_isSkippableFrame() :
* Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame.
* Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
*/
unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size)
{
if (size < ZSTD_FRAMEIDSIZE) return 0;
{ U32 const magic = MEM_readLE32(buffer);
if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
}
return 0;
}
ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers do not understand that zfhPtr will only be read if the return value is zero, since these are 2 different signals */
DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize);
if (srcSize > 0) {
/* note : technically could be considered an assert(), since it's an invalid entry */
RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0");
}
if (srcSize < minInputSize) {
if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) {
/* when receiving less than @minInputSize bytes,
* check that these bytes at least correspond to a supported magic number,
* in order to error out early if they don't.
**/
size_t const toCopy = MIN(4, srcSize);
unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER);
assert(src != NULL);
ZSTD_memcpy(hbuf, src, toCopy);
if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) {
/* not a zstd frame : let's check if it's a skippable frame */
MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START);
ZSTD_memcpy(hbuf, src, toCopy);
if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) {
RETURN_ERROR(prefix_unknown,
"first bytes don't correspond to any supported magic number");
} } }
return minInputSize;
}
}
/*! ZSTD_readSkippableFrame() :
* Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer.
*
* The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
* i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
* in the magicVariant.
*
* Returns an error if destination buffer is not large enough, or if the frame is not skippable.
*
* @return : number of bytes written or a ZSTD error.
*/
ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant,
const void* src, size_t srcSize)
{
U32 const magicNumber = MEM_readLE32(src);
size_t skippableFrameSize = readSkippableFrameSize(src, srcSize);
size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE;
/* check input validity */
RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, "");
RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, "");
RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, "");
/* deliver payload */
if (skippableContentSize > 0 && dst != NULL)
ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize);
if (magicVariant != NULL)
*magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START;
return skippableContentSize;
}
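/* Usage sketch (illustrative only, error handling omitted):
* unsigned variant;
* size_t const payloadSize = ZSTD_readSkippableFrame(dst, dstCapacity, &variant, src, srcSize);
* on success, payloadSize bytes of user data have been copied into dst, and
* variant holds magicNumber - ZSTD_MAGIC_SKIPPABLE_START. */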
/* Iterate over each frame */
while (srcSize > 0) {
ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize);
size_t const compressedSize = frameSizeInfo.compressedSize;
unsigned long long const decompressedBound = frameSizeInfo.decompressedBound;
ZSTD_frameHeader zfh;
FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), "");
if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR)
return ERROR(corruption_detected);
if (zfh.frameType == ZSTD_frame) {
/* Add the frame header to our margin */
margin += zfh.headerSize;
/* Add the checksum to our margin */
margin += zfh.checksumFlag ? 4 : 0;
/* Add 3 bytes per block */
margin += 3 * frameSizeInfo.nbBlocks;
/* Compute the max block size */
maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax);
} else {
assert(zfh.frameType == ZSTD_skippableFrame);
/* Add the entire skippable frame size to our margin. */
margin += compressedSize;
}
assert(srcSize >= compressedSize);
src = (const BYTE*)src + compressedSize;
srcSize -= compressedSize;
}
/* Add the max block size back to the margin. */
margin += maxBlockSize;
return margin;
}
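/* Worked example (hypothetical numbers): a single standard frame with a 6-byte header,
* a 4-byte checksum, 10 blocks and a 128 KB max block size yields a margin of
* 6 + 4 + 3*10 + 131072 = 131112 bytes. */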
static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, unsigned streaming)
{
#if ZSTD_TRACE
if (dctx->traceCtx && ZSTD_trace_decompress_end != NULL) {
ZSTD_Trace trace;
ZSTD_memset(&trace, 0, sizeof(trace));
trace.version = ZSTD_VERSION_NUMBER;
trace.streaming = streaming;
if (dctx->ddict) {
trace.dictionaryID = ZSTD_getDictID_fromDDict(dctx->ddict);
trace.dictionarySize = ZSTD_DDict_dictSize(dctx->ddict);
trace.dictionaryIsCold = dctx->ddictIsCold;
}
trace.uncompressedSize = (size_t)uncompressedSize;
trace.compressedSize = (size_t)compressedSize;
trace.dctx = dctx;
ZSTD_trace_decompress_end(dctx->traceCtx, &trace);
}
#else
(void)dctx;
(void)uncompressedSize;
(void)compressedSize;
(void)streaming;
#endif
}
if (ip >= op && ip < oBlockEnd) {
/* We are decompressing in-place. Limit the output pointer so that we
* don't overwrite the block that we are currently reading. This will
* fail decompression if the input & output pointers aren't spaced
* far enough apart.
*
* This is important to set, even when the pointers are far enough
* apart, because ZSTD_decompressBlock_internal() can decide to store
* literals in the output buffer, after the block it is decompressing.
* Since we don't want anything to overwrite our input, we have to tell
* ZSTD_decompressBlock_internal to never write past ip.
*
* See ZSTD_allocateLiteralsBuffer() for reference.
*/
oBlockEnd = op + (ip - op);
}
* Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we
* allow taking a partial block as the input. Currently only raw uncompressed blocks can
if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) {
if (dctx->ddictSet == NULL) {
dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem);
if (!dctx->ddictSet) {
RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!");
}
}
assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */
FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), "");
}
return 0;
case ZSTD_d_refMultipleDDicts:
CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value);
if (dctx->staticSize != 0) {
RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!");
}
dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value;
return 0;
case ZSTD_d_disableHuffmanAssembly:
CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value);
dctx->disableHufAsm = value != 0;
/* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy */
unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2);
ZSTD_outBuffer output;
ZSTD_inBuffer input;
output.dst = dst;
output.size = dstCapacity;
output.pos = *dstPos;
input.src = src;
input.size = srcSize;
input.pos = *srcPos;
{ size_t const cErr = ZSTD_decompressStream(dctx, &output, &input);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
}
/* When DYNAMIC_BMI2 is enabled, fast decoders are only called when bmi2 is
* supported at runtime, so we can add the BMI2 target attribute.
* When it is disabled, we will still get BMI2 if it is enabled statically.
*/
#if DYNAMIC_BMI2
# define HUF_FAST_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE
#else
# define HUF_FAST_BMI2_ATTRS
#endif
static size_t HUF_initFastDStream(BYTE const* ip) {
BYTE const lastByte = ip[7];
size_t const bitsConsumed = lastByte ? 8 - ZSTD_highbit32(lastByte) : 0;
size_t const value = MEM_readLEST(ip) | 1;
assert(bitsConsumed <= 8);
assert(sizeof(size_t) == 8);
return value << bitsConsumed;
}
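/* Worked example (illustrative only): if ip[7] == 0x20, the end-of-stream marker is bit 5,
* so bitsConsumed = 8 - ZSTD_highbit32(0x20) = 3; the container is shifted left by 3 to drop
* the padding and marker bits, and the OR-ed 1 plants a sentinel so that counting trailing
* zeros later reveals how many bits have been consumed. */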
/**
* The input/output arguments to the Huffman fast decoding loop:
*
* ip [in/out] - The input pointers, must be updated to reflect what is consumed.
* op [in/out] - The output pointers, must be updated to reflect what is written.
* bits [in/out] - The bitstream containers, must be updated to reflect the current state.
* dt [in] - The decoding table.
* ilimit [in] - The input limit, stop when any input pointer is below ilimit.
* oend [in] - The end of the output stream. op[3] must not cross oend.
* iend [in] - The end of each input stream. ip[i] may cross iend[i],
* as long as it is above ilimit, but that indicates corruption.
*/
typedef struct {
BYTE const* ip[4];
BYTE* op[4];
U64 bits[4];
void const* dt;
BYTE const* ilimit;
BYTE* oend;
BYTE const* iend[4];
} HUF_DecompressFastArgs;
typedef void (*HUF_DecompressFastLoopFn)(HUF_DecompressFastArgs*);
/**
* Initializes args for the fast decoding loop.
* @returns 1 on success
* 0 if the fallback implementation should be used.
* Or an error code on failure.
*/
static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable)
{
void const* dt = DTable + 1;
U32 const dtLog = HUF_getDTableDesc(DTable).tableLog;
const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
BYTE* const oend = (BYTE*)dst + dstSize;
/* The fast decoding loop assumes 64-bit little-endian.
* This condition is false on x32.
*/
if (!MEM_isLittleEndian() || MEM_32bits())
return 0;
/* strict minimum : jump table + 1 byte per stream */
if (srcSize < 10)
return ERROR(corruption_detected);
/* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers.
* If table log is not correct at this point, fallback to the old decoder.
* On small inputs we don't have enough data to trigger the fast loop, so use the old decoder.
*/
if (dtLog != HUF_DECODER_FAST_TABLELOG)
return 0;
/* Read the jump table. */
{
const BYTE* const istart = (const BYTE*)src;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = srcSize - (length1 + length2 + length3 + 6);
args->iend[0] = istart + 6; /* jumpTable */
args->iend[1] = args->iend[0] + length1;
args->iend[2] = args->iend[1] + length2;
args->iend[3] = args->iend[2] + length3;
/* HUF_initFastDStream() requires this, and this small of an input
* won't benefit from the ASM loop anyways.
* length1 must be >= 16 so that ip[0] >= ilimit before the loop
* starts.
*/
if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8)
return 0;
if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */
}
/* ip[] contains the position that is currently loaded into bits[]. */
args->ip[0] = args->iend[1] - sizeof(U64);
args->ip[1] = args->iend[2] - sizeof(U64);
args->ip[2] = args->iend[3] - sizeof(U64);
args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64);
/* op[] contains the output pointers. */
args->op[0] = (BYTE*)dst;
args->op[1] = args->op[0] + (dstSize+3)/4;
args->op[2] = args->op[1] + (dstSize+3)/4;
args->op[3] = args->op[2] + (dstSize+3)/4;
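/* Illustrative split (hypothetical dstSize): for dstSize == 103, each of the first three
* segments is (103+3)/4 == 26 bytes, so op[1] = dst+26, op[2] = dst+52, op[3] = dst+78,
* and the fourth stream decodes the remaining 25 bytes up to oend. */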
/* No point to call the ASM loop for tiny outputs. */
if (args->op[3] >= oend)
return 0;
/* bits[] is the bit container.
* It is read from the MSB down to the LSB.
* It is shifted left as it is read, and zeros are
* shifted in. After the lowest valid bit a 1 is
* set, so that CountTrailingZeros(bits[]) can be used
* to count how many bits we've consumed.
*/
args->bits[0] = HUF_initFastDStream(args->ip[0]);
args->bits[1] = HUF_initFastDStream(args->ip[1]);
args->bits[2] = HUF_initFastDStream(args->ip[2]);
args->bits[3] = HUF_initFastDStream(args->ip[3]);
/* If ip[] >= ilimit, it is guaranteed to be safe to
* reload bits[]. It may be beyond its section, but is
* guaranteed to be valid (>= istart).
*/
args->ilimit = ilimit;
args->oend = oend;
args->dt = dt;
return 1;
}
static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressFastArgs const* args, int stream, BYTE* segmentEnd)
{
/* Validate that we haven't overwritten. */
if (args->op[stream] > segmentEnd)
return ERROR(corruption_detected);
/* Validate that we haven't read beyond iend[].
* Note that ip[] may be < iend[] because the MSB is
* the next bit to read, and we may have consumed 100%
* of the stream, so down to iend[i] - 8 is valid.
*/
if (args->ip[stream] < args->iend[stream] - 8)
return ERROR(corruption_detected);
/* Construct the BIT_DStream_t. */
assert(sizeof(size_t) == 8);
bit->bitContainer = MEM_readLEST(args->ip[stream]);
bit->bitsConsumed = ZSTD_countTrailingZeros64(args->bits[stream]);
bit->start = (const char*)args->iend[0];
bit->limitPtr = bit->start + sizeof(size_t);
bit->ptr = (const char*)args->ip[stream];
return 0;
}
}
/**
* Increase the tableLog to targetTableLog and rescales the stats.
* If tableLog > targetTableLog this is a no-op.
* @returns New tableLog
*/
static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog)
{
if (tableLog > targetTableLog)
return tableLog;
if (tableLog < targetTableLog) {
U32 const scale = targetTableLog - tableLog;
U32 s;
/* Increase the weight for all non-zero probability symbols by scale. */
for (s = 0; s < nbSymbols; ++s) {
huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale);
}
/* Update rankVal to reflect the new weights.
* All weights except 0 get moved to weight + scale.
* Weights [1, scale] are empty.
*/
for (s = targetTableLog; s > scale; --s) {
rankVal[s] = rankVal[s - scale];
}
for (s = scale; s > 0; --s) {
rankVal[s] = 0;
}
}
return targetTableLog;
}
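/* Worked example (illustrative only): rescaling tableLog 9 to a targetTableLog of 11 uses
* scale = 2, so every non-zero weight gains 2, rankVal[w] moves to rankVal[w+2], and
* rankVal[1..2] become empty. */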
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags)
iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), flags);
if ((pEnd - p) > 3) {
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) {
HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
HUF_DECODE_SYMBOLX1_1(p, bitDPtr);
HUF_DECODE_SYMBOLX1_2(p, bitDPtr);
HUF_DECODE_SYMBOLX1_0(p, bitDPtr);
}
} else {
BIT_reloadDStream(bitDPtr);
if ((size_t)(oend - op4) >= sizeof(size_t)) {
for ( ; (endSignal) & (op4 < olimit) ; ) {
HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
HUF_DECODE_SYMBOLX1_1(op1, &bitD1);
HUF_DECODE_SYMBOLX1_1(op2, &bitD2);
HUF_DECODE_SYMBOLX1_1(op3, &bitD3);
HUF_DECODE_SYMBOLX1_1(op4, &bitD4);
HUF_DECODE_SYMBOLX1_2(op1, &bitD1);
HUF_DECODE_SYMBOLX1_2(op2, &bitD2);
HUF_DECODE_SYMBOLX1_2(op3, &bitD3);
HUF_DECODE_SYMBOLX1_2(op4, &bitD4);
HUF_DECODE_SYMBOLX1_0(op1, &bitD1);
HUF_DECODE_SYMBOLX1_0(op2, &bitD2);
HUF_DECODE_SYMBOLX1_0(op3, &bitD3);
HUF_DECODE_SYMBOLX1_0(op4, &bitD4);
endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
}
#if HUF_NEED_BMI2_FUNCTION
static BMI2_TARGET_ATTRIBUTE
size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
static
size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#if ZSTD_ENABLE_ASM_X86_64_BMI2
HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
#endif
static HUF_FAST_BMI2_ATTRS
void HUF_decompress4X1_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
{
U64 bits[4];
BYTE const* ip[4];
BYTE* op[4];
U16 const* const dtable = (U16 const*)args->dt;
BYTE* const oend = args->oend;
BYTE const* const ilimit = args->ilimit;
/* Copy the arguments to local variables */
ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
ZSTD_memcpy(&ip, &args->ip, sizeof(ip));
ZSTD_memcpy(&op, &args->op, sizeof(op));
assert(MEM_isLittleEndian());
assert(!MEM_32bits());
for (;;) {
BYTE* olimit;
int stream;
int symbol;
/* Assert loop preconditions */
#ifndef NDEBUG
for (stream = 0; stream < 4; ++stream) {
assert(op[stream] <= (stream == 3 ? oend : op[stream + 1]));
assert(ip[stream] >= ilimit);
}
#endif
/* Compute olimit */
{
/* Each iteration produces 5 output symbols per stream */
size_t const oiters = (size_t)(oend - op[3]) / 5;
/* Each iteration consumes up to 11 bits * 5 = 55 bits < 7 bytes
* per stream.
*/
size_t const iiters = (size_t)(ip[0] - ilimit) / 7;
/* We can safely run iters iterations before running bounds checks */
size_t const iters = MIN(oiters, iiters);
size_t const symbols = iters * 5;
/* We can simply check that op[3] < olimit, instead of checking all
* of our bounds, since we can't hit the other bounds until we've run
* iters iterations, which only happens when op[3] == olimit.
*/
olimit = op[3] + symbols;
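/* Illustrative bound (hypothetical values): with oend - op[3] == 100 and ip[0] - ilimit == 70,
* oiters == 20 and iiters == 10, so iters == 10 and olimit == op[3] + 50 symbols. */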
/* Exit the decoding loop if any input pointer has crossed the
* previous one. This indicates corruption, and a precondition
* to our loop is that ip[i] >= ip[0].
*/
for (stream = 1; stream < 4; ++stream) {
if (ip[stream] < ip[stream - 1])
goto _out;
}
}
#ifndef NDEBUG
for (stream = 1; stream < 4; ++stream) {
assert(ip[stream] >= ip[stream - 1]);
}
#endif
HUF_DGEN(HUF_decompress1X1_usingDTable_internal)
HUF_DGEN(HUF_decompress4X1_usingDTable_internal)
do {
/* Decode 5 symbols in each of the 4 streams */
for (symbol = 0; symbol < 5; ++symbol) {
for (stream = 0; stream < 4; ++stream) {
int const index = (int)(bits[stream] >> 53);
int const entry = (int)dtable[index];
bits[stream] <<= (entry & 63);
op[stream][symbol] = (BYTE)((entry >> 8) & 0xFF);
}
}
/* Reload the bitstreams */
for (stream = 0; stream < 4; ++stream) {
int const ctz = ZSTD_countTrailingZeros64(bits[stream]);
int const nbBits = ctz & 7;
int const nbBytes = ctz >> 3;
op[stream] += 5;
ip[stream] -= nbBytes;
bits[stream] = MEM_read64(ip[stream]) | 1;
bits[stream] <<= nbBits;
}
} while (op[3] < olimit);
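/* Reload arithmetic (illustrative only): if the sentinel reports ctz == 21 consumed bits,
* then nbBytes == 2 and nbBits == 5, so the input pointer moves back 2 whole bytes and the
* freshly read container is pre-shifted by the 5 leftover bits. */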
}
/**
* @returns @p dstSize on success (>= 6)
* 0 if the fallback implementation should be used
* An error if an error occurred
*/
static HUF_FAST_BMI2_ATTRS
size_t
HUF_decompress4X1_usingDTable_internal_fast(
void const* dt = DTable + 1;
const BYTE* const iend = (const BYTE*)cSrc + 6;
BYTE* const oend = (BYTE*)dst + dstSize;
HUF_DecompressFastArgs args;
{ size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
if (ret == 0)
return 0;
}
size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, wkspSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
/* Our loop guarantees that ip[] >= ilimit and that we haven't
* overwritten any op[].
*/
assert(args.ip[0] >= iend);
assert(args.ip[1] >= iend);
assert(args.ip[2] >= iend);
assert(args.ip[3] >= iend);
assert(args.op[3] <= oend);
(void)iend;
return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0);
/* finish bit streams one by one. */
{ size_t const segmentSize = (dstSize+3) / 4;
BYTE* segmentEnd = (BYTE*)dst;
int i;
for (i = 0; i < 4; ++i) {
BIT_DStream_t bit;
if (segmentSize <= (size_t)(oend - segmentEnd))
segmentEnd += segmentSize;
else
segmentEnd = oend;
FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
/* Decompress and validate that we've produced exactly the expected length. */
args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG);
if (args.op[i] != segmentEnd) return ERROR(corruption_detected);
}
}
/* decoded size */
assert(dstSize != 0);
return dstSize;
}
static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable, int flags)
{
HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X1_usingDTable_internal_default;
HUF_DecompressFastLoopFn loopFn = HUF_decompress4X1_usingDTable_internal_fast_c_loop;
#if DYNAMIC_BMI2
if (flags & HUF_flags_bmi2) {
fallbackFn = HUF_decompress4X1_usingDTable_internal_bmi2;
# if ZSTD_ENABLE_ASM_X86_64_BMI2
if (!(flags & HUF_flags_disableAsm)) {
loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
}
# endif
} else {
return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
if (!(flags & HUF_flags_disableAsm)) {
loopFn = HUF_decompress4X1_usingDTable_internal_fast_asm_loop;
}
#endif
if (!(flags & HUF_flags_disableFast)) {
size_t const ret = HUF_decompress4X1_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
if (ret != 0)
return ret;
}
return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
/**
* Constructs a HUF_DEltX2 in a U32.
*/
static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level)
{
U32 seq;
DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0);
DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2);
DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3);
DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32));
if (MEM_isLittleEndian()) {
seq = level == 1 ? symbol : (baseSeq + (symbol << 8));
return seq + (nbBits << 16) + ((U32)level << 24);
} else {
seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol);
return (seq << 16) + (nbBits << 8) + (U32)level;
}
}
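/* Worked example (illustrative only, little-endian): symbol 0x41, nbBits 5, level 1 gives
* 0x41 + (5 << 16) + (1 << 24) == 0x01050041, which lands in memory as the HUF_DEltX2
* { sequence = 0x0041, nbBits = 5, length = 1 }. */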
/**
* Constructs a HUF_DEltX2.
*/
static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level)
{
HUF_DEltX2 DElt;
U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val));
ZSTD_memcpy(&DElt, &val, sizeof(val));
return DElt;
}
/**
* Constructs 2 HUF_DEltX2s and packs them into a U64.
*/
static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level)
{
U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level);
return (U64)DElt + ((U64)DElt << 32);
}
/* fill skipped values */
if (minWeight>1) {
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
/**
* Fills the DTable rank with all the symbols from [begin, end) that are each
* nbBits long.
*
* @param DTableRank The start of the rank in the DTable.
* @param begin The first symbol to fill (inclusive).
* @param end The last symbol to fill (exclusive).
* @param nbBits Each symbol is nbBits long.
* @param tableLog The table log.
* @param baseSeq If level == 1 { 0 } else { the first level symbol }
* @param level The level in the table. Must be 1 or 2.
*/
static void HUF_fillDTableX2ForWeight(
HUF_DEltX2* DTableRank,
sortedSymbol_t const* begin, sortedSymbol_t const* end,
U32 nbBits, U32 tableLog,
U16 baseSeq, int const level)
{
U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */);
const sortedSymbol_t* ptr;
assert(level >= 1 && level <= 2);
switch (length) {
case 1:
for (ptr = begin; ptr != end; ++ptr) {
HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
*DTableRank++ = DElt;
}
break;
case 2:
for (ptr = begin; ptr != end; ++ptr) {
HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level);
DTableRank[0] = DElt;
DTableRank[1] = DElt;
DTableRank += 2;
}
break;
case 4:
for (ptr = begin; ptr != end; ++ptr) {
U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
DTableRank += 4;
}
break;
case 8:
for (ptr = begin; ptr != end; ++ptr) {
U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
DTableRank += 8;
}
break;
default:
for (ptr = begin; ptr != end; ++ptr) {
U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level);
HUF_DEltX2* const DTableRankEnd = DTableRank + length;
for (; DTableRank != DTableRankEnd; DTableRank += 8) {
ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2));
}
}
break;
}
}
/* fill DTable */
{ U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
}
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
/* HUF_fillDTableX2Level2() :
* `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits,
const U32* rankVal, const int minWeight, const int maxWeight1,
const sortedSymbol_t* sortedSymbols, U32 const* rankStart,
U32 nbBitsBaseline, U16 baseSeq)
{
/* Fill skipped values (all positions up to rankVal[minWeight]).
* These positions only get a single symbol because the combined weight
* is too large.
*/
if (minWeight>1) {
U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */);
U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1);
int const skipSize = rankVal[minWeight];
assert(length > 1);
assert((U32)skipSize < length);
switch (length) {
case 2:
assert(skipSize == 1);
ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2));
break;
case 4:
assert(skipSize <= 4);
ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2));
break;
default:
{
int i;
for (i = 0; i < skipSize; i += 8) {
ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2));
ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2));
}
}
}
}
rankVal[weight] += length;
} }
/* Fill each of the second level symbols by weight. */
{
int w;
for (w = minWeight; w < maxWeight1; ++w) {
int const begin = rankStart[w];
int const end = rankStart[w+1];
U32 const nbBits = nbBitsBaseline - w;
U32 const totalBits = nbBits + consumedBits;
HUF_fillDTableX2ForWeight(
DTable + rankVal[w],
sortedSymbols + begin, sortedSymbols + end,
totalBits, targetLog,
baseSeq, /* level */ 2);
}
}
/* fill DTable */
for (s=0; s<sortedListSize; s++) {
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
/* Fill DTable in order of weight. */
for (w = 1; w < wEnd; ++w) {
int const begin = (int)rankStart[w];
int const end = (int)rankStart[w+1];
U32 const nbBits = nbBitsBaseline - w;
if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
U32 sortedRank;
if (targetLog-nbBits >= minBits) {
/* Enough room for a second symbol. */
int start = rankVal[w];
U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */);
sortedRank = rankStart[minWeight];
HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
/* Fill the DTable for every symbol of weight w.
* These symbols get at least 1 second symbol.
*/
for (s = begin; s != end; ++s) {
HUF_fillDTableX2Level2(
DTable + start, targetLog, nbBits,
rankValOrigin[nbBits], minWeight, wEnd,
sortedList, rankStart,
nbBitsBaseline, sortedList[s].symbol);
start += length;
}
HUF_DEltX2 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
{ U32 const end = start + length;
U32 u;
for (u = start; u < end; u++) DTable[u] = DElt;
} }
rankVal[weight] += length;
/* Only a single symbol. */
HUF_fillDTableX2ForWeight(
DTable + rankVal[w],
sortedList + begin, sortedList + end,
nbBits, targetLog,
/* baseSeq */ 0, /* level */ 1);
}
typedef struct {
rankValCol_t rankVal[HUF_TABLELOG_MAX];
U32 rankStats[HUF_TABLELOG_MAX + 1];
U32 rankStart0[HUF_TABLELOG_MAX + 3];
sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
} HUF_ReadDTableX2_Workspace;
if (sizeof(*wksp) > wkspSize) return ERROR(GENERIC);
rankStart = wksp->rankStart0 + 1;
ZSTD_memset(wksp->rankStats, 0, sizeof(wksp->rankStats));
ZSTD_memset(wksp->rankStart0, 0, sizeof(wksp->rankStart0));
iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), flags);
if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) {
if (dtLog <= 11 && MEM_64bits()) {
/* up to 10 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) {
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
} else {
/* up to 8 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
}
} else {
BIT_reloadDStream(bitDPtr);
}
if ((size_t)(pEnd - p) >= 2) {
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished;
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished;
endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished;
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = (U32)LIKELY(
(BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = (U32)LIKELY((U32)
(BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished)
& (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished));
HUF_DGEN(HUF_decompress1X2_usingDTable_internal)
HUF_DGEN(HUF_decompress4X2_usingDTable_internal)
#if HUF_NEED_BMI2_FUNCTION
static BMI2_TARGET_ATTRIBUTE
size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
static
size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable) {
return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress1X2_usingDTable(
#if ZSTD_ENABLE_ASM_X86_64_BMI2
HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_fast_asm_loop(HUF_DecompressFastArgs* args) ZSTDLIB_HIDDEN;
#endif
static HUF_FAST_BMI2_ATTRS
void HUF_decompress4X2_usingDTable_internal_fast_c_loop(HUF_DecompressFastArgs* args)
{
U64 bits[4];
BYTE const* ip[4];
BYTE* op[4];
BYTE* oend[4];
HUF_DEltX2 const* const dtable = (HUF_DEltX2 const*)args->dt;
BYTE const* const ilimit = args->ilimit;
/* Copy the arguments to local registers. */
ZSTD_memcpy(&bits, &args->bits, sizeof(bits));
ZSTD_memcpy(&ip, &args->ip, sizeof(ip));
ZSTD_memcpy(&op, &args->op, sizeof(op));
oend[0] = op[1];
oend[1] = op[2];
oend[2] = op[3];
oend[3] = args->oend;
assert(MEM_isLittleEndian());
assert(!MEM_32bits());
for (;;) {
BYTE* olimit;
int stream;
int symbol;
/* Assert loop preconditions */
#ifndef NDEBUG
for (stream = 0; stream < 4; ++stream) {
assert(op[stream] <= oend[stream]);
assert(ip[stream] >= ilimit);
}
#endif
/* Compute olimit */
{
/* Each loop does 5 table lookups for each of the 4 streams.
* Each table lookup consumes up to 11 bits of input, and produces
* up to 2 bytes of output.
*/
/* We can consume up to 7 bytes of input per iteration per stream.
* We also know that each input pointer is >= ip[0]. So we can run
* iters loops before running out of input.
*/
size_t iters = (size_t)(ip[0] - ilimit) / 7;
/* Each iteration can produce up to 10 bytes of output per stream.
* Each output stream may advance at different rates. So take the
* minimum number of safe iterations among all the output streams.
*/
for (stream = 0; stream < 4; ++stream) {
size_t const oiters = (size_t)(oend[stream] - op[stream]) / 10;
iters = MIN(iters, oiters);
}
/* Each iteration produces at least 5 output symbols. So until
* op[3] crosses olimit, we know we haven't executed iters
* iterations yet. This saves us maintaining an iters counter,
* at the expense of computing the remaining # of iterations
* more frequently.
*/
olimit = op[3] + (iters * 5);
/* Exit the fast decoding loop if we are too close to the end. */
if (op[3] + 10 > olimit)
break;
/* Exit the decoding loop if any input pointer has crossed the
* previous one. This indicates corruption, and a precondition
* to our loop is that ip[i] >= ip[0].
*/
for (stream = 1; stream < 4; ++stream) {
if (ip[stream] < ip[stream - 1])
goto _out;
}
}
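/* Illustrative arithmetic (editor's sketch, hypothetical numbers):
* if ip[0] - ilimit == 700 bytes, the input bound allows 700/7 = 100 iterations;
* if the tightest output stream has 250 bytes of room, the output bound allows
* 250/10 = 25 iterations, so iters = MIN(100, 25) = 25 and olimit = op[3] + 25*5.
* Since op[3] advances by at least 5 bytes per iteration, the decode loop below
* must stop (op[3] >= olimit) before exceeding those 25 safe iterations. */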
#ifndef NDEBUG
for (stream = 1; stream < 4; ++stream) {
assert(ip[stream] >= ip[stream - 1]);
}
#endif
do {
/* Do 5 table lookups for each of the first 3 streams */
for (symbol = 0; symbol < 5; ++symbol) {
for (stream = 0; stream < 3; ++stream) {
int const index = (int)(bits[stream] >> 53);
HUF_DEltX2 const entry = dtable[index];
MEM_write16(op[stream], entry.sequence);
bits[stream] <<= (entry.nbBits);
op[stream] += (entry.length);
}
}
/* Do 1 table lookup from the final stream */
{
int const index = (int)(bits[3] >> 53);
HUF_DEltX2 const entry = dtable[index];
MEM_write16(op[3], entry.sequence);
bits[3] <<= (entry.nbBits);
op[3] += (entry.length);
}
/* Do 4 table lookups from the final stream & reload bitstreams */
for (stream = 0; stream < 4; ++stream) {
/* Do a table lookup from the final stream.
* This is interleaved with the reloading to reduce register
* pressure. This shouldn't be necessary, but compilers can
* struggle with codegen with high register pressure.
*/
{
int const index = (int)(bits[3] >> 53);
HUF_DEltX2 const entry = dtable[index];
MEM_write16(op[3], entry.sequence);
bits[3] <<= (entry.nbBits);
op[3] += (entry.length);
}
/* Reload the bitstreams. The final bitstream must be reloaded
* after the 5th symbol was decoded.
*/
{
int const ctz = ZSTD_countTrailingZeros64(bits[stream]);
int const nbBits = ctz & 7;
int const nbBytes = ctz >> 3;
ip[stream] -= nbBytes;
bits[stream] = MEM_read64(ip[stream]) | 1;
bits[stream] <<= nbBits;
}
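/* Sketch of the reload trick above (editor's note, assuming the sentinel-bit
* convention used by this loop): the lowest set bit of bits[stream] marks how
* far the container has been consumed, so ctz equals the total bits shifted out
* since the last refill. For example ctz == 21 => move ip[stream] back by
* 2 whole bytes (21 >> 3) and pre-shift the fresh 64-bit read by 5 bits (21 & 7),
* so the 5 already-consumed bits of the partially used byte stay consumed. */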
}
} while (op[3] < olimit);
}
_out:
/* Save the final values of each of the state variables back to args. */
ZSTD_memcpy(&args->bits, &bits, sizeof(bits));
ZSTD_memcpy(&args->ip, &ip, sizeof(ip));
ZSTD_memcpy(&args->op, &op, sizeof(op));
}
static HUF_FAST_BMI2_ATTRS size_t
HUF_decompress4X2_usingDTable_internal_fast(
void* dst, size_t dstSize,
void const* cSrc, size_t cSrcSize,
const HUF_DTable* DTable,
HUF_DecompressFastLoopFn loopFn) {
void const* dt = DTable + 1;
const BYTE* const iend = (const BYTE*)cSrc + 6;
BYTE* const oend = (BYTE*)dst + dstSize;
HUF_DecompressFastArgs args;
{
size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
FORWARD_IF_ERROR(ret, "Failed to init asm args");
if (ret == 0)
return 0;
}
assert(args.ip[0] >= args.ilimit);
loopFn(&args);
/* note : op4 already verified within main loop */
assert(args.ip[0] >= iend);
assert(args.ip[1] >= iend);
assert(args.ip[2] >= iend);
assert(args.ip[3] >= iend);
assert(args.op[3] <= oend);
(void)iend;
/* finish bitStreams one by one */
{
size_t const segmentSize = (dstSize+3) / 4;
BYTE* segmentEnd = (BYTE*)dst;
int i;
for (i = 0; i < 4; ++i) {
BIT_DStream_t bit;
if (segmentSize <= (size_t)(oend - segmentEnd))
segmentEnd += segmentSize;
else
segmentEnd = oend;
FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption");
args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG);
if (args.op[i] != segmentEnd)
return ERROR(corruption_detected);
}
}
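/* Worked example of the segment finish above (editor's sketch): with
* dstSize = 1001, segmentSize = (1001+3)/4 = 251, so the four segments end at
* offsets 251, 502, 753 and oend (the last one holds the remaining 248 bytes).
* Each stream is finished with the generic decoder up to its segment end, and
* any stream that stops short signals corruption. */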
/* decoded size */
return dstSize;
}
static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc,
size_t cSrcSize, HUF_DTable const* DTable, int flags)
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
HUF_DecompressUsingDTableFn fallbackFn = HUF_decompress4X2_usingDTable_internal_default;
HUF_DecompressFastLoopFn loopFn = HUF_decompress4X2_usingDTable_internal_fast_c_loop;
#if DYNAMIC_BMI2
if (flags & HUF_flags_bmi2) {
fallbackFn = HUF_decompress4X2_usingDTable_internal_bmi2;
# if ZSTD_ENABLE_ASM_X86_64_BMI2
if (!(flags & HUF_flags_disableAsm)) {
loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
}
# endif
} else {
return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
#endif
#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)
if (!(flags & HUF_flags_disableAsm)) {
loopFn = HUF_decompress4X2_usingDTable_internal_fast_asm_loop;
}
#endif
if (!(flags & HUF_flags_disableFast)) {
size_t const ret = HUF_decompress4X2_usingDTable_internal_fast(dst, dstSize, cSrc, cSrcSize, DTable, loopFn);
if (ret != 0)
return ret;
}
return fallbackFn(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
}
static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize,
static size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
void* workSpace, size_t wkspSize)
{
return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0);
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, flags);
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#else
return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#endif
}
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)dtd;
assert(dtd.tableType == 0);
return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)dtd;
assert(dtd.tableType == 1);
return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#else
return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) :
HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0);
#endif
}
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
{{0,0}, {1,1}}, /* Q==0 : impossible */
{{0,0}, {1,1}}, /* Q==1 : impossible */
{{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */
{{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */
{{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */
{{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */
{{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */
{{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */
{{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */
{{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */
{{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */
{{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */
{{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */
{{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */
{{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */
{{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */
#endif
}
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst,
size_t dstSize, const void* cSrc,
size_t cSrcSize, void* workSpace,
size_t wkspSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize == 0) return ERROR(corruption_detected);
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
#else
return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc,
cSrcSize, workSpace, wkspSize):
HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize);
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2)
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags)
return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) :
HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2);
return dtd.tableType ? HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags) :
HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, flags);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2)
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags)
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#else
return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) :
HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2);
#endif
}
}
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_readDTableX1_wksp(DTable, src, srcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize);
}
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_readDTableX2_wksp(DTable, src, srcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}
#endif
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX);
return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}
#endif
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2)
static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 };
#endif
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
#if defined(HUF_FORCE_DECOMPRESS_X1)
(void)algoNb;
assert(algoNb == 0);
return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize);
#elif defined(HUF_FORCE_DECOMPRESS_X2)
(void)algoNb;
assert(algoNb == 1);
return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize);
#else
return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
#endif
}
}
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { ZSTD_memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { ZSTD_memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
return algoNb ? HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags) :
HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, flags);
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize)
{
U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize,
workSpace, sizeof(workSpace));
}
#endif
/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
* The 3 additional buffers are as follows:
* 1 buffer for input loading
* 1 buffer for "next input" when submitting current one
* 1 buffer stuck in queue */
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3)
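/* Example (editor's note): with nbWorkers == 4 the pool holds
* BUF_POOL_MAX_NB_BUFFERS(4) == 2*4 + 3 == 11 buffers. */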
/* Don't create chunks smaller than the zstd block size.
* This stops us from regressing compression ratio too much,
* and ensures our output fits in ZSTD_compressBound().
*
* If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
* ZSTD_COMPRESSBOUND() will need to be updated.
*/
#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
assert(jobSizeMB >= 1);
U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
/* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
* expected job size is at least 4x larger. */
assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
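/* Worked example (editor's sketch): with a 4 MiB target section size,
* jobSizeKB == 4096, ZSTD_highbit32(4096) == 12, so rsyncBits == 22 and a
* synchronization point is expected roughly every 2^22 bytes, i.e. about once
* per job. Assuming RSYNC_MIN_BLOCK_LOG == ZSTD_BLOCKSIZELOG_MAX == 17 (value
* not shown here), the assert above holds since 22 >= 17 + 2. */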
return bufferStart < rangeEnd && rangeStart < bufferEnd;
{
BYTE const* const bufferEnd = bufferStart + buffer.capacity;
BYTE const* const rangeEnd = rangeStart + range.size;
/* Empty ranges cannot overlap */
if (bufferStart == bufferEnd || rangeStart == rangeEnd)
return 0;
return bufferStart < rangeEnd && rangeStart < bufferEnd;
}
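/* Example of the overlap test above (editor's note): the half-open ranges
* [100, 200) and [150, 300) overlap because 100 < 300 && 150 < 200, whereas
* [100, 200) and [200, 300) merely touch and are correctly reported as
* non-overlapping. */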
if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
/* We have enough bytes buffered to initialize the hash. */
if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
/* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
* because they can't possibly be a sync point. So we can start
* part way through the input buffer.
*/
pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
if (pos >= RSYNC_LENGTH) {
prev = istart + pos - RSYNC_LENGTH;
hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
} else {
assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
hash = ZSTD_rollingHash_append(hash, istart, pos);
}
} else {
/* We have enough bytes buffered to initialize the hash,
* and have processed enough bytes to find a sync point. */
} else {
/* We don't have enough bytes buffered to initialize the hash, but
* we know we have at least RSYNC_LENGTH bytes total.
* Start scanning after the first RSYNC_LENGTH bytes less the bytes
* already buffered.
*/
pos = RSYNC_LENGTH - mtctx->inBuff.filled;
prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
hash = ZSTD_rollingHash_append(hash, istart, pos);
/* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
/* This assert is very expensive, and Debian compiles with asserts enabled.
* So disable it for now. We can get similar coverage by checking it at the
* beginning & end of the loop.
* assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
*/
#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
#define ZSTD_PREDEF_THRESHOLD 8 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */
/* ZSTD_downscaleStat() :
* reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus)
* return the resulting sum of elements */
static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus)
static U32 sum_u32(const unsigned table[], size_t nbElts)
{
size_t n;
U32 total = 0;
for (n=0; n<nbElts; n++) {
total += table[n];
}
return total;
}
typedef enum { base_0possible=0, base_1guaranteed=1 } base_directive_e;
static U32
ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift, base_directive_e base1)
DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1);
assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31);
DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)",
(unsigned)lastEltIndex+1, (unsigned)shift );
assert(shift < 30);
}
/* ZSTD_scaleStats() :
* reduce all elt frequencies in table if sum too large
* return the resulting sum of elements */
static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget)
{
U32 const prevsum = sum_u32(table, lastEltIndex+1);
U32 const factor = prevsum >> logTarget;
DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget);
assert(logTarget < 30);
if (factor <= 1) return prevsum;
return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor), base_1guaranteed);
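/* Worked example (editor's sketch): if the table currently sums to 100000 and
* logTarget == 11, then factor == 100000 >> 11 == 48 and the shift passed to
* ZSTD_downscaleStats() is ZSTD_highbit32(48) == 5, so every count is divided
* by roughly 32 (but kept >= 1), bringing the sum to within a small factor of
* the 2^11 target. */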
if (optPtr->litLengthSum == 0) { /* first block : init */
if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */
DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef");
/* heuristic: use pre-defined stats for too small inputs */
if (srcSize <= ZSTD_PREDEF_THRESHOLD) {
DEBUGLOG(5, "srcSize <= %i : use predefined stats", ZSTD_PREDEF_THRESHOLD);
{ unsigned ll;
for (ll=0; ll<=MaxLL; ll++)
optPtr->litLengthFreq[ll] = 1;
{ unsigned const baseLLfreqs[MaxLL+1] = {
4, 2, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1
};
ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs));
optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1);
{ unsigned of;
for (of=0; of<=MaxOff; of++)
optPtr->offCodeFreq[of] = 1;
{ unsigned const baseOFCfreqs[MaxOff+1] = {
6, 2, 1, 1, 2, 3, 4, 4,
4, 3, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1
};
ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs));
optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1);
optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12);
optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11);
optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11);
optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11);
assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */
price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel);
U32 litPrice = WEIGHT(optPtr->litFreq[literals[u]], optLevel);
if (UNLIKELY(litPrice > litPriceMax)) litPrice = litPriceMax;
price -= litPrice;
if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel);
assert(litLength <= ZSTD_BLOCKSIZE_MAX);
if (optPtr->priceType == zop_predef)
return WEIGHT(litLength, optLevel);
/* ZSTD_LLcode() can't compute litLength price for sizes >= ZSTD_BLOCKSIZE_MAX
* because it isn't representable in the zstd format.
* So instead just pretend it would cost 1 bit more than ZSTD_BLOCKSIZE_MAX - 1.
* In such a case, the block would be all literals.
*/
if (litLength == ZSTD_BLOCKSIZE_MAX)
return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel);
* optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */
* @offBase : sumtype, representing an offset or a repcode, and using numeric representation of ZSTD_storeSeq()
* @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency)
*/
if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */
return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER);
if (optPtr->priceType == zop_predef) /* fixed scheme, does not use statistics */
return WEIGHT(mlBase, optLevel)
+ ((16 + offCode) * BITCOST_MULTIPLIER); /* emulated offset cost */
U32 const windowLow = ms->window.lowLimit;
/* windowLow is based on target because
* we only need positions that will be in the window at the end of the tree update.
*/
U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog);
FORCE_INLINE_TEMPLATE
U32 ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode,
const U32 rep[ZSTD_REP_NUM],
U32 const ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
const U32 lengthToBeat,
U32 const mls /* template */)
FORCE_INLINE_TEMPLATE U32
ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip, const BYTE* const iLimit,
const ZSTD_dictMode_e dictMode,
const U32 rep[ZSTD_REP_NUM],
const U32 ll0, /* tells if associated literal length is 0 or not. This value must be 0 or 1 */
const U32 lengthToBeat,
const U32 mls /* template */)
DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)",
(U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
DEBUGLOG(8, "found match of length %u at distance %u (offBase=%u)",
(U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)",
(U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE);
DEBUGLOG(8, "found dms match of length %u at distance %u (offBase=%u)",
(U32)matchLength, curr - matchIndex, OFFSET_TO_OFFBASE(curr - matchIndex));
FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches (
ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode,
const U32 rep[ZSTD_REP_NUM],
U32 const ll0,
U32 const lengthToBeat)
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal(
ZSTD_match_t* matches,
ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* ip,
const BYTE* const iHighLimit,
const U32 rep[ZSTD_REP_NUM],
U32 const ll0,
U32 const lengthToBeat,
const ZSTD_dictMode_e dictMode,
const U32 mls)
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32 const matchLengthSearch = cParams->minMatch;
DEBUGLOG(8, "ZSTD_BtGetAllMatches");
if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode);
switch(matchLengthSearch)
{
case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3);
default :
case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4);
case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5);
case 7 :
case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6);
assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls);
DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls);
if (ip < ms->window.base + ms->nextToUpdate)
return 0; /* skipped area */
ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode);
return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls);
}
#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls
#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \
static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \
ZSTD_match_t* matches, \
ZSTD_matchState_t* ms, \
U32* nextToUpdate3, \
const BYTE* ip, \
const BYTE* const iHighLimit, \
const U32 rep[ZSTD_REP_NUM], \
U32 const ll0, \
U32 const lengthToBeat) \
{ \
return ZSTD_btGetAllMatches_internal( \
matches, ms, nextToUpdate3, ip, iHighLimit, \
rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \
}
#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \
GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6)
GEN_ZSTD_BT_GET_ALL_MATCHES(noDict)
GEN_ZSTD_BT_GET_ALL_MATCHES(extDict)
GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState)
#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \
{ \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \
ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \
}
static ZSTD_getAllMatchesFn
ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode)
{
ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = {
ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict),
ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict),
ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState)
};
U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6);
assert((U32)dictMode < 3);
assert(mls - 3 < 4);
return getAllMatchesFns[(int)dictMode][mls - 3];
}
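/* Usage sketch (editor's note, not part of the original source): callers would
* typically hoist the selection out of the match loop, e.g.
*   ZSTD_getAllMatchesFn const getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode);
* so that, for instance, minMatch == 5 with ZSTD_noDict resolves once to
* ZSTD_btGetAllMatches_noDict_5. */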
rawSeqStore_t seqStore; /* External match candidates store for this block */
U32 startPosInBlock; /* Start position of the current match candidate */
U32 endPosInBlock; /* End position of the current match candidate */
U32 offset; /* Offset of the match candidate */
rawSeqStore_t seqStore; /* External match candidates store for this block */
U32 startPosInBlock; /* Start position of the current match candidate */
U32 endPosInBlock; /* End position of the current match candidate */
U32 offset; /* Offset of the match candidate */
/* Calculate appropriate bytes left in matchLength and litLength after adjusting
based on ldmSeqStore->posInSequence */
/* Calculate appropriate bytes left in matchLength and litLength
* after adjusting based on ldmSeqStore->posInSequence */
* Adds a match if it's long enough, based on its 'matchStartPosInBlock'
* and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches'
* Adds a match if it's long enough,
* based on its 'matchStartPosInBlock' and 'matchEndPosInBlock',
* into 'matches'. Maintains the correct ordering of 'matches'.
ZSTD_optLdm_t* optLdm, U32 currPosInBlock) {
U32 posDiff = currPosInBlock - optLdm->startPosInBlock;
/* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */
U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE;
const ZSTD_optLdm_t* optLdm, U32 currPosInBlock)
{
U32 const posDiff = currPosInBlock - optLdm->startPosInBlock;
/* Note: ZSTD_match_t actually contains offBase and matchLength (before subtracting MINMATCH) */
U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff;
DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u",
candidateOffCode, candidateMatchLength, currPosInBlock);
U32 const candidateOffBase = OFFSET_TO_OFFBASE(optLdm->offset);
DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offBase: %u matchLength %u) at block position=%u",
candidateOffBase, candidateMatchLength, currPosInBlock);
static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches,
U32 currPosInBlock, U32 remainingBytes) {
static void
ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm,
ZSTD_match_t* matches, U32* nbMatches,
U32 currPosInBlock, U32 remainingBytes)
{
U32 const maxOffset = matches[nbMatches-1].off;
DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
U32 const maxOffBase = matches[nbMatches-1].off;
DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffBase=%u at cPos=%u => start new series",
nbMatches, maxML, maxOffBase, (U32)(ip-prefixStart));
+ ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
- ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
+ (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel)
+ (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel)
- (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel);
ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen);
ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH);
ZSTD_updateStats(optStatePtr, llen, anchor, offBase, mlen);
ZSTD_storeSeq(seqStore, llen, anchor, iend, offBase, mlen);
static size_t ZSTD_compressBlock_opt0(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode);
}
static size_t ZSTD_compressBlock_opt2(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode)
{
return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode);
}
/* used in 2-pass strategy */
static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
{
U32 s, sum=0;
assert(ZSTD_FREQ_DIV+bonus >= 0);
for (s=0; s<lastEltIndex+1; s++) {
table[s] <<= ZSTD_FREQ_DIV+bonus;
table[s]--;
sum += table[s];
}
return sum;
}
/* used in 2-pass strategy */
MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
{
if (ZSTD_compressedLiterals(optPtr))
optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
}
&& (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
&& (srcSize > ZSTD_PREDEF_THRESHOLD)
&& (curr == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */
&& (srcSize > ZSTD_PREDEF_THRESHOLD) /* input large enough to not employ default stats */
#define LDM_HASH_CHAR_OFFSET 10
typedef struct {
U64 rolling;
U64 stopMask;
} ldmRollingHashState_t;
/** ZSTD_ldm_gear_init():
*
* Initializes the rolling hash state such that it will honor the
* settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
unsigned hashRateLog = params->hashRateLog;
state->rolling = ~(U32)0;
/* The choice of the splitting criterion is subject to two conditions:
* 1. it has to trigger on average every 2^(hashRateLog) bytes;
* 2. ideally, it has to depend on a window of minMatchLength bytes.
*
* In the gear hash algorithm, bit n depends on the last n bytes;
* so in order to obtain a good quality splitting criterion it is
* preferable to use bits with high weight.
*
* To match condition 1 we use a mask with hashRateLog bits set
* and, because of the previous remark, we make sure these bits
* have the highest possible weight while still respecting
* condition 2.
*/
if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
} else {
/* In this degenerate case we simply honor the hash rate. */
state->stopMask = ((U64)1 << hashRateLog) - 1;
}
}
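/* Example of the mask construction above (editor's sketch): with
* hashRateLog == 7 and minMatchLength >= 64, stopMask has the top 7 bits of
* the 64-bit gear hash set (0x7F << 57), so (hash & stopMask) == 0 triggers on
* average once every 2^7 == 128 bytes, while each tested bit still depends on
* a window no larger than minMatchLength bytes. */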
/** ZSTD_ldm_gear_reset()
* Feeds [data, data + minMatchLength) into the hash without registering any
* splits. This effectively resets the hash state. This is used when skipping
* over data, either at the beginning of a block, or skipping sections.
*/
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
BYTE const* data, size_t minMatchLength)
{
U64 hash = state->rolling;
size_t n = 0;
#define GEAR_ITER_ONCE() do { \
hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
n += 1; \
} while (0)
while (n + 3 < minMatchLength) {
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
}
while (n < minMatchLength) {
GEAR_ITER_ONCE();
}
#undef GEAR_ITER_ONCE
}
/** ZSTD_ldm_gear_feed():
*
* Registers in the splits array all the split points found in the first
* size bytes following the data pointer. This function terminates when
* either all the data has been processed or LDM_BATCH_SIZE splits are
* present in the splits array.
*
* Precondition: The splits array must not be full.
* Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
BYTE const* data, size_t size,
size_t* splits, unsigned* numSplits)
{
size_t n;
U64 hash, mask;
hash = state->rolling;
mask = state->stopMask;
n = 0;
#define GEAR_ITER_ONCE() do { \
hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
n += 1; \
if (UNLIKELY((hash & mask) == 0)) { \
splits[*numSplits] = n; \
*numSplits += 1; \
if (*numSplits == LDM_BATCH_SIZE) \
goto done; \
} \
} while (0)
while (n + 3 < size) {
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
GEAR_ITER_ONCE();
}
while (n < size) {
GEAR_ITER_ONCE();
}
#undef GEAR_ITER_ONCE
done:
state->rolling = hash;
return n;
}
return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
}
/** ZSTD_ldm_getSmallHash() :
* numBits should be <= 32
* If numBits==0, returns 0.
* @return : the most significant numBits of value. */
static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
{
assert(numBits <= 32);
return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
}
/** ZSTD_ldm_getChecksum() :
* numBitsToDiscard should be <= 32
* @return : the next most significant 32 bits after numBitsToDiscard */
static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
{
assert(numBitsToDiscard <= 32);
return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
/** ZSTD_ldm_getTag() :
* Given the hash, returns the most significant numTagBits bits
* after (32 + hbits) bits.
*
* If there are not enough bits remaining, return the last
* numTagBits bits. */
static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
{
assert(numTagBits < 32 && hbits <= 32);
if (32 - hbits < numTagBits) {
return hash & (((U32)1 << numTagBits) - 1);
} else {
return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
}
}
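/* Worked example (editor's sketch): with hBits == 20 and hashRateLog == 4 the
* 64-bit rolling hash is sliced as [20-bit small hash][32-bit checksum][rest]:
* ZSTD_ldm_getSmallHash(h, 20) == h >> 44, ZSTD_ldm_getChecksum(h, 20) ==
* (h >> 12) & 0xFFFFFFFF, and ZSTD_ldm_getTag(h, 20, 4) == (h >> 8) & 0xF,
* i.e. the 4 bits immediately below the checksum. */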
BYTE* const bucketOffsets = ldmState->bucketOffsets;
*(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
bucketOffsets[hash]++;
bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
}
BYTE* const pOffset = ldmState->bucketOffsets + hash;
unsigned const offset = *pOffset;
/** ZSTD_ldm_makeEntryAndInsertByTag() :
*
* Gets the small hash, checksum, and tag from the rollingHash.
*
* If the tag matches (1 << ldmParams.hashRateLog)-1, then
* creates an ldmEntry from the offset, and inserts it into the hash table.
*
* hBits is the length of the small hash, which is the most significant hBits
* of rollingHash. The checksum is the next 32 most significant bits, followed
* by ldmParams.hashRateLog bits that make up the tag. */
static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
U64 const rollingHash,
U32 const hBits,
U32 const offset,
ldmParams_t const ldmParams)
{
U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
if (tag == tagMask) {
U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
ldmEntry_t entry;
entry.offset = offset;
entry.checksum = checksum;
ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
}
*(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
*pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
/** ZSTD_ldm_fillLdmHashTable() :
*
* Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
* lastHash is the rolling hash that corresponds to lastHashed.
*
* Returns the rolling hash corresponding to position iend-1. */
static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
U64 lastHash, const BYTE* lastHashed,
const BYTE* iend, const BYTE* base,
U32 hBits, ldmParams_t const ldmParams)
{
U64 rollingHash = lastHash;
const BYTE* cur = lastHashed + 1;
while (cur < iend) {
rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
cur[ldmParams.minMatchLength-1],
state->hashPower);
ZSTD_ldm_makeEntryAndInsertByTag(state,
rollingHash, hBits,
(U32)(cur - base), ldmParams);
++cur;
}
return rollingHash;
}
U32 const minMatchLength = params->minMatchLength;
U32 const hBits = params->hashLog - params->bucketSizeLog;
BYTE const* const base = ldmState->window.base;
BYTE const* const istart = ip;
ldmRollingHashState_t hashState;
size_t* const splits = ldmState->splitIndices;
unsigned numSplits;
if ((size_t)(iend - ip) >= params->minMatchLength) {
U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength);
ZSTD_ldm_fillLdmHashTable(
state, startingHash, ip, iend - params->minMatchLength, state->window.base,
params->hashLog - params->bucketSizeLog,
*params);
ZSTD_ldm_gear_init(&hashState, params);
while (ip < iend) {
size_t hashed;
unsigned n;
numSplits = 0;
hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
for (n = 0; n < numSplits; n++) {
if (ip + splits[n] >= istart + minMatchLength) {
BYTE const* const split = ip + splits[n] - minMatchLength;
U64 const xxhash = XXH64(split, minMatchLength, 0);
U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
ldmEntry_t entry;
entry.offset = (U32)(split - base);
entry.checksum = (U32)(xxhash >> 32);
ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
}
}
ip += hashed;
/* Rolling hash */
BYTE const* lastHashed = NULL;
U64 rollingHash = 0;
/* Rolling hash state */
ldmRollingHashState_t hashState;
/* Arrays for staged-processing */
size_t* const splits = ldmState->splitIndices;
ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
unsigned numSplits;
if (srcSize < minMatchLength)
return iend - anchor;
/* Initialize the rolling hash state with the first minMatchLength bytes */
ZSTD_ldm_gear_init(&hashState, params);
ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
ip += minMatchLength;
while (ip < ilimit) {
size_t hashed;
unsigned n;
numSplits = 0;
hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
splits, &numSplits);
for (n = 0; n < numSplits; n++) {
BYTE const* const split = ip + splits[n] - minMatchLength;
U64 const xxhash = XXH64(split, minMatchLength, 0);
U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
while (ip <= ilimit) {
size_t mLength;
U32 const curr = (U32)(ip - base);
size_t forwardMatchLength = 0, backwardMatchLength = 0;
ldmEntry_t* bestEntry = NULL;
if (ip != istart) {
rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0],
lastHashed[minMatchLength],
hashPower);
} else {
rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength);
candidates[n].split = split;
candidates[n].hash = hash;
candidates[n].checksum = (U32)(xxhash >> 32);
candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
PREFETCH_L1(candidates[n].bucket);
lastHashed = ip;
for (n = 0; n < numSplits; n++) {
size_t forwardMatchLength = 0, backwardMatchLength = 0,
bestMatchLength = 0, mLength;
U32 offset;
BYTE const* const split = candidates[n].split;
U32 const checksum = candidates[n].checksum;
U32 const hash = candidates[n].hash;
ldmEntry_t* const bucket = candidates[n].bucket;
ldmEntry_t const* cur;
ldmEntry_t const* bestEntry = NULL;
ldmEntry_t newEntry;
/* Do not insert and do not look for a match */
if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) {
ip++;
continue;
}
newEntry.offset = (U32)(split - base);
newEntry.checksum = checksum;
/* Get the best entry and compute the match lengths */
{
ldmEntry_t* const bucket =
ZSTD_ldm_getBucket(ldmState,
ZSTD_ldm_getSmallHash(rollingHash, hBits),
*params);
ldmEntry_t* cur;
size_t bestMatchLength = 0;
U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
/* If a split point would generate a sequence overlapping with
* the previous one, we merely register it in the hash table and
* move on */
if (split < anchor) {
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
continue;
}
curBackwardMatchLength =
ZSTD_ldm_countBackwardsMatch_2segments(ip, anchor,
pMatch, lowMatchPtr,
dictStart, dictEnd);
curTotalMatchLength = curForwardMatchLength +
curBackwardMatchLength;
curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
/* No match found -- continue searching */
if (bestEntry == NULL) {
ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
hBits, curr,
*params);
ip++;
continue;
}
/* No match found -- insert an entry into the hash table
* and process the next candidate match */
if (bestEntry == NULL) {
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
continue;
}
/* Match found */
mLength = forwardMatchLength + backwardMatchLength;
ip -= backwardMatchLength;
/* Match found */
offset = (U32)(split - base) - bestEntry->offset;
mLength = forwardMatchLength + backwardMatchLength;
{
rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
{
/* Store the sequence:
* ip = curr - backwardMatchLength
* The match is at (bestEntry->offset - backwardMatchLength)
*/
U32 const matchIndex = bestEntry->offset;
U32 const offset = curr - matchIndex;
rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
/* Out of sequence storage */
if (rawSeqStore->size == rawSeqStore->capacity)
return ERROR(dstSize_tooSmall);
seq->litLength = (U32)(ip - anchor);
seq->matchLength = (U32)mLength;
seq->offset = offset;
rawSeqStore->size++;
}
/* Out of sequence storage */
if (rawSeqStore->size == rawSeqStore->capacity)
return ERROR(dstSize_tooSmall);
seq->litLength = (U32)(split - backwardMatchLength - anchor);
seq->matchLength = (U32)mLength;
seq->offset = offset;
rawSeqStore->size++;
}
/* Insert the current entry into the hash table */
ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
(U32)(lastHashed - base),
*params);
/* Insert the current entry into the hash table --- it must be
* done after the previous block to avoid clobbering bestEntry */
ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
/* Fill the hash table from lastHashed+1 to ip+mLength */
/* Heuristic: don't need to fill the entire table at end of block */
if (ip + mLength <= ilimit) {
rollingHash = ZSTD_ldm_fillLdmHashTable(
ldmState, rollingHash, lastHashed,
ip + mLength, base, hBits, *params);
lastHashed = ip + mLength - 1;
/* If we find a match that ends after the data that we've hashed
* then we have a repeating, overlapping, pattern. E.g. all zeros.
* If one repetition of the pattern matches our `stopMask` then all
* repetitions will. We don't need to insert them all into our table,
* only the first one. So skip over overlapping matches.
* This is a major speed boost (20x) for compressing a single byte
* repeated, when that byte ends up in the table.
*/
if (anchor > ip + hashed) {
ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
/* Continue the outer loop at anchor (ip + hashed == anchor). */
ip = anchor - hashed;
break;
}
size_t ZSTD_compressBlock_lazy2_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_greedy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
size_t ZSTD_compressBlock_lazy2_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize);
curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex);
bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) )
bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex);
return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
}
static size_t
ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
{
switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
case 7 :
case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
}
return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode);
static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
{
switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
case 7 :
case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
}
}
static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
{
switch(ms->cParams.minMatch)
{
default : /* includes case 3 */
case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
case 7 :
case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
}
}
/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
/* Update chains up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
ZSTD_matchState_t* ms,
const ZSTD_compressionParameters* const cParams,
const BYTE* ip, U32 const mls)
{
U32* const hashTable = ms->hashTable;
const U32 hashLog = cParams->hashLog;
U32* const chainTable = ms->chainTable;
const U32 chainMask = (1 << cParams->chainLog) - 1;
const BYTE* const base = ms->window.base;
const U32 target = (U32)(ip - base);
U32 idx = ms->nextToUpdate;
while(idx < target) { /* catch up */
size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
hashTable[h] = idx;
idx++;
}
ms->nextToUpdate = target;
return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
const ZSTD_compressionParameters* const cParams = &ms->cParams;
return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}
/***********************************
*  Dedicated dict search
***********************************/
/* Returns the longest match length found in the dedicated dict search structure.
* If none are longer than the argument ml, then ml will be returned.
*/
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
const ZSTD_matchState_t* const dms,
const BYTE* const ip, const BYTE* const iLimit,
const BYTE* const prefixStart, const U32 curr,
const U32 dictLimit, const size_t ddsIdx) {
const U32 ddsLowestIndex = dms->window.dictLimit;
const BYTE* const ddsBase = dms->window.base;
const BYTE* const ddsEnd = dms->window.nextSrc;
const U32 ddsSize = (U32)(ddsEnd - ddsBase);
const U32 ddsIndexDelta = dictLimit - ddsSize;
const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
U32 ddsAttempt;
U32 matchIndex;
for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
}
{
U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
U32 const chainIndex = chainPackedPointer >> 8;
PREFETCH_L1(&dms->chainTable[chainIndex]);
}
for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
size_t currentMl=0;
const BYTE* match;
matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
match = ddsBase + matchIndex;
if (!matchIndex) {
return ml;
}
/* guaranteed by table construction */
(void)ddsLowestIndex;
assert(matchIndex >= ddsLowestIndex);
assert(match+4 <= ddsEnd);
if (MEM_read32(match) == MEM_read32(ip)) {
/* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
}
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
if (ip+currentMl == iLimit) {
/* best possible, avoids read overflow on next attempt */
return ml;
}
}
}
{
U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
U32 chainIndex = chainPackedPointer >> 8;
U32 const chainLength = chainPackedPointer & 0xFF;
U32 const chainAttempts = nbAttempts - ddsAttempt;
U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
U32 chainAttempt;
for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
}
for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
size_t currentMl=0;
const BYTE* match;
matchIndex = dms->chainTable[chainIndex];
match = ddsBase + matchIndex;
/* guaranteed by table construction */
assert(matchIndex >= ddsLowestIndex);
assert(match+4 <= ddsEnd);
if (MEM_read32(match) == MEM_read32(ip)) {
/* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
}
/* save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
}
return ml;
}
/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
/* ZSTD_VecMask_next():
* Starting from the LSB, returns the idx of the next non-zero bit.
* Basically counting the nb of trailing zeroes.
*/
MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
return ZSTD_countTrailingZeros64(val);
}
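/* Illustration (hypothetical helper, not used by the library) : a ZSTD_VecMask
 * is typically consumed one set bit at a time, reading the index of the lowest
 * set bit with ZSTD_VecMask_next() and then clearing that bit with
 * `mask &= mask - 1`, exactly as the candidate loop in ZSTD_RowFindBestMatch()
 * does further below. */
MEM_STATIC U32 ZSTD_VecMask_countMatches_example(ZSTD_VecMask mask)
{
    U32 nbMatches = 0;
    while (mask > 0) {
        U32 const bitIdx = ZSTD_VecMask_next(mask); /* index of the next candidate entry */
        (void)bitIdx;      /* a real consumer maps bitIdx to a position within the row */
        mask &= mask - 1;  /* clear the lowest set bit */
        nbMatches++;
    }
    return nbMatches;
}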
/* ZSTD_rotateRight_*():
* Rotates a bitfield to the right by "count" bits.
* https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
*/
FORCE_INLINE_TEMPLATE
U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
assert(count < 64);
count &= 0x3F; /* for fickle pattern recognition */
return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
}
FORCE_INLINE_TEMPLATE
U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
assert(count < 32);
count &= 0x1F; /* for fickle pattern recognition */
return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
}
FORCE_INLINE_TEMPLATE
U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
assert(count < 16);
count &= 0x0F; /* for fickle pattern recognition */
return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
}
/* ZSTD_row_nextIndex():
* Returns the next index to insert at within a tagTable row, and updates the "head"
* value to reflect the update. Essentially cycles backwards from [0, {entries per row})
*/
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
U32 const next = (*tagRow - 1) & rowMask;
*tagRow = (BYTE)next;
return next;
}
/* ZSTD_isAligned():
* Checks that a pointer is aligned to "align" bytes which must be a power of 2.
*/
MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
assert((align & (align - 1)) == 0);
return (((size_t)ptr) & (align - 1)) == 0;
}
/* ZSTD_row_prefetch():
* Performs prefetching for the hashTable and tagTable at a given row.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) {
PREFETCH_L1(hashTable + relRow);
if (rowLog >= 5) {
PREFETCH_L1(hashTable + relRow + 16);
/* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
}
PREFETCH_L1(tagTable + relRow);
if (rowLog == 6) {
PREFETCH_L1(tagTable + relRow + 32);
}
assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
}
/* ZSTD_row_fillHashCache():
* Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
* but not beyond iLimit.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
U32 const rowLog, U32 const mls,
U32 idx, const BYTE* const iLimit)
{
U32 const* const hashTable = ms->hashTable;
U16 const* const tagTable = ms->tagTable;
U32 const hashLog = ms->rowHashLog;
U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
for (; idx < lim; ++idx) {
U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
}
DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
}
/* ZSTD_row_nextCachedHash():
* Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
* base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
*/
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
U16 const* tagTable, BYTE const* base,
U32 idx, U32 const hashLog,
U32 const rowLog, U32 const mls)
{
U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
{ U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
return hash;
}
}
/* ZSTD_row_update_internalImpl():
* Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
U32 updateStartIdx, U32 const updateEndIdx,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{
U32* const hashTable = ms->hashTable;
U16* const tagTable = ms->tagTable;
U32 const hashLog = ms->rowHashLog;
const BYTE* const base = ms->window.base;
DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
: (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
U32* const row = hashTable + relRow;
BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte.
Explicit cast allows us to get exact desired position within each row */
U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK;
row[pos] = updateStartIdx;
}
}
/* ZSTD_row_update_internal():
* Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
* Skips sections of long matches as is necessary.
*/
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{
U32 idx = ms->nextToUpdate;
const BYTE* const base = ms->window.base;
const U32 target = (U32)(ip - base);
const U32 kSkipThreshold = 384;
const U32 kMaxMatchStartPositionsToUpdate = 96;
const U32 kMaxMatchEndPositionsToUpdate = 32;
if (useCache) {
/* Only skip positions when using hash cache, i.e.
* if we are loading a dict, don't skip anything.
* If we decide to skip, then we only update a set number
* of positions at the beginning and end of the match.
*/
if (UNLIKELY(target - idx > kSkipThreshold)) {
U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
idx = target - kMaxMatchEndPositionsToUpdate;
ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
}
}
assert(target >= idx);
ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
ms->nextToUpdate = target;
}
/* ZSTD_row_update():
* External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
* processing.
*/
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
const U32 rowMask = (1u << rowLog) - 1;
const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
}
static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* const iLimit,
size_t* offsetPtr)
/* Returns the width, in bits, of the group of mask bits that corresponds to each
 * row entry. Not all architectures have an easy movemask instruction, so working
 * in groups of bits makes it easier and faster to iterate over the match mask.
 */
FORCE_INLINE_TEMPLATE U32
ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
{
assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
(void)rowEntries;
#if defined(ZSTD_ARCH_ARM_NEON)
/* NEON path only works for little endian */
if (!MEM_isLittleEndian()) {
return 1;
}
if (rowEntries == 16) {
return 4;
}
if (rowEntries == 32) {
return 2;
}
if (rowEntries == 64) {
return 1;
}
#endif
return 1;
}
#if defined(ZSTD_ARCH_X86_SSE2)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
{
const __m128i comparisonMask = _mm_set1_epi8((char)tag);
int matches[4] = {0};
int i;
assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
for (i=0; i<nbChunks; i++) {
const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
matches[i] = _mm_movemask_epi8(equalMask);
}
if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
assert(nbChunks == 4);
return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
}
#endif
#if defined(ZSTD_ARCH_ARM_NEON)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped)
{
assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
if (rowEntries == 16) {
/* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits.
* After that groups of 4 bits represent the equalMask. We lower
* all bits except the highest in these groups by doing AND with
* 0x88 = 0b10001000.
*/
const uint8x16_t chunk = vld1q_u8(src);
const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
const uint8x8_t res = vshrn_n_u16(equalMask, 4);
const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
} else if (rowEntries == 32) {
/* Same idea as with rowEntries == 16 but doing AND with
* 0x55 = 0b01010101.
*/
const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
const uint8x16_t dup = vdupq_n_u8(tag);
const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
const uint8x8_t res = vsli_n_u8(t0, t1, 4);
const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
} else { /* rowEntries == 64 */
const uint8x16x4_t chunk = vld4q_u8(src);
const uint8x16_t dup = vdupq_n_u8(tag);
const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
return ZSTD_rotateRight_U64(matches, headGrouped);
}
}
#endif
/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
* ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
* matches the hash at the nth position in a row of the tagTable.
* Each row is a circular buffer beginning at the value of "headGrouped". So we
* must rotate the "matches" bitfield to match up with the actual layout of the
* entries within the hashTable */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
{
const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET;
assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8);
#if defined(ZSTD_ARCH_X86_SSE2)
return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped);
#else /* SW or NEON implementation */
# if defined(ZSTD_ARCH_ARM_NEON)
/* This NEON path only works for little endian - otherwise use SWAR below */
if (MEM_isLittleEndian()) {
return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped);
}
# endif /* ZSTD_ARCH_ARM_NEON */
/* SWAR */
{ const int chunkSize = sizeof(size_t);
const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
const size_t xFF = ~((size_t)0);
const size_t x01 = xFF / 0xFF;
const size_t x80 = x01 << 7;
const size_t splatChar = tag * x01;
ZSTD_VecMask matches = 0;
int i = rowEntries - chunkSize;
assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
if (MEM_isLittleEndian()) { /* runtime check so have two loops */
const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
do {
size_t chunk = MEM_readST(&src[i]);
chunk ^= splatChar;
chunk = (((chunk | x80) - x01) | chunk) & x80;
matches <<= chunkSize;
matches |= (chunk * extractMagic) >> shiftAmount;
i -= chunkSize;
} while (i >= 0);
} else { /* big endian: reverse bits during extraction */
const size_t msb = xFF ^ (xFF >> 1);
const size_t extractMagic = (msb / 0x1FF) | msb;
do {
size_t chunk = MEM_readST(&src[i]);
chunk ^= splatChar;
chunk = (((chunk | x80) - x01) | chunk) & x80;
matches <<= chunkSize;
matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
i -= chunkSize;
} while (i >= 0);
}
matches = ~matches;
if (rowEntries == 16) {
return ZSTD_rotateRight_U16((U16)matches, headGrouped);
} else if (rowEntries == 32) {
return ZSTD_rotateRight_U32((U32)matches, headGrouped);
} else {
return ZSTD_rotateRight_U64((U64)matches, headGrouped);
}
}
#endif
}
/* The high-level approach of the SIMD row based match finder is as follows:
* - Figure out where to insert the new entry:
* - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
* - The hashTable is effectively split into groups or "rows" of 16, 32, or 64 entries of U32, and the hash determines
* which row to insert into.
* - Determine the correct position within the row to insert the entry into. Each row of 16, 32, or 64 can
* be considered as a circular buffer with a "head" index that resides in the tagTable.
* - Also insert the "tag" into the equivalent row and position in the tagTable.
* - Note: The tagTable has 17, 33, or 65 1-byte entries per row, due to 16, 32, or 64 tags, and 1 "head" entry.
* The 17, 33, or 65 entry rows are spaced out to occur every 32, 64, or 128 bytes, respectively,
* for alignment/performance reasons, leaving some bytes unused.
* - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
* generate a bitfield that we can cycle through to check the collisions in the hash table.
* - Pick the longest match.
*/
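/* Illustration (hypothetical helper, not part of the library) : how a hash
 * computed with hashLog + ZSTD_ROW_HASH_TAG_BITS bits is split by the row-based
 * matchfinder. The low ZSTD_ROW_HASH_TAG_BITS bits become the 1-byte "tag"
 * stored in the tagTable, and the remaining bits select the row. The real code
 * performs this split inline, e.g. in ZSTD_row_update_internalImpl() and
 * ZSTD_RowFindBestMatch(). */
MEM_STATIC void ZSTD_row_splitHash_example(U32 hash, U32 rowLog, U32* relRow, U32* tag)
{
    *tag    = hash & ZSTD_ROW_HASH_TAG_MASK;               /* 1-byte short hash, compared via SIMD */
    *relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;  /* offset of the row inside hashTable/tagTable */
}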
FORCE_INLINE_TEMPLATE
size_t ZSTD_RowFindBestMatch(
ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit,
size_t* offsetPtr,
const U32 mls, const ZSTD_dictMode_e dictMode,
const U32 rowLog)
{
U32* const hashTable = ms->hashTable;
U16* const tagTable = ms->tagTable;
U32* const hashCache = ms->hashCache;
const U32 hashLog = ms->rowHashLog;
const ZSTD_compressionParameters* const cParams = &ms->cParams;
const BYTE* const base = ms->window.base;
const BYTE* const dictBase = ms->window.dictBase;
const U32 dictLimit = ms->window.dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 curr = (U32)(ip-base);
const U32 maxDistance = 1U << cParams->windowLog;
const U32 lowestValid = ms->window.lowLimit;
const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
const U32 isDictionary = (ms->loadedDictEnd != 0);
const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
const U32 rowEntries = (1U << rowLog);
const U32 rowMask = rowEntries - 1;
const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
U32 nbAttempts = 1U << cappedSearchLog;
size_t ml=4-1;
/* DMS/DDS variables that may be referenced later */
const ZSTD_matchState_t* const dms = ms->dictMatchState;
/* Initialize the following variables to satisfy static analyzer */
size_t ddsIdx = 0;
U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
U32 dmsTag = 0;
U32* dmsRow = NULL;
BYTE* dmsTagRow = NULL;
if (dictMode == ZSTD_dedicatedDictSearch) {
const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
{ /* Prefetch DDS hashtable entry */
ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
PREFETCH_L1(&dms->hashTable[ddsIdx]);
}
ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
}
if (dictMode == ZSTD_dictMatchState) {
/* Prefetch DMS rows */
U32* const dmsHashTable = dms->hashTable;
U16* const dmsTagTable = dms->tagTable;
U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
dmsRow = dmsHashTable + dmsRelRow;
ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
}
/* Update the hashTable and tagTable up to (but not including) ip */
ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
{ /* Get the hash for ip, compute the appropriate row */
U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
U32* const row = hashTable + relRow;
BYTE* tagRow = (BYTE*)(tagTable + relRow);
U32 const headGrouped = (*tagRow & rowMask) * groupWidth;
U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
size_t numMatches = 0;
size_t currMatch = 0;
ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries);
/* Cycle through the matches and prefetch */
for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
U32 const matchIndex = row[matchPos];
assert(numMatches < rowEntries);
if (matchIndex < lowLimit)
break;
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
PREFETCH_L1(base + matchIndex);
} else {
PREFETCH_L1(dictBase + matchIndex);
}
matchBuffer[numMatches++] = matchIndex;
}
/* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
in ZSTD_row_update_internal() at the next search. */
{
U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
tagRow[pos + ZSTD_ROW_HASH_TAG_OFFSET] = (BYTE)tag;
row[pos] = ms->nextToUpdate++;
}
/* Return the longest match */
for (; currMatch < numMatches; ++currMatch) {
U32 const matchIndex = matchBuffer[currMatch];
size_t currentMl=0;
assert(matchIndex < curr);
assert(matchIndex >= lowLimit);
if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
const BYTE* const match = base + matchIndex;
assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
/* read 4B starting from (match + ml + 1 - sizeof(U32)) */
if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
currentMl = ZSTD_count(ip, match, iLimit);
} else {
const BYTE* const match = dictBase + matchIndex;
assert(match+4 <= dictEnd);
if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
}
/* Save best solution */
if (currentMl > ml) {
ml = currentMl;
*offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
}
}
}
assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
if (dictMode == ZSTD_dedicatedDictSearch) {
ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
} else if (dictMode == ZSTD_dictMatchState) {
/* TODO: Measure and potentially add prefetching to DMS */
const U32 dmsLowestIndex = dms->window.dictLimit;
const BYTE* const dmsBase = dms->window.base;
const BYTE* const dmsEnd = dms->window.nextSrc;
const U32 dmsSize = (U32)(dmsEnd - dmsBase);
const U32 dmsIndexDelta = dictLimit - dmsSize;
{ U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth;
U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
size_t numMatches = 0;
size_t currMatch = 0;
ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries);
for (; (matches > 0) && (nbAttempts > 0); --nbAttempts, matches &= (matches - 1)) {
U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
U32 const matchIndex = dmsRow[matchPos];
if (matchIndex < dmsLowestIndex)
break;
PREFETCH_L1(dmsBase + matchIndex);
matchBuffer[numMatches++] = matchIndex;
}
/* Return the longest match */
for (; currMatch < numMatches; ++currMatch) {
U32 const matchIndex = matchBuffer[currMatch];
size_t currentMl=0;
assert(matchIndex >= dmsLowestIndex);
assert(matchIndex < curr);
{ const BYTE* const match = dmsBase + matchIndex;
assert(match+4 <= dmsEnd);
if (MEM_read32(match) == MEM_read32(ip))
currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
}
if (currentMl > ml) {
ml = currentMl;
assert(curr > matchIndex + dmsIndexDelta);
*offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
if (ip+currentMl == iLimit) break;
}
}
}
}
return ml;
}
/**
* Generate search functions templated on (dictMode, mls, rowLog).
* These functions are outlined for code size & compilation time.
* ZSTD_searchMax() dispatches to the correct implementation function.
*
* TODO: The start of the search function involves loading and calculating a
* bunch of constants from the ZSTD_matchState_t. These computations could be
* done in an initialization function, and saved somewhere in the match state.
* Then we could pass a pointer to the saved state instead of the match state,
* and avoid duplicate computations.
*
* TODO: Move the match re-winding into searchMax. This improves compression
* ratio, and unlocks further simplifications with the next TODO.
*
* TODO: Try moving the repcode search into searchMax. After the re-winding
* and repcode search are in searchMax, there is no more logic in the match
* finder loop that requires knowledge about the dictMode. So we should be
* able to avoid force inlining it, and we can join the extDict loop with
* the single segment loop. It should go in searchMax instead of its own
* function to avoid having multiple virtual function calls per search.
*/
#define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
#define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
ZSTD_matchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offBasePtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
}
#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
ZSTD_matchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
}
#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
ZSTD_matchState_t* ms, \
const BYTE* ip, const BYTE* const iLimit, \
size_t* offsetPtr) \
{ \
assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
}
#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
X(dictMode, mls, 4) \
X(dictMode, mls, 5) \
X(dictMode, mls, 6)
#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
#define ZSTD_FOR_EACH_MLS(X, dictMode) \
X(dictMode, 4) \
X(dictMode, 5) \
X(dictMode, 6)
#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
X(__VA_ARGS__, noDict) \
X(__VA_ARGS__, extDict) \
X(__VA_ARGS__, dictMatchState) \
X(__VA_ARGS__, dedicatedDictSearch)
/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
/* Generate binary Tree search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
/* Generate hash chain search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
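/* For illustration only : after preprocessing, a generated row search function
 * such as ZSTD_ROW_SEARCH_FN(noDict, 4, 4) expands to roughly the following
 * outlined wrapper (ZSTD_SEARCH_FN_ATTRS is FORCE_NOINLINE), which
 * ZSTD_searchMax() below then selects through plain switch statements :
 *
 *   FORCE_NOINLINE size_t ZSTD_RowFindBestMatch_noDict_4_4(
 *           ZSTD_matchState_t* ms,
 *           const BYTE* ip, const BYTE* const iLimit,
 *           size_t* offsetPtr)
 *   {
 *       assert(MAX(4, MIN(6, ms->cParams.minMatch)) == 4);
 *       assert(MAX(4, MIN(6, ms->cParams.searchLog)) == 4);
 *       return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict, 4);
 *   }
 */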
typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
case mls: \
return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
case mls: \
return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
case rowLog: \
return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
#define ZSTD_SWITCH_MLS(X, dictMode) \
switch (mls) { \
ZSTD_FOR_EACH_MLS(X, dictMode) \
}
#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
case mls: \
switch (rowLog) { \
ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
} \
ZSTD_UNREACHABLE; \
break;
#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
switch (searchMethod) { \
case search_hashChain: \
ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
break; \
case search_binaryTree: \
ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
break; \
case search_rowHash: \
ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
break; \
} \
ZSTD_UNREACHABLE;
/**
* Searches for the longest match at @p ip.
* Dispatches to the correct implementation function based on the
* (searchMethod, dictMode, mls, rowLog). We use switch statements
* here instead of using an indirect function call through a function
* pointer because after Spectre and Meltdown mitigations, indirect
* function calls can be very costly, especially in the kernel.
*
* NOTE: dictMode and searchMethod should be templated, so those switch
* statements should be optimized out. Only the mls & rowLog switches
* should be left.
*
* @param ms The match state.
* @param ip The position to search at.
* @param iend The end of the input data.
* @param[out] offsetPtr Stores the match offset into this pointer.
* @param mls The minimum search length, in the range [4, 6].
* @param rowLog The row log (if applicable), in the range [4, 6].
* @param searchMethod The search method to use (templated).
* @param dictMode The dictMode (templated).
*
* @returns The length of the longest match found, or < mls if no match is found.
* If a match is found its offset is stored in @p offsetPtr.
*/
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
ZSTD_matchState_t* ms,
const BYTE* ip,
const BYTE* iend,
size_t* offsetPtr,
U32 const mls,
U32 const rowLog,
searchMethod_e const searchMethod,
ZSTD_dictMode_e const dictMode)
{
if (dictMode == ZSTD_noDict) {
ZSTD_SWITCH_SEARCH_METHOD(noDict)
} else if (dictMode == ZSTD_extDict) {
ZSTD_SWITCH_SEARCH_METHOD(extDict)
} else if (dictMode == ZSTD_dictMatchState) {
ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
} else if (dictMode == ZSTD_dedicatedDictSearch) {
ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
}
ZSTD_UNREACHABLE;
return 0;
}
/**
* This table is indexed first by the four ZSTD_dictMode_e values, and then
* by the two searchMethod_e values. NULLs are placed for configurations
* that should never occur (extDict modes go to the other implementation
* below and there is no DDSS for binary tree search yet).
*/
const searchMax_f searchFuncs[4][2] = {
{
ZSTD_HcFindBestMatch_selectMLS,
ZSTD_BtFindBestMatch_selectMLS
},
{
NULL,
NULL
},
{
ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
ZSTD_BtFindBestMatch_dictMatchState_selectMLS
},
{
ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
NULL
}
};
searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
U32 offset_1 = rep[0], offset_2 = rep[1];
U32 offsetSaved1 = 0, offsetSaved2 = 0;
if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
{ size_t ofbCandidate=999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
{ size_t ofbCandidate=999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
* Pay attention that `start[-value]` can lead to strange undefined behavior
* notably if `value` is unsigned, resulting in a large positive `-value`.
while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest))
&& (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */
{ size_t const litLength = (size_t)(start - anchor);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */
ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
/* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
* rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved1;
rep[1] = offset_2 ? offset_2 : offsetSaved2;
size_t ZSTD_compressBlock_lazy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
}
size_t ZSTD_compressBlock_greedy_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
}
size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_greedy_dictMatchState_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}
size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
}
size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
}
typedef size_t (*searchMax_f)(
ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
& (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
{ size_t ofbCandidate = 999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
{ size_t ofbCandidate = 999999999;
size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
{ size_t const litLength = (size_t)(start - anchor);
ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
& (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */
ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
}
size_t ZSTD_compressBlock_greedy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
}
size_t ZSTD_compressBlock_lazy_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
}
size_t ZSTD_compressBlock_lazy2_extDict_row(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize)
{
return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
}
static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashTable = ms->hashTable;
U32 const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
U32 const mls = cParams->minMatch;
const BYTE* const base = ms->window.base;
const BYTE* ip = base + ms->nextToUpdate;
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
/* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
* Feel free to remove this assert if there's a good reason! */
assert(dtlm == ZSTD_dtlm_full);
/* Always insert every fastHashFillStep position into the hash table.
* Insert the other positions if their hash entry is empty.
*/
for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
{ size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr); }
if (dtlm == ZSTD_dtlm_fast) continue;
/* Only load extra positions for ZSTD_dtlm_full */
{ U32 p;
for (p = 1; p < fastHashFillStep; ++p) {
size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) { /* not yet filled */
ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
} } } }
}
static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
/**
* If you squint hard enough (and ignore repcodes), the search operation at any
* given position is broken into 4 stages:
*
* 1. Hash (map position to hash value via input read)
* 2. Lookup (map hash val to index via hashtable read)
* 3. Load (map index to value at that position via input read)
* 4. Compare
*
* Each of these steps involves a memory read at an address which is computed
* from the previous step. This means these steps must be sequenced and their
* latencies are cumulative.
*
* Rather than do 1->2->3->4 sequentially for a single position before moving
* onto the next, this implementation interleaves these operations across the
* next few positions:
*
* R = Repcode Read & Compare
* H = Hash
* T = Table Lookup
* M = Match Read & Compare
*
* Pos | Time -->
* ----+-------------------
* N | ... M
* N+1 | ... TM
* N+2 | R H T M
* N+3 | H TM
* N+4 | R H T M
* N+5 | H ...
* N+6 | R ...
*
* This is very much analogous to the pipelining of execution in a CPU. And just
* like a CPU, we have to dump the pipeline when we find a match (i.e., take a
* branch).
*
* When this happens, we throw away our current state, and do the following prep
* to re-enter the loop:
*
* Pos | Time -->
* ----+-------------------
* N | H T
* N+1 | H
*
* This is also the work we do at the beginning to enter the loop initially.
*/
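/* A minimal sketch of the interleaving above (hypothetical pseudo-code, not the
 * kernel that follows) : the hash for the next position is issued before the
 * table lookup and match compare for the current position have completed, so
 * the dependent memory reads of consecutive positions overlap :
 *
 *   hashNext = hash(ip);
 *   while (ip < ilimit) {
 *       hashCurr = hashNext;
 *       idx      = table[hashCurr];              // T for ip
 *       hashNext = hash(ip + 1);                 // H for ip+1, overlaps with M below
 *       table[hashCurr] = (U32)(ip - base);
 *       if (read32(base + idx) == read32(ip))    // M for ip
 *           break;                               // match found : dump the pipeline
 *       ip++;
 *   }
 */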
/* init */
U32 rep_offset1 = rep[0];
U32 rep_offset2 = rep[1];
U32 offsetSaved1 = 0, offsetSaved2 = 0;
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
U32 idx; /* match idx for ip0 */
U32 mval; /* src value at match idx */
U32 offcode;
const BYTE* match0;
size_t mLength;
/* ip0 and ip1 are always adjacent. The targetLength skipping and
* uncompressibility acceleration is applied to every other position,
* matching the behavior of #1562. step therefore represents the gap
* between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
size_t step;
const BYTE* nextStep;
const size_t kStepIncr = (1 << (kSearchStrength - 1));
if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
}
/* start each op */
_start: /* Requires: ip0 */
step = stepSize;
nextStep = ip0 + kStepIncr;
/* calculate positions, ip0 - anchor == 0, so we skip step calc */
ip1 = ip0 + 1;
ip2 = ip0 + step;
ip3 = ip2 + 1;
if (ip3 >= ilimit) {
goto _cleanup;
}
hash0 = ZSTD_hashPtr(ip0, hlog, mls);
hash1 = ZSTD_hashPtr(ip1, hlog, mls);
idx = hashTable[hash0];
do {
/* load repcode match for ip[2] */
U32 const rval = MEM_read32(ip2 - rep_offset1);
/* write back hash table entry */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
/* check repcode at ip[2] */
if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
ip0 = ip2;
match0 = ip0 - rep_offset1;
mLength = ip0[-1] == match0[-1];
ip0 -= mLength;
match0 -= mLength;
offcode = REPCODE1_TO_OFFBASE;
mLength += 4;
goto _match;
}
/* load match for ip[0] */
if (idx >= prefixStartIndex) {
mval = MEM_read32(base + idx);
} else {
mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
}
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
/* First write next hash table entry; we've already calculated it.
* This write is known to be safe because the ip1 == ip0 + 1, so
* we know we will resume searching after ip1 */
hashTable[hash1] = (U32)(ip1 - base);
goto _offset;
}
/* lookup ip[1] */
idx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
/* advance to next positions */
ip0 = ip1;
ip1 = ip2;
ip2 = ip3;
/* write back hash table entry */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
/* load match for ip[0] */
if (idx >= prefixStartIndex) {
mval = MEM_read32(base + idx);
} else {
mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
}
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
/* first write next hash table entry; we've already calculated it */
if (step <= 4) {
/* We need to avoid writing an index into the hash table >= the
* position at which we will pick up our searching after we've
* taken this match.
*
* The minimum possible match has length 4, so the earliest ip0
* can be after we take this match will be the current ip0 + 4.
* ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
* write this position.
*/
hashTable[hash1] = (U32)(ip1 - base);
}
goto _offset;
}
/* lookup ip[1] */
idx = hashTable[hash1];
/* hash ip[2] */
hash0 = hash1;
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
/* advance to next positions */
ip0 = ip1;
ip1 = ip2;
ip2 = ip0 + step;
ip3 = ip1 + step;
/* calculate step */
if (ip2 >= nextStep) {
step++;
PREFETCH_L1(ip1 + 64);
PREFETCH_L1(ip1 + 128);
nextStep += kStepIncr;
}
} while (ip3 < ilimit);
_cleanup:
/* Note that there are probably still a couple positions we could search.
* However, it seems to be a meaningful performance hit to try to search
* them. So let's not. */
if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
/* store sequence */
size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
} } }
ip1 = ip0 + 1;
}
/* When the repcodes are outside of the prefix, we set them to zero before the loop.
* When the offsets are still zero, we need to restore them after the block to have a correct
* repcode history. If only one offset was invalid, it is easy. The tricky case is when both
* offsets were invalid. We need to figure out which offset to refill with.
* - If both offsets are zero they are in the same order.
* - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
* - If only one is zero, we need to decide which offset to restore.
* - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
* - It is impossible for rep_offset2 to be non-zero.
*
* So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
* set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
*/
offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
_offset: /* Requires: ip0, idx */
/* Compute the offset code. */
match0 = base + idx;
rep_offset2 = rep_offset1;
rep_offset1 = (U32)(ip0-match0);
offcode = OFFSET_TO_OFFBASE(rep_offset1);
mLength = 4;
/* Count the backwards match length. */
while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
ip0--;
match0--;
mLength++;
}
_match: /* Requires: ip0, match0, offcode */
/* Count the forward length. */
mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
ip0 += mLength;
anchor = ip0;
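/* Note (illustrative, not part of the original source): two sequence-encoding conventions
 * appear in this file. In the newer "offbase" convention used just above, repcodes map to
 * offbase values 1..3 (REPCODE1_TO_OFFBASE == 1), real offsets are stored as
 * offset + ZSTD_REP_NUM, and ZSTD_storeSeq() receives the full match length. The older
 * convention, visible in the other copy of this loop, stored offset + ZSTD_REP_MOVE and
 * passed mLength - MINMATCH instead. */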
/* Fill table and check for immediate repcode. */
if (ip0 <= ilimit) {
/* Fill Table */
assert(base+current0+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
/* store sequence */
size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
{ U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += rLength;
ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
anchor = ip0;
continue; /* faster when present (confirmed on gcc-8) ... (?) */
} } }
goto _start;
#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \
static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
}
ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)
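/* Illustrative expansion (not part of the original source): ZSTD_GEN_FAST_FN(noDict, 4, 1)
 * above generates a thin, fully specialized wrapper, roughly:
 *
 *   static size_t ZSTD_compressBlock_fast_noDict_4_1(
 *           ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
 *           void const* src, size_t srcSize)
 *   {
 *       return ZSTD_compressBlock_fast_noDict_generic(ms, seqStore, rep, src, srcSize, 4, 1);
 *   }
 *
 * so the mls/step dispatchers below can call a compile-time specialized variant instead of
 * passing mls and step as runtime parameters. */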
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
case 5 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
case 6 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
case 7 :
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
if (ms->cParams.targetLength > 1) {
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
}
} else {
switch(mls)
{
default: /* includes case 3 */
case 4 :
return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
case 5 :
return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
case 6 :
return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
case 7 :
return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
}
const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
const U32 dictHLog = dictCParams->hashLog;
const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
size_t const h = ZSTD_hashPtr(ip, hlog, mls);
U32 const curr = (U32)(ip-base);
U32 const matchIndex = hashTable[h];
const BYTE* match = base + matchIndex;
const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
hashTable[h] = curr; /* update hash table */
size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);
size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);
U32 matchIndex = hashTable[hash0];
U32 curr = (U32)(ip0 - base);
size_t step = stepSize;
const size_t kStepIncr = 1 << kSearchStrength;
const BYTE* nextStep = ip0 + kStepIncr;
/* Inner search loop */
while (1) {
const BYTE* match = base + matchIndex;
const U32 repIndex = curr + 1 - offset_1;
const BYTE* repMatch = (repIndex < prefixStartIndex) ?
dictBase + (repIndex - dictIndexDelta) :
base + repIndex;
const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
hashTable[hash0] = curr; /* update hash table */
if ( ((U32)((prefixStartIndex - 1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
     && (MEM_read32(repMatch) == MEM_read32(ip0 + 1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
ip0++;
ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
break;
}
if (dictTagsMatch) {
/* Found a possible dict match */
const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
const BYTE* dictMatch = dictBase + dictMatchIndex;
if (dictMatchIndex > dictStartIndex &&
MEM_read32(dictMatch) == MEM_read32(ip0)) {
/* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
if (matchIndex <= prefixStartIndex) {
U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
while (((ip0 > anchor) & (dictMatch > dictStart))
&& (ip0[-1] == dictMatch[-1])) {
ip0--;
dictMatch--;
mLength++;
} /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
break;
}
}
}
if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
} else if ( (matchIndex <= prefixStartIndex) ) {
size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
U32 const dictMatchIndex = dictHashTable[dictHash];
const BYTE* dictMatch = dictBase + dictMatchIndex;
if (dictMatchIndex <= dictStartIndex ||
MEM_read32(dictMatch) != MEM_read32(ip)) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
} else {
/* found a dict match */
U32 const offset = (U32)(curr-dictMatchIndex-dictIndexDelta);
mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
while (((ip>anchor) & (dictMatch>dictStart))
&& (ip[-1] == dictMatch[-1])) {
ip--; dictMatch--; mLength++;
if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
/* found a regular match */
U32 const offset = (U32) (ip0 - match);
mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
while (((ip0 > anchor) & (match > prefixStart))
&& (ip0[-1] == match[-1])) {
ip0--;
match--;
mLength++;
} else if (MEM_read32(match) != MEM_read32(ip)) {
/* it's not a match, and we're not going to check the dictionary */
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
} else {
/* found a regular match */
U32 const offset = (U32)(ip-match);
mLength = ZSTD_count(ip+4, match+4, iend) + 4;
while (((ip>anchor) & (match>prefixStart))
&& (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1;
offset_1 = offset;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
}
/* Prepare for next iteration */
dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
matchIndex = hashTable[hash1];
if (ip1 >= nextStep) {
step++;
nextStep += kStepIncr;
}
ip0 = ip1;
ip1 = ip1 + step;
if (ip1 > ilimit) goto _cleanup;
curr = (U32)(ip0 - base);
hash0 = hash1;
} /* end inner search loop */
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
ip0 += repLength2;
anchor = ip0;
U32 offsetSaved1 = 0, offsetSaved2 = 0;
const BYTE* ip0 = istart;
const BYTE* ip1;
const BYTE* ip2;
const BYTE* ip3;
U32 current0;
size_t hash0; /* hash for ip0 */
size_t hash1; /* hash for ip1 */
U32 idx; /* match idx for ip0 */
const BYTE* idxBase; /* base pointer for idx */
U32 offcode;
const BYTE* match0;
size_t mLength;
const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */
size_t step;
const BYTE* nextStep;
const size_t kStepIncr = (1 << (kSearchStrength - 1));
return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);
return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);
{ U32 const curr = (U32)(ip0 - base);
U32 const maxRep = curr - dictStartIndex;
if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
}
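/* Worked example (illustrative assumption, not part of the original source): with
 * curr == 1000 and dictStartIndex == 600, maxRep == 400. An inherited repcode of 500
 * would reference data before the start of the valid window, so it is parked in
 * offsetSaved1/offsetSaved2 and zeroed ("invalidated") for this block; the saved value
 * is only restored into rep[] at block end if it was never replaced by a real offset. */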
/* start each op */
_start: /* Requires: ip0 */
step = stepSize;
nextStep = ip0 + kStepIncr;
/* calculate positions, ip0 - anchor == 0, so we skip step calc */
ip1 = ip0 + 1;
ip2 = ip0 + step;
ip3 = ip2 + 1;
if (ip3 >= ilimit) {
goto _cleanup;
}
/* Search Loop */
while (ip < ilimit) { /* < instead of <=, because (ip+1) */
const size_t h = ZSTD_hashPtr(ip, hlog, mls);
const U32 matchIndex = hashTable[h];
const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
const BYTE* match = matchBase + matchIndex;
const U32 curr = (U32)(ip-base);
const U32 repIndex = curr + 1 - offset_1;
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
hashTable[h] = curr; /* update hash table */
DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr);
assert(offset_1 <= curr +1); /* check repIndex */
hash0 = ZSTD_hashPtr(ip0, hlog, mls);
hash1 = ZSTD_hashPtr(ip1, hlog, mls);
idx = hashTable[hash0];
idxBase = idx < prefixStartIndex ? dictBase : base;
if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex))
&& (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
ip += rLength;
anchor = ip;
} else {
if ( (matchIndex < dictStartIndex) ||
(MEM_read32(match) != MEM_read32(ip)) ) {
assert(stepSize >= 1);
ip += ((ip-anchor) >> kSearchStrength) + stepSize;
continue;
do {
{ /* load repcode match for ip[2] */
U32 const current2 = (U32)(ip2 - base);
U32 const repIndex = current2 - offset_1;
const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
U32 rval;
if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
& (offset_1 > 0) ) {
rval = MEM_read32(repBase + repIndex);
} else {
rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
{ const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
U32 const offset = curr - matchIndex;
size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
offset_2 = offset_1; offset_1 = offset; /* update offset history */
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
ip += mLength;
anchor = ip;
/* write back hash table entry */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
/* check repcode at ip[2] */
if (MEM_read32(ip2) == rval) {
ip0 = ip2;
match0 = repBase + repIndex;
matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
assert((match0 != prefixStart) & (match0 != dictStart));
mLength = ip0[-1] == match0[-1];
ip0 -= mLength;
match0 -= mLength;
offcode = REPCODE1_TO_OFFBASE;
mLength += 4;
goto _match;
} }
{ /* load match for ip[0] */
U32 const mval = idx >= dictStartIndex ?
MEM_read32(idxBase + idx) :
MEM_read32(ip0) ^ 1; /* guaranteed not to match */
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
goto _offset;
} }
/* lookup ip[1] */
idx = hashTable[hash1];
idxBase = idx < prefixStartIndex ? dictBase : base;
/* hash ip[2] */
hash0 = hash1;
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
/* advance to next positions */
ip0 = ip1;
ip1 = ip2;
ip2 = ip3;
/* write back hash table entry */
current0 = (U32)(ip0 - base);
hashTable[hash0] = current0;
{ /* load match for ip[0] */
U32 const mval = idx >= dictStartIndex ?
MEM_read32(idxBase + idx) :
MEM_read32(ip0) ^ 1; /* guaranteed not to match */
/* check match at ip[0] */
if (MEM_read32(ip0) == mval) {
/* found a match! */
goto _offset;
if (ip <= ilimit) {
/* Fill Table */
hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;
hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
/* check immediate repcode */
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
{ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
} } }
/* hash ip[2] */
hash0 = hash1;
hash1 = ZSTD_hashPtr(ip2, hlog, mls);
/* advance to next positions */
ip0 = ip1;
ip1 = ip2;
ip2 = ip0 + step;
ip3 = ip1 + step;
/* calculate step */
if (ip2 >= nextStep) {
step++;
PREFETCH_L1(ip1 + 64);
PREFETCH_L1(ip1 + 128);
nextStep += kStepIncr;
}
} while (ip3 < ilimit);
_cleanup:
/* Note that there are probably still a couple positions we could search.
* However, it seems to be a meaningful performance hit to try to search
* them. So let's not. */
_offset: /* Requires: ip0, idx, idxBase */
/* Compute the offset code. */
{ U32 const offset = current0 - idx;
const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
matchEnd = idx < prefixStartIndex ? dictEnd : iend;
match0 = idxBase + idx;
offset_2 = offset_1;
offset_1 = offset;
offcode = OFFSET_TO_OFFBASE(offset);
mLength = 4;
/* Count the backwards match length. */
while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
ip0--;
match0--;
mLength++;
} }
_match: /* Requires: ip0, match0, offcode, matchEnd */
/* Count the forward length. */
assert(matchEnd != 0);
mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);
ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
ip0 += mLength;
anchor = ip0;
/* write next hash table entry */
if (ip1 < ip0) {
hashTable[hash1] = (U32)(ip1 - base);
}
/* Fill table and check for immediate repcode. */
if (ip0 <= ilimit) {
/* Fill Table */
assert(base+current0+2 > istart); /* check base overflow */
hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
while (ip0 <= ilimit) {
U32 const repIndex2 = (U32)(ip0-base) - offset_2;
const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0)) /* intentional underflow */
&& (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
{ U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
ip0 += repLength2;
anchor = ip0;
continue;
}
break;
} }
goto _start;
static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{
const ZSTD_compressionParameters* const cParams = &ms->cParams;
U32* const hashLarge = ms->hashTable;
U32 const hBitsL = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
U32 const mls = cParams->minMatch;
U32* const hashSmall = ms->chainTable;
U32 const hBitsS = cParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
const BYTE* const base = ms->window.base;
const BYTE* ip = base + ms->nextToUpdate;
const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
const U32 fastHashFillStep = 3;
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
/* Always insert every fastHashFillStep position into the hash tables.
* Insert the other positions into the large hash table if their entry
* is empty.
*/
for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
U32 const curr = (U32)(ip - base);
U32 i;
for (i = 0; i < fastHashFillStep; ++i) {
size_t const smHashAndTag = ZSTD_hashPtr(ip + i, hBitsS, mls);
size_t const lgHashAndTag = ZSTD_hashPtr(ip + i, hBitsL, 8);
if (i == 0) {
ZSTD_writeTaggedIndex(hashSmall, smHashAndTag, curr + i);
}
if (i == 0 || hashLarge[lgHashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {
ZSTD_writeTaggedIndex(hashLarge, lgHashAndTag, curr + i);
}
/* Only load extra positions for ZSTD_dtlm_full */
if (dtlm == ZSTD_dtlm_fast)
break;
} }
}
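/* Illustrative sketch (assumption, not part of the original source; relies on the
 * ZSTD_writeTaggedIndex()/ZSTD_comparePackedTags() helpers defined elsewhere in this file):
 * when filling tables for a CDict, each entry packs the position together with a short
 * hash tag, roughly:
 *
 *   entry = (matchIndex << ZSTD_SHORT_CACHE_TAG_BITS) | (hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
 *
 * At search time, only the low tag bits of the packed entry are compared against the tag
 * bits of the current hash, which cheaply rejects most non-matching dictionary candidates
 * before the candidate match itself is ever dereferenced. */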
static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
} }
} }
}
void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
const void* const end,
ZSTD_dictTableLoadMethod_e dtlm,
ZSTD_tableFillPurpose_e tfp)
{
if (tfp == ZSTD_tfp_forCDict) {
ZSTD_fillDoubleHashTableForCDict(ms, end, dtlm);
} else {
ZSTD_fillDoubleHashTableForCCtx(ms, end, dtlm);
}
}
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_doubleFast_noDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls /* template */)
{
ZSTD_compressionParameters const* cParams = &ms->cParams;
U32* const hashLong = ms->hashTable;
const U32 hBitsL = cParams->hashLog;
U32* const hashSmall = ms->chainTable;
const U32 hBitsS = cParams->chainLog;
const BYTE* const base = ms->window.base;
const BYTE* const istart = (const BYTE*)src;
const BYTE* anchor = istart;
const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
/* presumes that, if there is a dictionary, it must be using Attach mode */
const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
const BYTE* const prefixLowest = base + prefixLowestIndex;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - HASH_READ_SIZE;
U32 offset_1=rep[0], offset_2=rep[1];
U32 offsetSaved1 = 0, offsetSaved2 = 0;
size_t mLength;
U32 offset;
U32 curr;
/* how many positions to search before increasing step size */
const size_t kStepIncr = 1 << kSearchStrength;
/* the position at which to increment the step size if no match is found */
const BYTE* nextStep;
size_t step; /* the current step size */
size_t hl0; /* the long hash at ip */
size_t hl1; /* the long hash at ip1 */
U32 idxl0; /* the long match index for ip */
U32 idxl1; /* the long match index for ip1 */
const BYTE* matchl0; /* the long match for ip */
const BYTE* matchs0; /* the short match for ip */
const BYTE* matchl1; /* the long match for ip1 */
const BYTE* ip = istart; /* the current position */
const BYTE* ip1; /* the next position */
DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
/* init */
ip += ((ip - prefixLowest) == 0);
{
U32 const current = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
U32 const maxRep = current - windowLow;
if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
}
/* Outer Loop: one iteration per match found and stored */
while (1) {
step = 1;
nextStep = ip + kStepIncr;
ip1 = ip + step;
if (ip1 > ilimit) {
goto _cleanup;
}
hl0 = ZSTD_hashPtr(ip, hBitsL, 8);
idxl0 = hashLong[hl0];
matchl0 = base + idxl0;
/* Inner Loop: one iteration per search / position */
do {
const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls);
const U32 idxs0 = hashSmall[hs0];
curr = (U32)(ip-base);
matchs0 = base + idxs0;
hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */
/* check noDict repcode */
if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
goto _match_stored;
}
hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
if (idxl0 > prefixLowestIndex) {
/* check prefix long match */
if (MEM_read64(matchl0) == MEM_read64(ip)) {
mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
offset = (U32)(ip-matchl0);
while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
goto _match_found;
}
}
idxl1 = hashLong[hl1];
matchl1 = base + idxl1;
if (idxs0 > prefixLowestIndex) {
/* check prefix short match */
if (MEM_read32(matchs0) == MEM_read32(ip)) {
goto _search_next_long;
}
}
if (ip1 >= nextStep) {
PREFETCH_L1(ip1 + 64);
PREFETCH_L1(ip1 + 128);
step++;
nextStep += kStepIncr;
}
ip = ip1;
ip1 += step;
hl0 = hl1;
idxl0 = idxl1;
matchl0 = matchl1;
#if defined(__aarch64__)
PREFETCH_L1(ip+256);
#endif
} while (ip1 <= ilimit);
_cleanup:
/* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
* rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
/* save reps for next block */
rep[0] = offset_1 ? offset_1 : offsetSaved1;
rep[1] = offset_2 ? offset_2 : offsetSaved2;
/* Return the last literals size */
return (size_t)(iend - anchor);
_search_next_long:
/* check prefix long +1 match */
if (idxl1 > prefixLowestIndex) {
if (MEM_read64(matchl1) == MEM_read64(ip1)) {
ip = ip1;
mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
offset = (U32)(ip-matchl1);
while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
goto _match_found;
}
}
/* if no long +1 match, explore the short match we found */
mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
offset = (U32)(ip - matchs0);
while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
/* fall-through */
_match_found: /* requires ip, offset, mLength */
offset_2 = offset_1;
offset_1 = offset;
if (step < 4) {
/* It is unsafe to write this value back to the hashtable when ip1 is
* greater than or equal to the new ip we will have after we're done
* processing this match. Rather than perform that test directly
* (ip1 >= ip + mLength), which costs speed in practice, we do a simpler
* more predictable test. The minimum match length, even for a short match, is
* 4 bytes, so as long as step (the initial distance between ip and ip1)
* is less than 4, we know ip1 < new ip. */
hashLong[hl1] = (U32)(ip1 - base);
}
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
_match_stored:
/* match found */
ip += mLength;
anchor = ip;
if (ip <= ilimit) {
/* Complementary insertion */
/* done after iLimit test, as candidates could be > iend-8 */
{ U32 const indexToInsert = curr+2;
hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert;
hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert;
hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base);
}
/* check immediate repcode */
while ( (ip <= ilimit)
&& ( (offset_2>0)
& (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
/* store sequence */
size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
ip += rLength;
anchor = ip;
continue; /* faster when present ... (?) */
}
}
}
const ZSTD_compressionParameters* const dictCParams =
dictMode == ZSTD_dictMatchState ?
&dms->cParams : NULL;
const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ?
dms->hashTable : NULL;
const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ?
dms->chainTable : NULL;
const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
dms->window.dictLimit : 0;
const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
dms->window.base : NULL;
const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
dictBase + dictStartIndex : NULL;
const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
dms->window.nextSrc : NULL;
const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
prefixLowestIndex - (U32)(dictEnd - dictBase) :
0;
const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ?
dictCParams->hashLog : hBitsL;
const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ?
dictCParams->chainLog : hBitsS;
const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
const U32* const dictHashLong = dms->hashTable;
const U32* const dictHashSmall = dms->chainTable;
const U32 dictStartIndex = dms->window.dictLimit;
const BYTE* const dictBase = dms->window.base;
const BYTE* const dictStart = dictBase + dictStartIndex;
const BYTE* const dictEnd = dms->window.nextSrc;
const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase);
const U32 dictHBitsL = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
const U32 dictHBitsS = dictCParams->chainLog + ZSTD_SHORT_CACHE_TAG_BITS;
/* if a dictionary is attached, it must be within window range */
if (dictMode == ZSTD_dictMatchState) {
assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex);
if (ms->prefetchCDictTables) {
size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
size_t const chainTableBytes = (((size_t)1) << dictCParams->chainLog) * sizeof(U32);
PREFETCH_AREA(dictHashLong, hashTableBytes)
PREFETCH_AREA(dictHashSmall, chainTableBytes)
if (dictMode == ZSTD_noDict) {
U32 const curr = (U32)(ip - base);
U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
U32 const maxRep = curr - windowLow;
if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
}
if (dictMode == ZSTD_dictMatchState) {
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
}
/* dictMatchState repCode checks don't currently handle repCode == 0
* disabling. */
assert(offset_1 <= dictAndPrefixLength);
assert(offset_2 <= dictAndPrefixLength);
size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8);
size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls);
size_t const dictHashAndTagL = ZSTD_hashPtr(ip, dictHBitsL, 8);
size_t const dictHashAndTagS = ZSTD_hashPtr(ip, dictHBitsS, mls);
U32 const dictMatchIndexAndTagL = dictHashLong[dictHashAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS];
U32 const dictMatchIndexAndTagS = dictHashSmall[dictHashAndTagS >> ZSTD_SHORT_CACHE_TAG_BITS];
int const dictTagsMatchL = ZSTD_comparePackedTags(dictMatchIndexAndTagL, dictHashAndTagL);
int const dictTagsMatchS = ZSTD_comparePackedTags(dictMatchIndexAndTagS, dictHashAndTagS);
/* check dictMatchState repcode */
if (dictMode == ZSTD_dictMatchState
&& ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
/* check repcode */
if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
ip++;
ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
goto _match_stored;
}
/* check noDict repcode */
if ( dictMode == ZSTD_noDict
&& ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
if (dictMode == ZSTD_dictMatchState) {
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState
&& repIndex2 < prefixLowestIndex ?
dictBase + repIndex2 - dictIndexDelta :
base + repIndex2;
if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
anchor = ip;
continue;
}
break;
} }
if (dictMode == ZSTD_noDict) {
while ( (ip <= ilimit)
&& ( (offset_2>0)
& (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
/* store sequence */
size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
ip += rLength;
while (ip <= ilimit) {
U32 const current2 = (U32)(ip-base);
U32 const repIndex2 = current2 - offset_2;
const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
dictBase + repIndex2 - dictIndexDelta :
base + repIndex2;
if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
&& (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
ip += repLength2;
#define ZSTD_GEN_DFAST_FN(dictMode, mls) \
static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
void const* src, size_t srcSize) \
{ \
return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
}
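/* Illustrative usage (not part of the original source): the generator macro above is
 * typically instantiated once per (dictMode, mls) pair, e.g.
 *
 *   ZSTD_GEN_DFAST_FN(noDict, 4)
 *   ZSTD_GEN_DFAST_FN(noDict, 5)
 *
 * producing ZSTD_compressBlock_doubleFast_noDict_4(), ZSTD_compressBlock_doubleFast_noDict_5(),
 * etc., from which a dispatcher picks a specialization based on cParams->minMatch. */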
}
/**
* Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
* Used to determine the number of bytes required for a given "aligned" allocation.
*/
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
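/* Illustrative numbers (assumption, not part of the original source), taking
 * ZSTD_CWKSP_ALIGNMENT_BYTES == 64:
 *
 *   ZSTD_cwksp_align( 1, 64) ->  64
 *   ZSTD_cwksp_align(64, 64) ->  64
 *   ZSTD_cwksp_align(65, 64) -> 128
 *
 * ZSTD_cwksp_aligned_alloc_size() then passes the rounded size through
 * ZSTD_cwksp_alloc_size(), which only adds extra bytes (redzones) in sanitizer builds and
 * otherwise returns the rounded size unchanged. */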
/**
* Returns the amount of additional space the cwksp must allocate
* for internal purposes (currently only alignment).
*/
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
/* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
* to align the beginning of tables section, as well as another n_2=[0, 63] bytes
* to align the beginning of the aligned section.
*
* n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
* aligneds being sized in multiples of 64 bytes.
*/
size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
return slackSpace;
MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
/**
* Return the number of additional bytes required to align a pointer to the given number of bytes.
* alignBytes must be a power of two.
*/
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
size_t const alignBytesMask = alignBytes - 1;
size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
assert((alignBytes & alignBytesMask) == 0);
assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
return bytes;
}
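/* Worked example (illustrative, not part of the original source), with alignBytes == 64:
 *
 *   ptr % 64 ==  0  ->  (64 -  0) & 63 ==  0   (already aligned, no padding needed)
 *   ptr % 64 ==  8  ->  (64 -  8) & 63 == 56
 *   ptr % 64 == 63  ->  (64 - 63) & 63 ==  1
 *
 * The masking is why the assert above can guarantee the result never equals a full
 * ZSTD_CWKSP_ALIGNMENT_BYTES. */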
/**
* Internal function. Do not use directly.
* Reserves the given number of bytes within the aligned/buffer segment of the wksp,
* which counts from the end of the wksp (as opposed to the object/table segment).
*
* Returns a pointer to the beginning of that space.
*/
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
void* const alloc = (BYTE*)ws->allocStart - bytes;
void* const bottom = ws->tableEnd;
DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
if (alloc < bottom) {
DEBUGLOG(4, "cwksp: alloc failed!");
ws->allocFailed = 1;
return NULL;
}
/* the area is reserved from the end of wksp.
* If it overlaps with tableValidEnd, it voids guarantees on values' range */
if (alloc < ws->tableValidEnd) {
ws->tableValidEnd = alloc;
}
ws->allocStart = alloc;
return alloc;
}
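/* Minimal usage sketch (illustrative assumption, not part of the original source):
 *
 *   void* const buf = ZSTD_cwksp_reserve_internal_buffer_space(ws, 1024);
 *   if (buf == NULL) {
 *       // ws->allocFailed is now set; the caller must treat the workspace as too small.
 *   }
 *
 * As the comment above notes, this is an internal primitive; normal callers go through the
 * higher-level ZSTD_cwksp_reserve_*() wrappers instead. */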
/**
* Moves the cwksp to the next phase, and does any necessary allocations.
* cwksp initialization must necessarily go through each phase in order.
* Returns 0 on success, or a zstd error code.
*/
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
/* If unaligned allocations down from a too-large top have left us
* unaligned, we need to realign our alloc ptr. Technically, this
* can consume space that is unaccounted for in the neededSpace
* calculation. However, I believe this can only happen when the
* workspace is too large, and specifically when it is too large
* by a larger margin than the space that will be consumed. */
/* TODO: cleaner, compiler warning friendly way to do this??? */
ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
if (ws->allocStart < ws->tableValidEnd) {
ws->tableValidEnd = ws->allocStart;
{ /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
size_t const bytesToAlign =
ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
memory_allocation, "aligned phase - alignment initial allocation failed!");
}
{ /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
void* const alloc = ws->objectEnd;
size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
void* const objectEnd = (BYTE*)alloc + bytesToAlign;
DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
"table phase - alignment initial allocation failed!");
ws->objectEnd = objectEnd;
ws->tableEnd = objectEnd; /* table area starts being empty */
if (ws->tableValidEnd < ws->tableEnd) {
ws->tableValidEnd = ws->tableEnd;
} } }
DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
ZSTD_cwksp_assert_internal_consistency(ws);
assert(alloc >= bottom);
if (alloc < bottom) {
DEBUGLOG(4, "cwksp: alloc failed!");
ws->allocFailed = 1;
return NULL;
}
if (alloc < ws->tableValidEnd) {
ws->tableValidEnd = alloc;
}
ws->allocStart = alloc;
alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
__asan_unpoison_memory_region(alloc, bytes);
if (alloc) {
alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
/* We need to keep the redzone poisoned while unpoisoning the bytes that
* are actually allocated. */
__asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
}
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
assert((bytes & (sizeof(U32)-1)) == 0);
return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
ZSTD_cwksp_alloc_aligned);
assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
return ptr;
void* alloc = ws->tableEnd;
void* end = (BYTE *)alloc + bytes;
void* top = ws->allocStart;
void* alloc;
void* end;
void* top;
if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
return NULL;
}
alloc = ws->tableEnd;
end = (BYTE *)alloc + bytes;
top = ws->allocStart;
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
/* ZSTD_cwksp_estimated_space_within_bounds() :
* Returns whether the estimated space needed for a wksp is within an acceptable limit of the
* actual amount of space used.
*/
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
size_t const estimatedSpace, int resizedWorkspace) {
if (resizedWorkspace) {
/* Resized/newly allocated wksp should have exact bounds */
return ZSTD_cwksp_used(ws) == estimatedSpace;
} else {
/* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
* than estimatedSpace. See the comments in zstd_cwksp.h for details.
*/
return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
}
}
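/* Worked example (illustrative, not part of the original source): for a reused (not
 * resized) workspace with estimatedSpace == 10000, any measured usage in the inclusive
 * range [9937, 10063] is accepted, reflecting the up-to-63-byte alignment slack described
 * above; a freshly allocated or resized workspace must match 10000 exactly. */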
/*-*************************************
* Superblock entropy buffer structs
***************************************/
/** ZSTD_hufCTablesMetadata_t :
* Stores Literals Block Type for a super-block in hType, and
* huffman tree description in hufDesBuffer.
* hufDesSize refers to the size of huffman tree description in bytes.
* This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */
typedef struct {
symbolEncodingType_e hType;
BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;
/** ZSTD_fseCTablesMetadata_t :
* Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
* fse tables in fseTablesBuffer.
* fseTablesSize refers to the size of fse tables in bytes.
* This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */
typedef struct {
symbolEncodingType_e llType;
symbolEncodingType_e ofType;
symbolEncodingType_e mlType;
BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
size_t fseTablesSize;
size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_compressSubBlock_sequences() */
} ZSTD_fseCTablesMetadata_t;
typedef struct {
ZSTD_hufCTablesMetadata_t hufMetadata;
ZSTD_fseCTablesMetadata_t fseMetadata;
} ZSTD_entropyCTablesMetadata_t;
/** ZSTD_buildSuperBlockEntropy_literal() :
* Builds entropy for the super-block literals.
* Stores literals block type (raw, rle, compressed, repeat) and
* huffman description table to hufMetadata.
* @return : size of huffman description table or error code */
static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize,
const ZSTD_hufCTables_t* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_hufCTablesMetadata_t* hufMetadata,
const int disableLiteralsCompression,
void* workspace, size_t wkspSize)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
BYTE* const countWkspStart = wkspStart;
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
BYTE* const nodeWksp = countWkspStart + countWkspSize;
const size_t nodeWkspSize = wkspEnd-nodeWksp;
unsigned maxSymbolValue = 255;
unsigned huffLog = HUF_TABLELOG_DEFAULT;
HUF_repeat repeat = prevHuf->repeatMode;
DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (disableLiteralsCompression) {
DEBUGLOG(5, "set_basic - disabled");
hufMetadata->hType = set_basic;
return 0;
}
/* small ? don't even attempt compression (speed opt) */
# define COMPRESS_LITERALS_SIZE_MIN 63
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) {
DEBUGLOG(5, "set_basic - too small");
hufMetadata->hType = set_basic;
return 0;
}
}
/* Scan input and build symbol stats */
{ size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize);
FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
if (largest == srcSize) {
DEBUGLOG(5, "set_rle");
hufMetadata->hType = set_rle;
return 0;
}
if (largest <= (srcSize >> 7)+4) {
DEBUGLOG(5, "set_basic - no gain");
hufMetadata->hType = set_basic;
return 0;
}
}
/* Validate the previous Huffman table */
if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
repeat = HUF_repeat_none;
}
/* Build Huffman Tree */
ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
huffLog = (U32)maxBits;
{ /* Build and write the CTable */
size_t const newCSize = HUF_estimateCompressedSize(
(HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
size_t const hSize = HUF_writeCTable(
hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
(HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog);
/* Check against repeating the previous CTable */
if (repeat != HUF_repeat_none) {
size_t const oldCSize = HUF_estimateCompressedSize(
(HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
DEBUGLOG(5, "set_repeat - smaller");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_repeat;
return 0;
}
}
if (newCSize + hSize >= srcSize) {
DEBUGLOG(5, "set_basic - no gains");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_basic;
return 0;
}
DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
hufMetadata->hType = set_compressed;
nextHuf->repeatMode = HUF_repeat_check;
return hSize;
}
}
}
/** ZSTD_buildSuperBlockEntropy_sequences() :
* Builds entropy for the super-block sequences.
* Stores symbol compression modes and fse table to fseMetadata.
* @return : size of fse tables or error code */
static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr,
const ZSTD_fseCTables_t* prevEntropy,
ZSTD_fseCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
BYTE* const countWkspStart = wkspStart;
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned);
BYTE* const cTableWksp = countWkspStart + countWkspSize;
const size_t cTableWkspSize = wkspEnd-cTableWksp;
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
BYTE* const ostart = fseMetadata->fseTablesBuffer;
BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
BYTE* op = ostart;
assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE));
DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq);
ZSTD_memset(workspace, 0, wkspSize);
fseMetadata->lastCountSize = 0;
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* build CTable for Literal Lengths */
{ U32 LLtype;
unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
countWksp, max, mostFrequent, nbSeq,
LLFSELog, prevEntropy->litlengthCTable,
LL_defaultNorm, LL_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(set_basic < set_compressed && set_rle < set_compressed);
assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable),
cTableWksp, cTableWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
if (LLtype == set_compressed)
fseMetadata->lastCountSize = countSize;
op += countSize;
fseMetadata->llType = (symbolEncodingType_e) LLtype;
} }
/* build CTable for Offsets */
{ U32 Offtype;
unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
countWksp, max, mostFrequent, nbSeq,
OffFSELog, prevEntropy->offcodeCTable,
OF_defaultNorm, OF_defaultNormLog,
defaultPolicy, strategy);
assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable),
cTableWksp, cTableWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
if (Offtype == set_compressed)
fseMetadata->lastCountSize = countSize;
op += countSize;
fseMetadata->ofType = (symbolEncodingType_e) Offtype;
} }
/* build CTable for MatchLengths */
{ U32 MLtype;
unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
countWksp, max, mostFrequent, nbSeq,
MLFSELog, prevEntropy->matchlengthCTable,
ML_defaultNorm, ML_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable),
cTableWksp, cTableWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
if (MLtype == set_compressed)
fseMetadata->lastCountSize = countSize;
op += countSize;
fseMetadata->mlType = (symbolEncodingType_e) MLtype;
} }
assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer));
return op-ostart;
}
/** ZSTD_buildSuperBlockEntropy() :
* Builds entropy for the super-block.
* @return : 0 on success or error code */
static size_t
ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize)
{
size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart;
DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy");
entropyMetadata->hufMetadata.hufDesSize =
ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize,
&prevEntropy->huf, &nextEntropy->huf,
&entropyMetadata->hufMetadata,
ZSTD_disableLiteralsCompression(cctxParams),
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed");
entropyMetadata->fseMetadata.fseTablesSize =
ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr,
&prevEntropy->fse, &nextEntropy->fse,
cctxParams,
&entropyMetadata->fseMetadata,
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed");
return 0;
}
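/* Illustrative call sequence (assumption, not part of the original source; the prevEntropy,
 * nextEntropy, cctxParams, workspace and wkspSize names below are hypothetical caller-side
 * variables):
 *
 *   ZSTD_entropyCTablesMetadata_t entropyMetadata;
 *   FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(seqStorePtr,
 *                        &prevEntropy, &nextEntropy, cctxParams,
 *                        &entropyMetadata, workspace, wkspSize), "");
 *
 * On success, entropyMetadata.hufMetadata and entropyMetadata.fseMetadata carry the block
 * types and serialized table descriptions consumed by the ZSTD_compressSubBlock_*() helpers
 * below when emitting each sub-block. */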
static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
const BYTE* literals, size_t litSize,
void* dst, size_t dstSize,
const int bmi2, int writeEntropy, int* entropyWritten)
static size_t
ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
const BYTE* literals, size_t litSize,
void* dst, size_t dstSize,
const int bmi2, int writeEntropy, int* entropyWritten)
/* TODO bmi2 */
{ const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
: HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
{ int const flags = bmi2 ? HUF_flags_bmi2 : 0;
const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags)
: HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags);
static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
static size_t
ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
const seqDef* sequences, size_t nbSeq,
size_t litSize, int lastSequence)
{
static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
const seqDef* sequences, size_t nbSeq,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const int bmi2, int writeEntropy, int* entropyWritten)
static size_t
ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
const seqDef* sequences, size_t nbSeq,
const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
const int bmi2, int writeEntropy, int* entropyWritten)
assert(entropyWorkspaceSize >= FSE_BUILD_CTABLE_WORKSPACE_SIZE(MaxSeq, MaxFSELog));
FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "");
{ size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */
assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
(void)entropyWorkspaceSize;
FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
assert(oend >= op);
{ size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), "");
FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
/* ZSTD_compressLiterals():
* @entropyWorkspace: must be aligned on 4-bytes boundaries
* @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
* @suspectUncompressible: sampling checks, to potentially skip huffman coding
*/
size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
/* **************************************************************
* Debug Traces
****************************************************************/
#if DEBUGLEVEL >= 2
static size_t showHexa(const void* src, size_t srcSize)
{
const BYTE* const ip = (const BYTE*)src;
size_t u;
for (u=0; u<srcSize; u++) {
RAWLOG(5, " %02X", ip[u]); (void)ip;
}
RAWLOG(5, " \n");
return srcSize;
}
#endif
size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_strategy strategy, int disableLiteralCompression,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
const int bmi2)
/* ZSTD_minLiteralsToCompress() :
* returns the minimal amount of literals required
* for literal compression to even be attempted.
* Minimum is made tighter as compression strategy increases.
*/
static size_t
ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
{
assert((int)strategy >= 0);
assert((int)strategy <= 9);
/* btultra2 : min 8 bytes;
* then 2x larger for each successive compression strategy
* max threshold 64 bytes */
{ int const shift = MIN(9-(int)strategy, 3);
size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
return mintc;
}
}
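/* Worked thresholds (illustrative, not part of the original source), when the previous
 * Huffman table is not directly reusable (huf_repeat != HUF_repeat_valid):
 *
 *   strategy == 9 (btultra2) : shift = 0 -> min  8 bytes
 *   strategy == 8 (btultra)  : shift = 1 -> min 16 bytes
 *   strategy == 7 (btopt)    : shift = 2 -> min 32 bytes
 *   strategy <= 6            : shift = 3 -> min 64 bytes
 *
 * With a reusable valid table (HUF_repeat_valid), the threshold drops to 6 bytes
 * regardless of strategy. */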
size_t ZSTD_compressLiterals (
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
void* entropyWorkspace, size_t entropyWorkspaceSize,
const ZSTD_hufCTables_t* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_strategy strategy,
int disableLiteralCompression,
int suspectUncompressible,
int bmi2)
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
disableLiteralCompression, (U32)srcSize);
DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
disableLiteralCompression, (U32)srcSize, dstCapacity);
DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));
/* small ? don't even attempt compression (speed opt) */
# define COMPRESS_LITERALS_SIZE_MIN 63
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
/* if too small, don't even attempt compression (speed opt) */
if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
int const flags = 0
| (bmi2 ? HUF_flags_bmi2 : 0)
| (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
| (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
| (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);
typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
huf_compress_f huf_compress;
cLitSize = singleStream ?
HUF_compress1X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
HUF_compress4X_repeat(
ostart+lhSize, dstCapacity-lhSize, src, srcSize,
HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
src, srcSize,
HUF_SYMBOLVALUE_MAX, LitHufLog,
entropyWorkspace, entropyWorkspaceSize,
(HUF_CElt*)nextHuf->CTable,
&repeat, flags);
DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
}
{ size_t const minGain = ZSTD_minGain(srcSize, strategy);
if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
} }
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
}
/* A return value of 1 signals that the alphabet consists of a single symbol.
 * However, in some rare circumstances, it could also be the compressed size (a single byte).
 * For that outcome to be possible, `srcSize` must be < 8
 * (and no statistics may have been generated).
 * Therefore, in such a case, actively check that all bytes are identical. */
if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
} }
{ U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
{ U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
/***********************************************
* Entropy buffer statistics structs and funcs *
***********************************************/
/** ZSTD_hufCTablesMetadata_t :
* Stores Literals Block Type for a super-block in hType, and
* huffman tree description in hufDesBuffer.
* hufDesSize refers to the size of huffman tree description in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
typedef struct {
symbolEncodingType_e hType;
BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
size_t hufDesSize;
} ZSTD_hufCTablesMetadata_t;
/** ZSTD_fseCTablesMetadata_t :
* Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
* fse tables in fseTablesBuffer.
* fseTablesSize refers to the size of fse tables in bytes.
* This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
typedef struct {
symbolEncodingType_e llType;
symbolEncodingType_e ofType;
symbolEncodingType_e mlType;
BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
size_t fseTablesSize;
size_t lastCountSize; /* Accounts for a bug in v1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
} ZSTD_fseCTablesMetadata_t;
U32 off; /* Offset code (offset + ZSTD_REP_MOVE) for the match */
ZSTD_hufCTablesMetadata_t hufMetadata;
ZSTD_fseCTablesMetadata_t fseMetadata;
} ZSTD_entropyCTablesMetadata_t;
/** ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
* @return : 0 on success or error code */
size_t ZSTD_buildBlockEntropyStats(
const seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize);
/*********************************
* Compression internals structs *
*********************************/
typedef struct {
U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
BYTE const* nextSrc; /* next block here to continue on current prefix */
BYTE const* base; /* All regular indexes relative to this position */
BYTE const* dictBase; /* extDict indexes relative to this position */
U32 dictLimit; /* below that point, need extDict */
U32 lowLimit; /* below that point, no more valid data */
U32 nbOverflowCorrections; /* Number of times overflow correction has run since
* ZSTD_window_init(). Useful for debugging coredumps
* and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
*/
U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
U16* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
/* Controls prefetching in some dictMatchState matchfinders */
ZSTD_paramSwitch_e prefetchCDictTables;
/* Controls whether zstd will fall back to an internal matchfinder
* if the external matchfinder returns an error code. */
int enableMatchFinderFallback;
/* Indicates whether an external matchfinder has been referenced.
* Users can't set this externally.
* It is set internally in ZSTD_registerSequenceProducer(). */
int useSequenceProducer;
/* Adjust the max block size */
size_t maxBlockSize;
/* Controls repcode search in external sequence parsing */
ZSTD_paramSwitch_e searchForExternalRepcodes;
/**
* Struct that contains all elements of the block splitter that should be allocated
* in a workspace.
*/
#define ZSTD_MAX_NB_BLOCK_SPLITS 196
typedef struct {
seqStore_t fullSeqStoreChunk;
seqStore_t firstHalfSeqStore;
seqStore_t secondHalfSeqStore;
seqStore_t currSeqStore;
seqStore_t nextSeqStore;
U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
ZSTD_entropyCTablesMetadata_t entropyMetadata;
} ZSTD_blockSplitCtx;
/* Context for block-level external matchfinder API */
typedef struct {
void* mState;
ZSTD_sequenceProducer_F* mFinder;
ZSTD_Sequence* seqBuffer;
size_t seqBufferCapacity;
} ZSTD_externalMatchCtx;
}
typedef struct repcodes_s {
U32 rep[3];
} repcodes_t;
MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
{
repcodes_t newReps;
if (offset >= ZSTD_REP_NUM) { /* full offset */
newReps.rep[2] = rep[1];
newReps.rep[1] = rep[0];
newReps.rep[0] = offset - ZSTD_REP_MOVE;
} else { /* repcode */
U32 const repCode = offset + ll0;
if (repCode > 0) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
newReps.rep[1] = rep[0];
newReps.rep[0] = currentOffset;
} else { /* repCode == 0 */
ZSTD_memcpy(&newReps, rep, sizeof(newReps));
}
}
return newReps;
}
#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1)
#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2)
#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3)
#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM)
#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM)
#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */
* Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
* `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
* `mlBase` : matchLength - MINMATCH
* Allowed to overread literals up to litLimit.
* Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t.
* @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
* @matchLength : must be >= MINMATCH
* Allowed to over-read literals up to litLimit.
HINT_INLINE UNUSED_ATTR
void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
HINT_INLINE UNUSED_ATTR void
ZSTD_storeSeq(seqStore_t* seqStorePtr,
size_t litLength, const BYTE* literals, const BYTE* litLimit,
U32 offBase,
size_t matchLength)
DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u",
pos, (U32)litLength, (U32)matchLength, (U32)offBase);
assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
seqStorePtr->longLengthID = 1;
assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
seqStorePtr->longLengthType = ZSTD_llt_literalLength;
if (mlBase>0xFFFF) {
assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
seqStorePtr->longLengthID = 2;
seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
assert(matchLength >= MINMATCH);
{ size_t const mlBase = matchLength - MINMATCH;
if (mlBase>0xFFFF) {
assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
seqStorePtr->longLengthType = ZSTD_llt_matchLength;
seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
}
seqStorePtr->sequences[0].mlBase = (U16)mlBase;
/* ZSTD_updateRep() :
* updates in-place @rep (array of repeat offsets)
* @offBase : sum-type, using numeric representation of ZSTD_storeSeq()
*/
MEM_STATIC void
ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
{
if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */
rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = OFFBASE_TO_OFFSET(offBase);
} else { /* repcode */
U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
if (repCode > 0) { /* note : if repCode==0, no change */
U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
rep[2] = (repCode >= 2) ? rep[1] : rep[2];
rep[1] = rep[0];
rep[0] = currentOffset;
} else { /* repCode == 0 */
/* nothing to do */
}
}
}
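/* Minimal usage sketch (not part of the original source): exercises ZSTD_updateRep()
 * with hypothetical repcode values, assuming ZSTD_REP_NUM == 3. */
MEM_STATIC void ZSTD_updateRep_example(void)
{
    U32 rep[ZSTD_REP_NUM] = { 1, 4, 8 };
    /* a regular match at distance 100 shifts the history: rep becomes {100, 1, 4} */
    ZSTD_updateRep(rep, OFFSET_TO_OFFBASE(100), 0 /* ll0 */);
    assert(rep[0] == 100 && rep[1] == 1 && rep[2] == 4);
    /* a repcode-2 match (with non-zero literal length, so ll0 == 0)
     * swaps rep[0] and rep[1]: rep becomes {1, 100, 4} */
    ZSTD_updateRep(rep, REPCODE_TO_OFFBASE(2), 0 /* ll0 */);
    assert(rep[0] == 1 && rep[1] == 100 && rep[2] == 4);
}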
/*-*************************************
* Match length counter
***************************************/
static unsigned ZSTD_NbCommonBytes (size_t val)
MEM_STATIC repcodes_t
ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
if (MEM_isLittleEndian()) {
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _tzcnt_u64(val) >> 3;
# else
unsigned long r = 0;
return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_ctzll((U64)val) >> 3);
# else
static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
0, 3, 1, 3, 1, 4, 2, 7,
0, 2, 3, 6, 1, 5, 3, 5,
1, 3, 4, 4, 2, 5, 6, 7,
7, 0, 1, 2, 3, 3, 4, 6,
2, 6, 5, 5, 3, 4, 5, 6,
7, 1, 2, 4, 6, 4, 4, 5,
7, 2, 6, 5, 7, 6, 7, 7 };
return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r=0;
return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_ctz((U32)val) >> 3);
# else
static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
3, 2, 2, 1, 3, 2, 0, 1,
3, 3, 1, 2, 2, 2, 2, 0,
3, 1, 2, 0, 1, 0, 1, 1 };
return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
}
} else { /* Big Endian CPU */
if (MEM_64bits()) {
# if defined(_MSC_VER) && defined(_WIN64)
# if STATIC_BMI2
return _lzcnt_u64(val) >> 3;
# else
unsigned long r = 0;
return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 4)
return (__builtin_clzll(val) >> 3);
# else
unsigned r;
const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bit mode */
if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
r += (!val);
return r;
# endif
} else { /* 32 bits */
# if defined(_MSC_VER)
unsigned long r = 0;
return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3)
return (__builtin_clz((U32)val) >> 3);
# else
unsigned r;
if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
r += (!val);
return r;
# endif
} }
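/* Illustrative note (not part of the original source): callers typically pass
 * the XOR of two multi-byte loads. On a little-endian 64-bit target,
 * val == 0x0000000000FF0000 means the two positions share 2 leading bytes
 * (the first difference is in byte #2), so the function returns 2. */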
repcodes_t newReps;
ZSTD_memcpy(&newReps, rep, sizeof(newReps));
ZSTD_updateRep(newReps.rep, offBase, ll0);
return newReps;
static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
static U32 ZSTD_hash4(U32 u, U32 h) { assert(h <= 32); return (u * prime4bytes) >> (32-h) ; }
static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h); }
}
/* Defining this macro to non-zero tells zstd to run the overflow correction
* code much more frequently. This is very inefficient, and should only be
* used for tests and fuzzers.
*/
#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
# else
# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
# endif
#endif
/**
* ZSTD_window_canOverflowCorrect():
* Returns non-zero if the indices are large enough for overflow correction
* to work correctly without impacting compression ratio.
*/
MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
U32 cycleLog,
U32 maxDist,
U32 loadedDictEnd,
void const* src)
{
U32 const cycleSize = 1u << cycleLog;
U32 const curr = (U32)((BYTE const*)src - window.base);
U32 const minIndexToOverflowCorrect = cycleSize
+ MAX(maxDist, cycleSize)
+ ZSTD_WINDOW_START_INDEX;
/* Adjust the min index to backoff the overflow correction frequency,
* so we don't waste too much CPU in overflow correction. If this
* computation overflows we don't really care, we just need to make
* sure it is at least minIndexToOverflowCorrect.
*/
U32 const adjustment = window.nbOverflowCorrections + 1;
U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
minIndexToOverflowCorrect);
U32 const indexLargeEnough = curr > adjustedIndex;
/* Only overflow correct early if the dictionary is invalidated already,
* so we don't hurt compression ratio.
*/
U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
return indexLargeEnough && dictionaryInvalidated;
}
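/* Worked example (illustrative, not part of the original source; assumes
 * ZSTD_WINDOW_START_INDEX == 2): with cycleLog == 20 (cycleSize == 1 MiB) and
 * maxDist == 8 MiB, correction is first considered once
 *   curr > 1 MiB + 8 MiB + 2 == 9437186
 * and the dictionary window is already invalidated (curr > maxDist + loadedDictEnd).
 * The threshold is then multiplied by (nbOverflowCorrections + 1) for later corrections. */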
U32 const currentCycle0 = curr & cycleMask;
/* Exclude zero so that newCurrent - maxDist >= 1. */
U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
U32 const newCurrent = currentCycle1 + maxDist;
U32 const currentCycle = curr & cycleMask;
/* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
: 0;
U32 const newCurrent = currentCycle
+ currentCycleCorrection
+ MAX(maxDist, cycleSize);
assert((maxDist & cycleMask) == 0);
/* maxDist must be a power of two so that:
* (newCurrent & cycleMask) == (curr & cycleMask)
* This is required to not corrupt the chains / binary tree.
*/
assert((maxDist & (maxDist - 1)) == 0);
assert((curr & cycleMask) == (newCurrent & cycleMask));
if (window->lowLimit <= correction) window->lowLimit = 1;
else window->lowLimit -= correction;
if (window->dictLimit <= correction) window->dictLimit = 1;
else window->dictLimit -= correction;
if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
window->lowLimit = ZSTD_WINDOW_START_INDEX;
} else {
window->lowLimit -= correction;
}
if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
window->dictLimit = ZSTD_WINDOW_START_INDEX;
} else {
window->dictLimit -= correction;
}
*
* We also have to invalidate the dictionary if ZSTD_window_update() has detected
* non-contiguous segments, which means that loadedDictEnd != window->dictLimit.
* loadedDictEnd may be 0, if forceWindow is true, but in that case we never use
* dictMatchState, so setting it to NULL is not a problem.
*/
window->base = (BYTE const*)"";
window->dictBase = (BYTE const*)"";
window->dictLimit = 1; /* start from 1, so that 1st position is valid */
window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */
window->nextSrc = window->base + 1; /* see issue #1241 */
window->base = (BYTE const*)" ";
window->dictBase = (BYTE const*)" ";
ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */
window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */
window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */
window->nbOverflowCorrections = 0;
U32 const maxDistance = 1U << windowLog;
U32 const lowestValid = ms->window.lowLimit;
U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
U32 const isDictionary = (ms->loadedDictEnd != 0);
/* Short Cache */
/* Normally, zstd matchfinders follow this flow:
* 1. Compute hash at ip
* 2. Load index from hashTable[hash]
* 3. Check if *ip == *(base + index)
* In dictionary compression, loading *(base + index) is often an L2 or even L3 miss.
*
* Short cache is an optimization which allows us to avoid step 3 most of the time
* when the data doesn't actually match. With short cache, the flow becomes:
* 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip.
* 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works.
* 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue.
*
* Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to
* dictMatchState matchfinders.
*/
#define ZSTD_SHORT_CACHE_TAG_BITS 8
#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
* Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) {
size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
}
/* Helper function for short cache matchfinders.
* Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */
MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK;
U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK;
return tag1 == tag2;
}
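/* Minimal sketch (not part of the original source) of the short-cache flow
 * described above; `hashTable`, `hashAndTag` and `idx` are hypothetical values
 * produced by a dictMatchState matchfinder (idx must fit in 24 bits). */
MEM_STATIC int ZSTD_shortCache_example(U32* hashTable, size_t hashAndTag, U32 idx)
{
    /* fill stage: pack (index, tag) into the CDict hash table */
    ZSTD_writeTaggedIndex(hashTable, hashAndTag, idx);
    /* search stage: only dereference the candidate position when the 8-bit tags match */
    {   size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
        return ZSTD_comparePackedTags(hashTable[hash], hashAndTag);
    }
}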
/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
* ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
* Note that the block delimiter must include the last literals of the block.
*/
size_t
ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
/* Returns the number of bytes to move the current read position back by.
* Only non-zero if we ended up splitting a sequence.
* Otherwise, it may return a ZSTD error if something went wrong.
*
* This function will attempt to scan through blockSize bytes
* represented by the sequences in @inSeqs,
* storing any (partial) sequences.
*
* Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
* avoid splitting a match, or to avoid splitting a match such that it would produce a match
* smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
*/
size_t
ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
/*!
* ZSTD_HASHLOG3_MAX :
* Maximum size of the hash table dedicated to finding 3-byte matches,
* in log format, aka 17 => 1 << 17 == 128Ki positions.
* This structure is only used in zstd_opt.
* Since allocation is centralized for all strategies, it has to be known here.
* The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
* so that zstd_opt.c doesn't need to know about this constant.
*/
#ifndef ZSTD_HASHLOG3_MAX
# define ZSTD_HASHLOG3_MAX 17
#endif
* Note that the result from this function is only compatible with the "normal"
* full-block strategy.
* When there are a lot of small blocks due to frequent flushes in streaming mode,
* the overhead of headers can make the compressed data larger than
* the return value of ZSTD_compressBound().
* Note that the result from this function is only valid for
* the one-pass compression functions.
* When employing the streaming mode,
* if flushes are frequently altering the size of blocks,
* the overhead from block headers can make the compressed data larger
* than the return value of ZSTD_compressBound().
/* Returns 1 if compression parameters are such that we should
 * enable long distance matching (wlog >= 27, strategy >= btopt). */
/* Returns true if the strategy supports using a row based matchfinder */
static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
}
/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
* for this compression.
*/
static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
assert(mode != ZSTD_ps_auto);
return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
}
/* Returns row matchfinder usage given an initial mode and cParams */
static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
int const kHasSIMD128 = 1;
#else
int const kHasSIMD128 = 0;
#endif
if (mode != ZSTD_ps_auto) return mode; /* if explicitly requested, honor it: even without SIMD, the row matchfinder will still be used */
mode = ZSTD_ps_disable;
if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
if (kHasSIMD128) {
if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
} else {
if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
}
return mode;
}
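/* Illustrative expectations (not part of the original source), derived from the
 * resolution rules above on a SIMD-capable target (kHasSIMD128 == 1):
 *   ZSTD_ps_auto + ZSTD_lazy  + windowLog 20  -> ZSTD_ps_enable
 *   ZSTD_ps_auto + ZSTD_lazy  + windowLog 14  -> ZSTD_ps_disable
 *   ZSTD_ps_auto + ZSTD_btopt + any windowLog -> ZSTD_ps_disable (strategy not row-based) */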
/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
const ZSTD_paramSwitch_e useRowMatchFinder,
const U32 forDDSDict) {
assert(useRowMatchFinder != ZSTD_ps_auto);
/* We should always allocate a chain table if we are allocating a matchstate for a DDS dictionary.
 * We do not allocate a chain table if we are using ZSTD_fast, or are using the row-based matchfinder.
*/
return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
}
/* Returns ZSTD_ps_enable if compression parameters are such that we should
 * enable long distance matching (wlog >= 27, strategy >= btopt), ZSTD_ps_disable otherwise. */
static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) {
return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27;
static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
const ZSTD_compressionParameters* const cParams) {
if (mode != ZSTD_ps_auto) return mode;
return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
static int ZSTD_resolveExternalSequenceValidation(int mode) {
return mode;
}
/* Resolves maxBlockSize to the default if no value is present. */
static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) {
if (maxBlockSize == 0) {
return ZSTD_BLOCKSIZE_MAX;
} else {
return maxBlockSize;
}
}
static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) {
if (value != ZSTD_ps_auto) return value;
if (cLevel < 10) {
return ZSTD_ps_disable;
} else {
return ZSTD_ps_enable;
}
}
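/* Illustrative note (not part of the original source): with ZSTD_ps_auto,
 * compression levels below 10 resolve to ZSTD_ps_disable (e.g. level 3),
 * while level 10 and above resolve to ZSTD_ps_enable (e.g. level 19). */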
/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged.
* If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */
static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) {
return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast;
}
if (ZSTD_CParams_shouldEnableLdm(&cParams)) {
DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params");
cctxParams.ldmParams.enableLdm = 1;
/* LDM is enabled by default for optimal parser and window size >= 128MB */
/* Adjust advanced params according to cParams */
cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences);
cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize);
cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes,
cctxParams.compressionLevel);
/**
* Initializes `cctxParams` from `params` and `compressionLevel`.
* @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
*/
static void
ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
const ZSTD_parameters* params,
int compressionLevel)
{
assert(!ZSTD_checkCParams(params->cParams));
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
cctxParams->cParams = params->cParams;
cctxParams->fParams = params->fParams;
/* Should not matter, as all cParams are presumed properly defined.
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = compressionLevel;
cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
}
ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
assert(!ZSTD_checkCParams(params.cParams));
cctxParams->cParams = params.cParams;
cctxParams->fParams = params.fParams;
cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
/* ZSTD_assignParamsToCCtxParams() :
* params is presumed valid at this stage */
static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams(
const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
/**
* Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
* @param params Validated zstd parameters.
*/
static void ZSTD_CCtxParams_setZstdParams(
ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
ret.cParams = params->cParams;
ret.fParams = params->fParams;
ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */
return ret;
cctxParams->cParams = params->cParams;
cctxParams->fParams = params->fParams;
/* Should not matter, as all cParams are presumed properly defined.
* But, set it for tracing anyway.
*/
cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed);
bounds.lowerBound = ZSTD_lcm_auto;
bounds.upperBound = ZSTD_lcm_uncompressed;
ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_useBlockSplitter:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_useRowMatchFinder:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_deterministicRefPrefix:
case ZSTD_c_prefetchCDictTables:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
case ZSTD_c_enableSeqProducerFallback:
bounds.lowerBound = 0;
bounds.upperBound = 1;
return bounds;
case ZSTD_c_maxBlockSize:
bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
return bounds;
case ZSTD_c_searchForExternalRepcodes:
bounds.lowerBound = (int)ZSTD_ps_auto;
bounds.upperBound = (int)ZSTD_ps_disable;
return bounds;
RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN,
parameter_outOfBound, "Param out of bounds!");
CCtxParams->ldmParams.hashRateLog = value;
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
CCtxParams->ldmParams.hashRateLog = (U32)value;
case ZSTD_c_useBlockSplitter:
BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
return CCtxParams->useBlockSplitter;
case ZSTD_c_useRowMatchFinder:
BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
return CCtxParams->useRowMatchFinder;
case ZSTD_c_deterministicRefPrefix:
BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
CCtxParams->deterministicRefPrefix = !!value;
return CCtxParams->deterministicRefPrefix;
case ZSTD_c_prefetchCDictTables:
BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value;
return CCtxParams->prefetchCDictTables;
case ZSTD_c_enableSeqProducerFallback:
BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
CCtxParams->enableMatchFinderFallback = value;
return CCtxParams->enableMatchFinderFallback;
case ZSTD_c_maxBlockSize:
if (value!=0) /* 0 ==> default */
BOUNDCHECK(ZSTD_c_maxBlockSize, value);
CCtxParams->maxBlockSize = value;
return CCtxParams->maxBlockSize;
case ZSTD_c_searchForExternalRepcodes:
BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value);
CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value;
return CCtxParams->searchForExternalRepcodes;
case ZSTD_c_useBlockSplitter :
*value = (int)CCtxParams->useBlockSplitter;
break;
case ZSTD_c_useRowMatchFinder :
*value = (int)CCtxParams->useRowMatchFinder;
break;
case ZSTD_c_deterministicRefPrefix:
*value = (int)CCtxParams->deterministicRefPrefix;
break;
case ZSTD_c_prefetchCDictTables:
*value = (int)CCtxParams->prefetchCDictTables;
break;
case ZSTD_c_enableSeqProducerFallback:
*value = CCtxParams->enableMatchFinderFallback;
break;
case ZSTD_c_maxBlockSize:
*value = (int)CCtxParams->maxBlockSize;
break;
case ZSTD_c_searchForExternalRepcodes:
*value = (int)CCtxParams->searchForExternalRepcodes;
break;
ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams)
{
DEBUGLOG(4, "ZSTD_CCtx_setCParams");
assert(cctx != NULL);
if (cctx->streamStage != zcss_init) {
/* All parameters in @cparams are allowed to be updated during MT compression.
* This must be signaled, so that MT compression picks up the changes */
cctx->cParamsChanged = 1;
}
/* only update if parameters are valid */
FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), "");
cctx->requestedParams.cParams = cparams;
return 0;
}
size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
/* We can't use more than 32 bits of hash in total, so that means that we require:
* (hashLog + 8) <= 32 && (chainLog + 8) <= 32
*/
if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) {
U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS;
if (cPar.hashLog > maxShortCacheHashLog) {
cPar.hashLog = maxShortCacheHashLog;
}
if (cPar.chainLog > maxShortCacheHashLog) {
cPar.chainLog = maxShortCacheHashLog;
}
}
/* At this point, we aren't 100% sure if we are using the row match finder.
* Unless it is explicitly disabled, conservatively assume that it is enabled.
* In this case it will only be disabled for small sources, so shrinking the
* hash log a little bit shouldn't result in any ratio loss.
*/
if (useRowMatchFinder == ZSTD_ps_auto)
useRowMatchFinder = ZSTD_ps_enable;
/* We can't hash more than 32 bits in total. So that means that we require:
* (hashLog - rowLog + 8) <= 32
*/
if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) {
/* Switch to 32-entry rows if searchLog is 5 (or more) */
U32 const rowLog = BOUNDED(4, cPar.searchLog, 6);
U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS;
U32 const maxHashLog = maxRowHashLog + rowLog;
assert(cPar.hashLog >= rowLog);
if (cPar.hashLog > maxHashLog) {
cPar.hashLog = maxHashLog;
}
}
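/* Worked example (illustrative, not part of the original source; assumes
 * ZSTD_ROW_HASH_TAG_BITS == 8): with searchLog == 6, rowLog == BOUNDED(4,6,6) == 6,
 * so maxHashLog == (32 - 8) + 6 == 30 and any larger hashLog is clamped to 30. */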
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
/* chain table size should be 0 for fast or row-hash strategies */
size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
? ((size_t)1 << cParams->chainLog)
: 0;
ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((1<<Litbits) * sizeof(U32))
+ ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ ZSTD_cwksp_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
+ ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
+ ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
? ZSTD_cwksp_aligned_alloc_size(hSize*sizeof(U16))
: 0;
size_t const slackSpace = ZSTD_cwksp_slack_space_required();
/* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
assert(useRowMatchFinder != ZSTD_ps_auto);
return tableSpace + optSpace;
return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
}
/* Helper function for calculating memory requirements.
* Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4;
return blockSize / divider;
}
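/* Worked example (illustrative, not part of the original source): for a 128 KB
 * block (131072 bytes), minMatch == 3 or an external sequence producer bounds
 * the count at 131072/3 == 43690 sequences, while minMatch >= 4 bounds it at
 * 131072/4 == 32768. */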
size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (cParams->minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
return ZSTD_estimateCCtxSize_usingCCtxParams(&params);
ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
/* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
size_t noRowCCtxSize;
size_t rowCCtxSize;
initialParams.useRowMatchFinder = ZSTD_ps_disable;
noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
initialParams.useRowMatchFinder = ZSTD_ps_enable;
rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
return MAX(noRowCCtxSize, rowCCtxSize);
} else {
return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
}
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
return ZSTD_estimateCCtxSize_usingCParams(cParams);
int tier = 0;
size_t largestSize = 0;
static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
for (; tier < 4; ++tier) {
/* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
}
return largestSize;
ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams);
return ZSTD_estimateCStreamSize_usingCCtxParams(&params);
ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
/* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
size_t noRowCCtxSize;
size_t rowCCtxSize;
initialParams.useRowMatchFinder = ZSTD_ps_disable;
noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
initialParams.useRowMatchFinder = ZSTD_ps_enable;
rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
return MAX(noRowCCtxSize, rowCCtxSize);
} else {
return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
}
size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog);
/* disable chain table allocation for fast or row-based strategies */
size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
? ((size_t)1 << cParams->chainLog)
: 0;
if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
{ /* Row match finder needs an additional table of hashes ("tags") */
size_t const tagTableSize = hSize*sizeof(U16);
ms->tagTable = (U16*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
}
{ /* Switch to 32-entry rows if searchLog is 5 (or more) */
U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
assert(cParams->hashLog >= rowLog);
ms->rowHashLog = cParams->hashLog - rowLog;
}
}
}
/** ZSTD_dictTooBig():
* When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
* one go generically. So we ensure that in that case we reset the tables to zero,
* so that we can load as much of the dictionary as possible.
*/
static int ZSTD_dictTooBig(size_t const loadedDictSize)
{
return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
note : `params` are assumed fully validated at this stage */
* @param loadedDictSize The size of the dictionary to be loaded
* into the context, if any. If no dictionary is used, or the
* dictionary is being attached / copied, then pass 0.
* note : `params` are assumed fully validated at this stage.
*/
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u",
(U32)pledgedSrcSize, params.cParams.windowLog);
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
(U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
if (params.ldmParams.enableLdm) {
/* Set applied params early so we can modify them for LDM,
* and point params at the applied params.
*/
zc->appliedParams = *params;
params = &zc->appliedParams;
assert(params->useRowMatchFinder != ZSTD_ps_auto);
assert(params->useBlockSplitter != ZSTD_ps_auto);
assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
assert(params->maxBlockSize != 0);
if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
assert(params.ldmParams.hashRateLog < 32);
zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
assert(params->ldmParams.hashRateLog < 32);
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize));
size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize);
U32 const divider = (params.cParams.minMatch==3) ? 3 : 4;
size_t const maxNbSeq = blockSize / divider;
size_t const buffOutSize = (zbuff == ZSTDb_buffered && params.outBufferMode == ZSTD_bm_buffered)
{ size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
size_t const blockSize = MIN(params->maxBlockSize, windowSize);
size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, params->useSequenceProducer);
size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
&params.cParams, &params.ldmParams, zc->staticSize != 0,
buffInSize, buffOutSize, pledgedSrcSize);
&params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
buffInSize, buffOutSize, pledgedSrcSize, params->useSequenceProducer, params->maxBlockSize);
int resizeWorkspace;
size_t const ldmBucketSize =
((size_t)1) << (params.ldmParams.hashLog -
params.ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize);
size_t const numBuckets =
((size_t)1) << (params->ldmParams.hashLog -
params->ldmParams.bucketSizeLog);
zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
/* Due to alignment, when reusing a workspace, we can actually consume
* up to 3 extra bytes for alignment. See the comments in zstd_cwksp.h
*/
assert(ZSTD_cwksp_used(ws) >= neededSpace &&
ZSTD_cwksp_used(ws) <= neededSpace + 3);
/* reserve space for block-level external sequences */
if (params->useSequenceProducer) {
size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq;
zc->externalMatchCtx.seqBuffer =
(ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
}
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
/* loadedDictSize */ 0,
static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize,
ZSTD_compressionParameters const* cParams) {
if (ZSTD_CDictIndicesAreTagged(cParams)){
/* Remove tags from the CDict table if they are present.
* See docs on "short cache" in zstd_compress_internal.h for context. */
size_t i;
for (i = 0; i < tableSize; i++) {
U32 const taggedIndex = src[i];
U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS;
dst[i] = index;
}
} else {
ZSTD_memcpy(dst, src, tableSize * sizeof(U32));
}
}
{ size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
{ size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
? ((size_t)1 << cdict_cParams->chainLog)
: 0;
ZSTD_memcpy(cctx->blockState.matchState.hashTable,
cdict->matchState.hashTable,
hSize * sizeof(U32));
ZSTD_memcpy(cctx->blockState.matchState.chainTable,
cdict->matchState.chainTable,
chainSize * sizeof(U32));
/* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable,
cdict->matchState.chainTable,
chainSize, cdict_cParams);
}
/* copy tag table */
if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
size_t const tagTableSize = hSize*sizeof(U16);
ZSTD_memcpy(cctx->blockState.matchState.tagTable,
cdict->matchState.tagTable,
tagTableSize);
}
assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
params.ldmParams = srcCCtx->appliedParams.ldmParams;
{ size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
{ size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
srcCCtx->appliedParams.useRowMatchFinder,
0 /* forDDSDict */)
? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
: 0;
if (preserveMark) {
U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0;
table[cellNb] += adder;
U32 newVal;
if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
/* This write is pointless, but is required(?) for the compiler
* to auto-vectorize the loop. */
newVal = ZSTD_DUBT_UNSORTED_MARK;
} else if (table[cellNb] < reducerThreshold) {
newVal = 0;
} else {
newVal = table[cellNb] - reducerValue;
/* ZSTD_blockSplitterEnabled():
 * Returns whether the block splitting parameter is enabled.
 * If enabled, compression will make a best effort to split blocks in order to improve compression ratio.
 * At the time this function is called, the parameter must be finalized.
 * Returns 1 if enabled, 0 otherwise. */
static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
{
DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
}
/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
* and size of the sequences statistics
*/
typedef struct {
U32 LLtype;
U32 Offtype;
U32 MLtype;
size_t size;
size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
int longOffsets;
} ZSTD_symbolEncodingTypeStats_t;
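/* Illustrative note (not part of the original source): LLtype/Offtype/MLtype are
 * later packed into the sequences-section header byte as
 *   (LLtype << 6) + (Offtype << 4) + (MLtype << 2)
 * e.g., assuming the usual set_basic == 0 .. set_repeat == 3 numbering, all three
 * set_compressed (== 2) yields (2<<6) + (2<<4) + (2<<2) == 0xA8. */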
/* ZSTD_buildSequencesStatistics():
* Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
* Modifies `nextEntropy` to have the appropriate values as a side effect.
* nbSeq must be greater than 0.
*
* entropyWkspSize must be at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
*/
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildSequencesStatistics(
const seqStore_t* seqStorePtr, size_t nbSeq,
const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
BYTE* dst, const BYTE* const dstEnd,
ZSTD_strategy strategy, unsigned* countWorkspace,
void* entropyWorkspace, size_t entropyWkspSize)
{
BYTE* const ostart = dst;
const BYTE* const oend = dstEnd;
BYTE* op = ostart;
FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
const BYTE* const ofCodeTable = seqStorePtr->ofCode;
const BYTE* const llCodeTable = seqStorePtr->llCode;
const BYTE* const mlCodeTable = seqStorePtr->mlCode;
ZSTD_symbolEncodingTypeStats_t stats;
/* ZSTD_entropyCompressSequences_internal():
* actually compresses both literals and sequences */
stats.lastCountSize = 0;
/* convert length/distances into codes */
stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
assert(op <= oend);
assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
/* build CTable for Literal Lengths */
{ unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
countWorkspace, max, mostFrequent, nbSeq,
LLFSELog, prevEntropy->litlengthCTable,
LL_defaultNorm, LL_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(set_basic < set_compressed && set_rle < set_compressed);
assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
countWorkspace, max, llCodeTable, nbSeq,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->litlengthCTable,
sizeof(prevEntropy->litlengthCTable),
entropyWorkspace, entropyWkspSize);
if (ZSTD_isError(countSize)) {
DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
stats.size = countSize;
return stats;
}
if (stats.LLtype == set_compressed)
stats.lastCountSize = countSize;
op += countSize;
assert(op <= oend);
} }
/* build CTable for Offsets */
{ unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(
countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
countWorkspace, max, mostFrequent, nbSeq,
OffFSELog, prevEntropy->offcodeCTable,
OF_defaultNorm, OF_defaultNormLog,
defaultPolicy, strategy);
assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
countWorkspace, max, ofCodeTable, nbSeq,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->offcodeCTable,
sizeof(prevEntropy->offcodeCTable),
entropyWorkspace, entropyWkspSize);
if (ZSTD_isError(countSize)) {
DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
stats.size = countSize;
return stats;
}
if (stats.Offtype == set_compressed)
stats.lastCountSize = countSize;
op += countSize;
assert(op <= oend);
} }
/* build CTable for MatchLengths */
{ unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(
countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
countWorkspace, max, mostFrequent, nbSeq,
MLFSELog, prevEntropy->matchlengthCTable,
ML_defaultNorm, ML_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
countWorkspace, max, mlCodeTable, nbSeq,
ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->matchlengthCTable,
sizeof(prevEntropy->matchlengthCTable),
entropyWorkspace, entropyWkspSize);
if (ZSTD_isError(countSize)) {
DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
stats.size = countSize;
return stats;
}
if (stats.MLtype == set_compressed)
stats.lastCountSize = countSize;
op += countSize;
assert(op <= oend);
} }
stats.size = (size_t)(op-ostart);
return stats;
}
/* ZSTD_entropyCompressSeqStore_internal():
* compresses both literals and sequences
* Returns compressed size of block, or a zstd error.
*/
#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
ZSTD_entropyCompressSequences_internal(seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
ZSTD_entropyCompressSeqStore_internal(
const seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
void* entropyWorkspace, size_t entropyWkspSize,
const int bmi2)
size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
/* Base suspicion of uncompressibility on ratio of literals to sequences */
unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
}
{ BYTE* const seqHead = op++;
/* build stats for sequences */
const ZSTD_symbolEncodingTypeStats_t stats =
ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
&prevEntropy->fse, &nextEntropy->fse,
op, oend,
strategy, count,
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
*seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
lastCountSize = stats.lastCountSize;
op += stats.size;
longOffsets = stats.longOffsets;
/* seqHead : flags for FSE encoding type */
seqHead = op++;
assert(op <= oend);
/* convert length/distances into codes */
ZSTD_seqToCodes(seqStorePtr);
/* build CTable for Literal Lengths */
{ unsigned max = MaxLL;
size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building LL table");
nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode;
LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode,
count, max, mostFrequent, nbSeq,
LLFSELog, prevEntropy->fse.litlengthCTable,
LL_defaultNorm, LL_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(set_basic < set_compressed && set_rle < set_compressed);
assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype,
count, max, llCodeTable, nbSeq,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
prevEntropy->fse.litlengthCTable,
sizeof(prevEntropy->fse.litlengthCTable),
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed");
if (LLtype == set_compressed)
lastNCount = op;
op += countSize;
assert(op <= oend);
} }
/* build CTable for Offsets */
{ unsigned max = MaxOff;
size_t const mostFrequent = HIST_countFast_wksp(
count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
/* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
DEBUGLOG(5, "Building OF table");
nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode;
Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode,
count, max, mostFrequent, nbSeq,
OffFSELog, prevEntropy->fse.offcodeCTable,
OF_defaultNorm, OF_defaultNormLog,
defaultPolicy, strategy);
assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype,
count, max, ofCodeTable, nbSeq,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
prevEntropy->fse.offcodeCTable,
sizeof(prevEntropy->fse.offcodeCTable),
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed");
if (Offtype == set_compressed)
lastNCount = op;
op += countSize;
assert(op <= oend);
} }
/* build CTable for MatchLengths */
{ unsigned max = MaxML;
size_t const mostFrequent = HIST_countFast_wksp(
count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode;
MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode,
count, max, mostFrequent, nbSeq,
MLFSELog, prevEntropy->fse.matchlengthCTable,
ML_defaultNorm, ML_defaultNormLog,
ZSTD_defaultAllowed, strategy);
assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
{ size_t const countSize = ZSTD_buildCTable(
op, (size_t)(oend - op),
CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype,
count, max, mlCodeTable, nbSeq,
ML_defaultNorm, ML_defaultNormLog, MaxML,
prevEntropy->fse.matchlengthCTable,
sizeof(prevEntropy->fse.matchlengthCTable),
entropyWorkspace, entropyWkspSize);
FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed");
if (MLtype == set_compressed)
lastNCount = op;
op += countSize;
assert(op <= oend);
} }
*seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
if (lastNCount && (op - lastNCount) < 4) {
/* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
assert(op - lastNCount == 3);
if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
/* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
assert(lastCountSize + bitstreamSize == 3);
MEM_STATIC size_t
ZSTD_entropyCompressSeqStore(
const seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
void* dst, size_t dstCapacity,
size_t srcSize,
void* entropyWorkspace, size_t entropyWkspSize,
int bmi2)
{
if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) {
DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity);
DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
/* libzstd decoders before v1.5.4 are not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly.
* This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above.
*/
assert(cSize < ZSTD_BLOCKSIZE_MAX);
DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
{ ZSTD_compressBlock_greedy_row,
ZSTD_compressBlock_lazy_row,
ZSTD_compressBlock_lazy2_row },
{ ZSTD_compressBlock_greedy_extDict_row,
ZSTD_compressBlock_lazy_extDict_row,
ZSTD_compressBlock_lazy2_extDict_row },
{ ZSTD_compressBlock_greedy_dictMatchState_row,
ZSTD_compressBlock_lazy_dictMatchState_row,
ZSTD_compressBlock_lazy2_dictMatchState_row },
{ ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
};
DEBUGLOG(4, "Selecting a row-based matchfinder");
assert(useRowMatchFinder != ZSTD_ps_auto);
selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
} else {
selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
}
ssPtr->longLengthType = ZSTD_llt_none;
}
/* ZSTD_postProcessSequenceProducerResult() :
* Validates and post-processes sequences obtained through the external matchfinder API:
* - Checks whether nbExternalSeqs represents an error condition.
* - Appends a block delimiter to outSeqs if one is not already present.
* See zstd.h for context regarding block delimiters.
* Returns the number of sequences after post-processing, or an error code. */
static size_t ZSTD_postProcessSequenceProducerResult(
ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize
) {
RETURN_ERROR_IF(
nbExternalSeqs > outSeqsCapacity,
sequenceProducer_failed,
"External sequence producer returned error code %lu",
(unsigned long)nbExternalSeqs
);
RETURN_ERROR_IF(
nbExternalSeqs == 0 && srcSize > 0,
sequenceProducer_failed,
"Got zero sequences from external sequence producer for a non-empty src buffer!"
);
if (srcSize == 0) {
ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence));
return 1;
}
{
ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1];
/* We can return early if lastSeq is already a block delimiter. */
if (lastSeq.offset == 0 && lastSeq.matchLength == 0) {
return nbExternalSeqs;
}
/* This error condition is only possible if the external matchfinder
* produced an invalid parse, by definition of ZSTD_sequenceBound(). */
RETURN_ERROR_IF(
nbExternalSeqs == outSeqsCapacity,
sequenceProducer_failed,
"nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!"
);
/* lastSeq is not a block delimiter, so we need to append one. */
ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence));
return nbExternalSeqs + 1;
}
}
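/* Illustrative example (hypothetical values): suppose an external producer returns
 * { {litLength=10, matchLength=6, offset=100}, {litLength=0, matchLength=4, offset=50} }
 * for a 20-byte block. The last entry is not a block delimiter (offset/matchLength != 0),
 * so this function appends one zeroed ZSTD_Sequence and returns 3, provided the output
 * buffer has room for the extra entry (which ZSTD_sequenceBound() guarantees). */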
/* ZSTD_fastSequenceLengthSum() :
* Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*.
* Similar to another function in zstd_compress.c (determine_blockSize),
* except it doesn't check for a block delimiter to end summation.
* Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P).
* This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */
static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) {
size_t matchLenSum, litLenSum, i;
matchLenSum = 0;
litLenSum = 0;
for (i = 0; i < seqBufSize; i++) {
litLenSum += seqBuf[i].litLength;
matchLenSum += seqBuf[i].matchLength;
}
return litLenSum + matchLenSum;
}
/* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
* additional 1. We need to revisit and change this logic to be more consistent */
if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
/* External matchfinder + LDM is technically possible, just not implemented yet.
* We need to revisit soon and implement it. */
RETURN_ERROR_IF(
zc->appliedParams.useSequenceProducer,
parameter_combination_unsupported,
"Long-distance matching with external sequence producer enabled is not currently supported."
);
} else if (zc->appliedParams.useSequenceProducer) {
assert(
zc->externalMatchCtx.seqBufferCapacity >= ZSTD_sequenceBound(srcSize)
);
assert(zc->externalMatchCtx.mFinder != NULL);
{ U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog;
size_t const nbExternalSeqs = (zc->externalMatchCtx.mFinder)(
zc->externalMatchCtx.mState,
zc->externalMatchCtx.seqBuffer,
zc->externalMatchCtx.seqBufferCapacity,
src, srcSize,
NULL, 0, /* dict and dictSize, currently not supported */
zc->appliedParams.compressionLevel,
windowSize
);
size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(
zc->externalMatchCtx.seqBuffer,
nbExternalSeqs,
zc->externalMatchCtx.seqBufferCapacity,
srcSize
);
/* Return early if there is no error, since we don't need to worry about last literals */
if (!ZSTD_isError(nbPostProcessedSeqs)) {
ZSTD_sequencePosition seqPos = {0,0,0};
size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs);
RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
FORWARD_IF_ERROR(
ZSTD_copySequencesToSeqStoreExplicitBlockDelim(
zc, &seqPos,
zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs,
src, srcSize,
zc->appliedParams.searchForExternalRepcodes
),
"Failed to copy external sequences to seqStore!"
);
ms->ldmSeqStore = NULL;
DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs);
return ZSTDbss_compress;
}
/* Propagate the error if fallback is disabled */
if (!zc->appliedParams.enableMatchFinderFallback) {
return nbPostProcessedSeqs;
}
/* Fallback to software matchfinder */
{ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
dictMode);
ms->ldmSeqStore = NULL;
DEBUGLOG(
5,
"External sequence producer returned error code %lu. Falling back to internal parser.",
(unsigned long)nbExternalSeqs
);
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
} }
} else { /* not long range mode and no external matchfinder */
ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
zc->appliedParams.useRowMatchFinder,
dictMode);
ms->ldmSeqStore = NULL;
lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
}
static void
ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
{
ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
bs->prevCBlock = bs->nextCBlock;
bs->nextCBlock = tmp;
}
/* Writes the block header */
static void
writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
{
U32 const cBlockHeader = cSize == 1 ?
lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
MEM_writeLE24(op, cBlockHeader);
DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
}
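/* Worked example (illustrative): a compressed block with cSize=100 that is also the last
 * block encodes cBlockHeader = 1 (lastBlock) + (bt_compressed<<1 == 4) + (100<<3 == 800)
 * = 805 = 0x000325, written little-endian as the 3 bytes 25 03 00.
 * For an RLE block (cSize==1), the size field carries the regenerated blockSize instead. */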
/** ZSTD_buildBlockEntropyStats_literals() :
* Builds entropy for the literals.
* Stores literals block type (raw, rle, compressed, repeat) and
* huffman description table to hufMetadata.
* Requires ENTROPY_WORKSPACE_SIZE workspace
* @return : size of huffman description table, or an error code
*/
static size_t
ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
const ZSTD_hufCTables_t* prevHuf,
ZSTD_hufCTables_t* nextHuf,
ZSTD_hufCTablesMetadata_t* hufMetadata,
const int literalsCompressionIsDisabled,
void* workspace, size_t wkspSize,
int hufFlags)
{
BYTE* const wkspStart = (BYTE*)workspace;
BYTE* const wkspEnd = wkspStart + wkspSize;
BYTE* const countWkspStart = wkspStart;
unsigned* const countWksp = (unsigned*)workspace;
const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
BYTE* const nodeWksp = countWkspStart + countWkspSize;
const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp);
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
unsigned huffLog = LitHufLog;
HUF_repeat repeat = prevHuf->repeatMode;
DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
/* Prepare nextEntropy assuming reusing the existing table */
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
if (literalsCompressionIsDisabled) {
DEBUGLOG(5, "set_basic - disabled");
hufMetadata->hType = set_basic;
return 0;
}
/* small ? don't even attempt compression (speed opt) */
#ifndef COMPRESS_LITERALS_SIZE_MIN
# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */
#endif
{ size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
if (srcSize <= minLitSize) {
DEBUGLOG(5, "set_basic - too small");
hufMetadata->hType = set_basic;
return 0;
} }
/* Scan input and build symbol stats */
{ size_t const largest =
HIST_count_wksp (countWksp, &maxSymbolValue,
(const BYTE*)src, srcSize,
workspace, wkspSize);
FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
if (largest == srcSize) {
/* only one literal symbol */
DEBUGLOG(5, "set_rle");
hufMetadata->hType = set_rle;
return 0;
}
if (largest <= (srcSize >> 7)+4) {
/* heuristic: likely not compressible */
DEBUGLOG(5, "set_basic - no gain");
hufMetadata->hType = set_basic;
return 0;
} }
/* Validate the previous Huffman table */
if (repeat == HUF_repeat_check
&& !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
repeat = HUF_repeat_none;
}
/* Build Huffman Tree */
ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags);
assert(huffLog <= LitHufLog);
{ size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
huffLog = (U32)maxBits;
}
{ /* Build and write the CTable */
size_t const newCSize = HUF_estimateCompressedSize(
(HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
size_t const hSize = HUF_writeCTable_wksp(
hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
(HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
nodeWksp, nodeWkspSize);
/* Check against repeating the previous CTable */
if (repeat != HUF_repeat_none) {
size_t const oldCSize = HUF_estimateCompressedSize(
(HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
DEBUGLOG(5, "set_repeat - smaller");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_repeat;
return 0;
} }
if (newCSize + hSize >= srcSize) {
DEBUGLOG(5, "set_basic - no gains");
ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
hufMetadata->hType = set_basic;
return 0;
}
DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
hufMetadata->hType = set_compressed;
nextHuf->repeatMode = HUF_repeat_check;
return hSize;
}
}
/* ZSTD_buildDummySequencesStatistics():
* Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
* and updates nextEntropy to the appropriate repeatMode.
*/
static ZSTD_symbolEncodingTypeStats_t
ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy)
{
ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0};
nextEntropy->litlength_repeatMode = FSE_repeat_none;
nextEntropy->offcode_repeatMode = FSE_repeat_none;
nextEntropy->matchlength_repeatMode = FSE_repeat_none;
return stats;
}
/** ZSTD_buildBlockEntropyStats_sequences() :
* Builds entropy for the sequences.
* Stores symbol compression modes and fse table to fseMetadata.
* Requires ENTROPY_WORKSPACE_SIZE wksp.
* @return : size of fse tables or error code */
static size_t
ZSTD_buildBlockEntropyStats_sequences(
const seqStore_t* seqStorePtr,
const ZSTD_fseCTables_t* prevEntropy,
ZSTD_fseCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize)
{
ZSTD_strategy const strategy = cctxParams->cParams.strategy;
size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
BYTE* const ostart = fseMetadata->fseTablesBuffer;
BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
BYTE* op = ostart;
unsigned* countWorkspace = (unsigned*)workspace;
unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
ZSTD_symbolEncodingTypeStats_t stats;
DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
prevEntropy, nextEntropy, op, oend,
strategy, countWorkspace,
entropyWorkspace, entropyWorkspaceSize)
: ZSTD_buildDummySequencesStatistics(nextEntropy);
FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
fseMetadata->lastCountSize = stats.lastCountSize;
return stats.size;
}
/** ZSTD_buildBlockEntropyStats() :
* Builds entropy for the block.
* Requires workspace size ENTROPY_WORKSPACE_SIZE
* @return : 0 on success, or an error code
* Note : also employed in superblock
*/
size_t ZSTD_buildBlockEntropyStats(
const seqStore_t* seqStorePtr,
const ZSTD_entropyCTables_t* prevEntropy,
ZSTD_entropyCTables_t* nextEntropy,
const ZSTD_CCtx_params* cctxParams,
ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize)
{
size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD);
int const hufFlags = huf_useOptDepth ? HUF_flags_optimalDepth : 0;
entropyMetadata->hufMetadata.hufDesSize =
ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
&prevEntropy->huf, &nextEntropy->huf,
&entropyMetadata->hufMetadata,
ZSTD_literalsCompressionIsDisabled(cctxParams),
workspace, wkspSize, hufFlags);
FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
entropyMetadata->fseMetadata.fseTablesSize =
ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
&prevEntropy->fse, &nextEntropy->fse,
cctxParams,
&entropyMetadata->fseMetadata,
workspace, wkspSize);
FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
return 0;
}
/* Returns the size estimate for the literals section (header + content) of a block */
static size_t
ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
const ZSTD_hufCTables_t* huf,
const ZSTD_hufCTablesMetadata_t* hufMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
unsigned* const countWksp = (unsigned*)workspace;
unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
U32 singleStream = litSize < 256;
if (hufMetadata->hType == set_basic) return litSize;
else if (hufMetadata->hType == set_rle) return 1;
else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
if (ZSTD_isError(largest)) return litSize;
{ size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
return cLitSizeEstimate + literalSectionHeaderSize;
} }
assert(0); /* impossible */
return 0;
}
/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
static size_t
ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
const FSE_CTable* fseCTable,
const U8* additionalBits,
short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
void* workspace, size_t wkspSize)
{
unsigned* const countWksp = (unsigned*)workspace;
const BYTE* ctp = codeTable;
const BYTE* const ctStart = ctp;
const BYTE* const ctEnd = ctStart + nbSeq;
size_t cSymbolTypeSizeEstimateInBits = 0;
unsigned max = maxCode;
HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
if (type == set_basic) {
/* We selected this encoding type, so it must be valid. */
assert(max <= defaultMax);
(void)defaultMax;
cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
} else if (type == set_rle) {
cSymbolTypeSizeEstimateInBits = 0;
} else if (type == set_compressed || type == set_repeat) {
cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
}
if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
return nbSeq * 10;
}
while (ctp < ctEnd) {
if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
ctp++;
}
return cSymbolTypeSizeEstimateInBits >> 3;
}
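/* Illustrative note: for offsets the code value is itself the number of extra bits, so a
 * sequence with ofCode==10 contributes 10 raw bits on top of its FSE (or cross-entropy) cost,
 * while literal/match lengths take their extra-bit counts from the LL_bits[]/ML_bits[] tables
 * passed as additionalBits. The final >>3 converts the bit estimate into bytes. */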
/* Returns the size estimate for the sequences section (header + content) of a block */
static size_t
ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_fseCTables_t* fseTables,
const ZSTD_fseCTablesMetadata_t* fseMetadata,
void* workspace, size_t wkspSize,
int writeEntropy)
{
size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
size_t cSeqSizeEstimate = 0;
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
fseTables->offcodeCTable, NULL,
OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
fseTables->litlengthCTable, LL_bits,
LL_defaultNorm, LL_defaultNormLog, MaxLL,
workspace, wkspSize);
cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
fseTables->matchlengthCTable, ML_bits,
ML_defaultNorm, ML_defaultNormLog, MaxML,
workspace, wkspSize);
if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
return cSeqSizeEstimate + sequencesSectionHeaderSize;
}
/* Returns the size estimate for a given stream of literals, of, ll, ml */
static size_t
ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
const BYTE* ofCodeTable,
const BYTE* llCodeTable,
const BYTE* mlCodeTable,
size_t nbSeq,
const ZSTD_entropyCTables_t* entropy,
const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
void* workspace, size_t wkspSize,
int writeLitEntropy, int writeSeqEntropy)
{
size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
&entropy->huf, &entropyMetadata->hufMetadata,
workspace, wkspSize, writeLitEntropy);
size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
workspace, wkspSize, writeSeqEntropy);
return seqSize + literalsSize + ZSTD_blockHeaderSize;
}
/* Builds entropy statistics and uses them for blocksize estimation.
*
* @return: estimated compressed size of the seqStore, or a zstd error.
*/
static size_t
ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc)
{
ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
&zc->blockState.prevCBlock->entropy,
&zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
entropyMetadata,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), "");
return ZSTD_estimateBlockSize(
seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
(size_t)(seqStore->sequences - seqStore->sequencesStart),
&zc->blockState.nextCBlock->entropy,
entropyMetadata,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
(int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
}
/* Returns literals bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore)
{
size_t literalsBytes = 0;
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t i;
for (i = 0; i < nbSeqs; ++i) {
seqDef const seq = seqStore->sequencesStart[i];
literalsBytes += seq.litLength;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
literalsBytes += 0x10000;
} }
return literalsBytes;
}
/* Returns match bytes represented in a seqStore */
static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore)
{
size_t matchBytes = 0;
size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
size_t i;
for (i = 0; i < nbSeqs; ++i) {
seqDef seq = seqStore->sequencesStart[i];
matchBytes += seq.mlBase + MINMATCH;
if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
matchBytes += 0x10000;
} }
return matchBytes;
}
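/* Note: seqDef stores litLength and mlBase in 16-bit fields, so a single length can exceed
 * 65535 at most once per block. longLengthPos/longLengthType record which sequence overflowed,
 * and the two helpers above add the missing 0x10000 (65536) back for exactly that sequence. */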
/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
* Stores the result in resultSeqStore.
*/
static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
const seqStore_t* originalSeqStore,
size_t startIdx, size_t endIdx)
{
*resultSeqStore = *originalSeqStore;
if (startIdx > 0) {
resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
}
/* Move longLengthPos into the correct position if necessary */
if (originalSeqStore->longLengthType != ZSTD_llt_none) {
if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
resultSeqStore->longLengthType = ZSTD_llt_none;
} else {
resultSeqStore->longLengthPos -= (U32)startIdx;
}
}
resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
/* This accounts for possible last literals if the derived chunk reaches the end of the block */
assert(resultSeqStore->lit == originalSeqStore->lit);
} else {
size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
}
resultSeqStore->llCode += startIdx;
resultSeqStore->mlCode += startIdx;
resultSeqStore->ofCode += startIdx;
}
/**
* Returns the raw offset represented by the combination of offBase, ll0, and repcode history.
* offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().
*/
static U32
ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0)
{
U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */
assert(OFFBASE_IS_REPCODE(offBase));
if (adjustedRepCode == ZSTD_REP_NUM) {
assert(ll0);
/* litlength == 0 and offCode == 2 implies selection of first repcode - 1
* This is only valid if it results in a valid offset value, aka > 0.
* Note : it may happen that `rep[0]==1` in exceptional circumstances.
* In which case this function will return 0, which is an invalid offset.
* It's not an issue though, since this value will be
* compared and discarded within ZSTD_seqStore_resolveOffCodes().
*/
return rep[0] - 1;
}
return rep[adjustedRepCode];
}
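/* Illustrative example (hypothetical history): with rep = {8, 4, 2},
 *   offBase==1, ll0==0  ->  rep[0] == 8      (repcode 1)
 *   offBase==1, ll0==1  ->  rep[1] == 4      (litLength==0 shifts the repcode meaning by one)
 *   offBase==3, ll0==1  ->  rep[0] - 1 == 7  (the special case handled above)
 * These are the raw offsets a decoder would reconstruct from the same (offBase, ll0) pair. */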
/**
* ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
* due to emission of RLE/raw blocks that disturb the offset history,
* and replaces any repcodes within the seqStore that may be invalid.
*
* dRepcodes are updated as would be on the decompression side.
* cRepcodes are updated exactly in accordance with the seqStore.
*
* Note : this function assumes seq->offBase respects the following numbering scheme :
* 0 : invalid
* 1-3 : repcode 1-3
* 4+ : real_offset+3
*/
static void
ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
const seqStore_t* const seqStore, U32 const nbSeq)
{
U32 idx = 0;
for (; idx < nbSeq; ++idx) {
seqDef* const seq = seqStore->sequencesStart + idx;
U32 const ll0 = (seq->litLength == 0);
U32 const offBase = seq->offBase;
assert(offBase > 0);
if (OFFBASE_IS_REPCODE(offBase)) {
U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0);
U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0);
/* Adjust simulated decompression repcode history if we come across a mismatch. Replace
* the repcode with the offset it actually references, determined by the compression
* repcode history.
*/
if (dRawOffset != cRawOffset) {
seq->offBase = OFFSET_TO_OFFBASE(cRawOffset);
}
}
/* Compression repcode history is always updated with values directly from the unmodified seqStore.
* Decompression repcode history may use modified seq->offset value taken from compression repcode history.
*/
ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0);
ZSTD_updateRep(cRepcodes->rep, offBase, ll0);
}
}
/* ZSTD_compressSeqStore_singleBlock():
* Compresses a seqStore into a block with a block header, into the buffer dst.
*
* Returns the total size of that block (including header) or a ZSTD error code.
*/
static size_t
ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
const seqStore_t* const seqStore,
repcodes_t* const dRep, repcodes_t* const cRep,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
U32 lastBlock, U32 isPartition)
{
const U32 rleMaxLength = 25;
BYTE* op = (BYTE*)dst;
const BYTE* ip = (const BYTE*)src;
size_t cSize;
size_t cSeqsSize;
/* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
repcodes_t const dRepOriginal = *dRep;
DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
if (isPartition)
ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
&zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
&zc->appliedParams,
op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
srcSize,
zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
zc->bmi2);
FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
if (!zc->isFirstBlock &&
cSeqsSize < rleMaxLength &&
ZSTD_isRLE((BYTE const*)src, srcSize)) {
/* We don't want to emit our first block as a RLE even if it qualifies because
* doing so will cause the decoder (cli only) to throw a "should consume all input error."
* This is only an issue for zstd <= v1.4.3
*/
cSeqsSize = 1;
}
if (zc->seqCollector.collectSequences) {
ZSTD_copyBlockSequences(zc);
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
return 0;
}
if (cSeqsSize == 0) {
cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "Nocompress block failed");
DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else if (cSeqsSize == 1) {
cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "RLE compress block failed");
DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
*dRep = dRepOriginal; /* reset simulated decompression repcode history */
} else {
ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
cSize = ZSTD_blockHeaderSize + cSeqsSize;
DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
}
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
return cSize;
}
/* Struct to keep track of where we are in our recursive calls. */
typedef struct {
U32* splitLocations; /* Array of split indices */
size_t idx; /* The current index within splitLocations being worked on */
} seqStoreSplits;
#define MIN_SEQUENCES_BLOCK_SPLITTING 300
/* Helper function to perform the recursive search for block splits.
* Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
* If advantageous to split, then we recurse down the two sub-blocks.
* If not, or if an error occurred in estimation, then we do not recurse.
*
* Note: The recursion depth is capped by a heuristic minimum number of sequences,
* defined by MIN_SEQUENCES_BLOCK_SPLITTING.
* In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
* In practice, recursion depth usually doesn't go beyond 4.
*
* Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS.
* At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
* maximum of 128 KB, this value is actually impossible to reach.
*/
static void
ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
{
seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
size_t estimatedOriginalSize;
size_t estimatedFirstHalfSize;
size_t estimatedSecondHalfSize;
size_t midIdx = (startIdx + endIdx)/2;
DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
assert(endIdx >= startIdx);
if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx);
return;
}
ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
return;
}
if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
DEBUGLOG(5, "split decided at seqNb:%zu", midIdx);
ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
splits->splitLocations[splits->idx] = (U32)midIdx;
splits->idx++;
ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
}
}
/* Base recursive function.
* Populates a table with intra-block partition indices that can improve compression ratio.
*
* @return: number of splits made (which equals the size of the partition table - 1).
*/
static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
{
seqStoreSplits splits;
splits.splitLocations = partitions;
splits.idx = 0;
if (nbSeq <= 4) {
DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq);
/* Refuse to try and split anything with less than 4 sequences */
return 0;
}
ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
splits.splitLocations[splits.idx] = nbSeq;
DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
return splits.idx;
}
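/* Illustrative example (hypothetical numbers): with nbSeq == 1200 and a single beneficial
 * split found at the midpoint, the helper records partitions[] = { 600, 1200 } and this
 * function returns 1; the caller then compresses sequences [0, 600) and [600, 1200) as two
 * consecutive blocks. If no split is estimated to help, it returns 0 and the block is
 * emitted whole. */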
/* ZSTD_compressBlock_splitBlock():
* Attempts to split a given block into multiple blocks to improve compression ratio.
*
* Returns combined size of all blocks (which includes headers), or a ZSTD error code.
*/
static size_t
ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t blockSize,
U32 lastBlock, U32 nbSeq)
{
size_t cSize = 0;
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
size_t i = 0;
size_t srcBytesTotal = 0;
U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
/* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
* may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
* separate repcode histories that simulate repcode history on compression and decompression side,
* and use the histories to determine whether we must replace a particular repcode with its raw offset.
*
* 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
* or RLE. This allows us to retrieve the offset value that an invalid repcode references within
* a nocompress/RLE block.
* 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
* the replacement offset value rather than the original repcode to update the repcode history.
* dRep also will be the final repcode history sent to the next block.
*
* See ZSTD_seqStore_resolveOffCodes() for more details.
*/
repcodes_t dRep;
repcodes_t cRep;
ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
(unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
(unsigned)zc->blockState.matchState.nextToUpdate);
if (numSplits == 0) {
size_t cSizeSingleBlock =
ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
&dRep, &cRep,
op, dstCapacity,
ip, blockSize,
lastBlock, 0 /* isPartition */);
FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX);
assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize);
return cSizeSingleBlock;
}
ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
for (i = 0; i <= numSplits; ++i) {
size_t cSizeChunk;
U32 const lastPartition = (i == numSplits);
U32 lastBlockEntireSrc = 0;
size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
srcBytesTotal += srcBytes;
if (lastPartition) {
/* This is the final partition, need to account for possible last literals */
srcBytes += blockSize - srcBytesTotal;
lastBlockEntireSrc = lastBlock;
} else {
ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
}
cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
&dRep, &cRep,
op, dstCapacity,
ip, srcBytes,
lastBlockEntireSrc, 1 /* isPartition */);
DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size",
ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
ip += srcBytes;
op += cSizeChunk;
dstCapacity -= cSizeChunk;
cSize += cSizeChunk;
*currSeqStore = *nextSeqStore;
assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize);
}
/* cRep and dRep may have diverged during the compression.
* If so, we use the dRep repcodes for the next block.
*/
ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
return cSize;
}
static size_t
ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 lastBlock)
{
U32 nbSeq;
size_t cSize;
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
{ const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
if (bss == ZSTDbss_noCompress) {
if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
return cSize;
}
nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
}
cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
return cSize;
}
static size_t
ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize, U32 frame)
{
/* This is an estimated upper bound for the length of an rle block.
* This isn't the actual upper bound.
* Finding the real threshold needs further investigation.
*/
U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
U32 const maxDist = (U32)1 << params->cParams.windowLog;
if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
/* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
* additional 1. We need to revisit and change this logic to be more consistent */
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
} else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
}
/* ZSTD_writeSkippableFrame_advanced() :
* Writes out a skippable frame with the specified magic number variant (16 are supported),
* from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
*
* Returns the total number of bytes written, or a ZSTD error code.
*/
size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
const void* src, size_t srcSize, unsigned magicVariant) {
BYTE* op = (BYTE*)dst;
RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
dstSize_tooSmall, "Not enough room for skippable frame");
RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
MEM_writeLE32(op+4, (U32)srcSize);
ZSTD_memcpy(op+8, src, srcSize);
return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
}
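/* Usage sketch (illustrative): embed a small metadata payload in a skippable frame that any
 * conformant zstd decoder will silently skip.
 *
 *   unsigned char meta[12] = { 0 };
 *   unsigned char frame[12 + ZSTD_SKIPPABLEHEADERSIZE];
 *   size_t const written = ZSTD_writeSkippableFrame(frame, sizeof(frame), meta, sizeof(meta), 5);
 *
 * On success, written == sizeof(meta) + ZSTD_SKIPPABLEHEADERSIZE and the frame starts with
 * magic 0x184D2A55 (ZSTD_MAGIC_SKIPPABLE_START + 5) followed by the little-endian 32-bit
 * payload size. */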
if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
{ /* Ensure large dictionaries can't cause index overflow */
/* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
* Dictionaries right at the edge will immediately trigger overflow
* correction, but I don't want to insert extra constraints here.
*/
U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX;
int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&params->cParams);
if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) {
/* Some dictionary matchfinders in zstd use "short cache",
* which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each
* CDict hashtable entry as a tag rather than as part of an index.
* When short cache is used, we need to truncate the dictionary
* so that its indices don't overlap with the tag. */
U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX;
maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize);
assert(!loadLdmDict);
}
/* If the dictionary is too large, only load the suffix of the dictionary. */
if (srcSize > maxDictSize) {
ip = iend - maxDictSize;
src = ip;
srcSize = maxDictSize;
} }
if (srcSize > ZSTD_CHUNKSIZE_MAX) {
/* We must have cleared our windows when our source is this large. */
assert(ZSTD_window_isEmpty(ms->window));
if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window));
}
DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
if (params->ldmParams.enableLdm && ls != NULL)
ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, &params->ldmParams);
switch(params->cParams.strategy)
{
case ZSTD_fast:
ZSTD_fillHashTable(ms, iend, dtlm, tfp);
break;
case ZSTD_dfast:
ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
break;
case ZSTD_greedy:
case ZSTD_lazy:
case ZSTD_lazy2:
assert(srcSize >= HASH_READ_SIZE);
if (ms->dedicatedDictSearch) {
assert(ms->chainTable != NULL);
ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
} else {
assert(params->useRowMatchFinder != ZSTD_ps_auto);
if (params->useRowMatchFinder == ZSTD_ps_enable) {
size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16);
ZSTD_memset(ms->tagTable, 0, tagTableSize);
ZSTD_row_update(ms, iend-HASH_READ_SIZE);
DEBUGLOG(4, "Using row-based hash table for lazy dict");
} else {
ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
DEBUGLOG(4, "Using chain-based hash table for lazy dict");
case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
case ZSTD_btopt:
case ZSTD_btultra:
case ZSTD_btultra2:
assert(srcSize >= HASH_READ_SIZE);
ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
break;
ZSTD_CCtx_params cctxParams;
{ ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
}
void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
{
#if ZSTD_TRACE
if (cctx->traceCtx && ZSTD_trace_compress_end != NULL) {
int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0;
ZSTD_Trace trace;
ZSTD_memset(&trace, 0, sizeof(trace));
trace.version = ZSTD_VERSION_NUMBER;
trace.streaming = streaming;
trace.dictionaryID = cctx->dictID;
trace.dictionarySize = cctx->dictContentSize;
trace.uncompressedSize = cctx->consumedSrcSize;
trace.compressedSize = cctx->producedCSize + extraCSize;
trace.params = &cctx->appliedParams;
trace.cctx = cctx;
ZSTD_trace_compress_end(cctx->traceCtx, &trace);
}
cctx->traceCtx = 0;
#else
(void)cctx;
(void)extraCSize;
#endif
}
}
static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
const ZSTD_parameters* params)
{
ZSTD_CCtx_params const cctxParams =
ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params);
DEBUGLOG(4, "ZSTD_compress_internal");
return ZSTD_compress_advanced_internal(cctx,
dst, dstCapacity,
src, srcSize,
dict, dictSize,
&cctxParams);
}
ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
return ZSTD_compress_advanced_internal(cctx,
dst, dstCapacity,
src, srcSize,
dict, dictSize,
&cctx->simpleApiParams);
{
ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
assert(params.fParams.contentSizeFlag == 1);
ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
}
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
/* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
* in case we are using DDS with row-hash. */
+ ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
/* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
cdict->useRowMatchFinder = useRowMatchFinder;
ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
/* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
/* ZSTD_compressBegin_usingCDict_internal() :
* Implementation of various ZSTD_compressBegin_usingCDict* functions.
*/
static size_t ZSTD_compressBegin_usingCDict_internal(
/* Increase window log to fit the entire dictionary and source if the
* source size is known. Limit the increase to 19, which is the
* window log for compression level 1 with the largest source size.
*/
if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog);
}
params.fParams = fParams;
return ZSTD_compressBegin_internal(cctx,
NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
cdict,
&params, pledgedSrcSize,
ZSTDb_not_buffered);
ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
}
/* Increase window log to fit the entire dictionary and source if the
* source size is known. Limit the increase to 19, which is the
* window log for compression level 1 with the largest source size.
*/
if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
/* ZSTD_compressBegin_usingCDict_advanced() :
* This function is DEPRECATED.
* cdict must be != NULL */
size_t ZSTD_compressBegin_usingCDict_advanced(
ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
{
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
}
DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag);
return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
}
/*! ZSTD_compress_usingCDict_internal():
* Implementation of various ZSTD_compress_usingCDict* functions.
*/
static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
{
FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
return cctx->blockSize - cctx->stableIn_notConsumed;
}
assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered);
{ size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
if (hintInSize==0) hintInSize = cctx->blockSize;
return hintInSize;
}
const char* const istart = (assert(input != NULL), (const char*)input->src);
const char* const iend = (istart != NULL) ? istart + input->size : istart;
const char* ip = (istart != NULL) ? istart + input->pos : istart;
char* const ostart = (assert(output != NULL), (char*)output->dst);
char* const oend = (ostart != NULL) ? ostart + output->size : ostart;
char* op = (ostart != NULL) ? ostart + output->pos : ostart;
DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode);
DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos);
assert(zcs != NULL);
if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) {
assert(input->pos >= zcs->stableIn_notConsumed);
input->pos -= zcs->stableIn_notConsumed;
ip -= zcs->stableIn_notConsumed;
zcs->stableIn_notConsumed = 0;
}
/* empty */
someMoreWork = 0; break;
}
} else {
assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
if ( (flushMode == ZSTD_e_continue)
&& ( (size_t)(iend - ip) < zcs->blockSize) ) {
/* can't compress a full block : stop here */
zcs->stableIn_notConsumed = (size_t)(iend - ip);
ip = iend; /* pretend to have consumed input */
someMoreWork = 0; break;
}
if ( (flushMode == ZSTD_e_flush)
&& (ip == iend) ) {
} else {
unsigned const lastBlock = (ip + iSize == iend);
assert(flushMode == ZSTD_e_end /* Already validated */);
} else { /* !inputBuffered, hence ZSTD_bm_stable */
unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend);
if (expect.src != input->src || expect.pos != input->pos || expect.size != input->size)
RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer enabled but input differs!");
if (endOp != ZSTD_e_end)
RETURN_ERROR(srcBuffer_wrong, "ZSTD_c_stableInBuffer can only be used with ZSTD_e_end!");
if (expect.src != input->src || expect.pos != input->pos)
RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!");
if (cctx->cdict)
params.compressionLevel = cctx->cdict->compressionLevel; /* let cdict take priority in terms of compression level */
if (cctx->cdict && !cctx->localDict.cdict) {
/* Let the cdict's compression level take priority over the requested params.
* But do not take the cdict's compression level if the "cdict" is actually a localDict
* generated from ZSTD_initLocalDict().
*/
params.compressionLevel = cctx->cdict->compressionLevel;
}
if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-fix pledgedSrcSize */
{
size_t const dictSize = prefixDict.dict
if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */
{ size_t const dictSize = prefixDict.dict
if (ZSTD_CParams_shouldEnableLdm(&params.cParams)) {
/* Enable LDM by default for optimal parser and window size >= 128MB */
DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)");
params.ldmParams.enableLdm = 1;
}
params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences);
params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize);
params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel);
/* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */
RETURN_ERROR_IF(
params.useSequenceProducer == 1 && params.nbWorkers >= 1,
parameter_combination_unsupported,
"External sequence producer isn't supported with nbWorkers >= 1"
);
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, input->size), "CompressStream2 initialization failed");
ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */
size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed;
if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */
&& (endOp == ZSTD_e_continue) /* no flush requested, more input to come */
&& (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */
if (cctx->stableIn_notConsumed) { /* not the first time */
/* check stable source guarantees */
RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer");
RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos");
}
/* pretend input was consumed, to give a sense of forward progress */
input->pos = input->size;
/* save stable inBuffer, for later control, and flush/end */
cctx->expectedInBuffer = *input;
/* but actually input wasn't consumed, so keep track of position from where compression shall resume */
cctx->stableIn_notConsumed += inputSize;
/* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */
return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */
}
FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed");
ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
if (cctx->stableIn_notConsumed) {
assert(cctx->appliedParams.inBufferMode == ZSTD_bm_stable);
/* some early data was skipped - make it available for consumption */
assert(input->pos >= cctx->stableIn_notConsumed);
input->pos -= cctx->stableIn_notConsumed;
cctx->stableIn_notConsumed = 0;
}
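/* Illustrative sketch (not part of upstream zstd) : a caller honouring the
 * ZSTD_c_stableInBuffer contract described above. The source buffer must stay valid
 * and unmodified until the frame is fully produced. The ZSTD_exampleCompressStable
 * name is an assumption; all parameters and functions used are the public API. */
MEM_STATIC size_t ZSTD_exampleCompressStable(ZSTD_CCtx* cctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize)
{
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    ZSTD_inBuffer in = { src, srcSize, 0 };
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1), "");
    {   size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        FORWARD_IF_ERROR(remaining, "compression failed");
        RETURN_ERROR_IF(remaining != 0, dstSize_tooSmall, "dst too small to hold the frame");
    }
    return out.pos;
}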
ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
ZSTD_inBuffer input = { src, srcSize, *srcPos };
ZSTD_outBuffer output;
ZSTD_inBuffer input;
output.dst = dst;
output.size = dstCapacity;
output.pos = *dstPos;
input.src = src;
input.size = srcSize;
input.pos = *srcPos;
size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
{ size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
*dstPos = output.pos;
*srcPos = input.pos;
return cErr;
}
typedef struct {
U32 idx; /* Index in array of ZSTD_Sequence */
U32 posInSequence; /* Position within sequence at idx */
size_t posInSrc; /* Number of bytes given by sequences provided so far */
} ZSTD_sequencePosition;
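/* Illustrative sketch (not part of upstream zstd) : feeding externally-produced
 * sequences through the public ZSTD_compressSequences() entry point, which drives the
 * sequence copiers below. The ZSTD_exampleCompressFromSequences name is an assumption. */
MEM_STATIC size_t ZSTD_exampleCompressFromSequences(ZSTD_CCtx* cctx,
                                              void* dst, size_t dstCapacity,
                                              const ZSTD_Sequence* seqs, size_t nbSeqs,
                                              const void* src, size_t srcSize)
{
    /* Each block of sequences is terminated by a delimiter :
     * a ZSTD_Sequence with offset==0 and matchLength==0, whose litLength
     * carries the block's trailing literals. */
    FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters,
                                            ZSTD_sf_explicitBlockDelimiters), "");
    return ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
}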
/* Returns a ZSTD error code if sequence is not valid */
static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
size_t offsetBound;
U32 windowSize = 1 << windowLog;
/* posInSrc represents the amount of data the decoder would decode up to this point.
/* ZSTD_validateSequence() :
* @offCode : is presumed to follow format required by ZSTD_storeSeq()
* @returns a ZSTD error code if sequence is not valid
*/
static size_t
ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch,
size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer)
{
U32 const windowSize = 1u << windowLog;
/* posInSrc represents the amount of data the decoder would decode up to this point.
offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4;
RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
/* Validate maxNbSeq is large enough for the given matchLength and minMatch */
RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch");
static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
U32 offCode = rawOffset + ZSTD_REP_MOVE;
U32 repCode = 0;
static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
{
U32 offBase = OFFSET_TO_OFFBASE(rawOffset);
/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
* ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
*/
static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize) {
size_t
ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize,
ZSTD_paramSwitch_e externalRepSearch)
{
for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
litLength = inSeqs[idx].litLength;
matchLength = inSeqs[idx].matchLength;
ll0 = litLength == 0;
offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
U32 const litLength = inSeqs[idx].litLength;
U32 const matchLength = inSeqs[idx].matchLength;
U32 offBase;
if (externalRepSearch == ZSTD_ps_disable) {
offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset);
} else {
U32 const ll0 = (litLength == 0);
offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
}
FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize,
cctx->appliedParams.cParams.minMatch),
FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer),
}
/* If we skipped repcode search while parsing, we need to update repcodes now */
assert(externalRepSearch != ZSTD_ps_auto);
assert(idx >= startIdx);
if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) {
U32* const rep = updatedRepcodes.rep;
U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */
if (lastSeqIdx >= startIdx + 2) {
rep[2] = inSeqs[lastSeqIdx - 2].offset;
rep[1] = inSeqs[lastSeqIdx - 1].offset;
rep[0] = inSeqs[lastSeqIdx].offset;
} else if (lastSeqIdx == startIdx + 1) {
rep[2] = rep[0];
rep[1] = inSeqs[lastSeqIdx - 1].offset;
rep[0] = inSeqs[lastSeqIdx].offset;
} else {
assert(lastSeqIdx == startIdx);
rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = inSeqs[lastSeqIdx].offset;
}
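/* Worked example (illustrative) : suppose repcode search was disabled and the last three
 * non-delimiter sequences of the block carried raw offsets 10, 20 and 30 (oldest to newest).
 * The update above yields rep[0]=30, rep[1]=20, rep[2]=10 :
 * the most recent offsets become the new repcodes, newest first. */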
/* Returns the number of bytes to move the current read position back by. Only non-zero
* if we ended up splitting a sequence. Otherwise, it may return a ZSTD error if something
* went wrong.
*
* This function will attempt to scan through blockSize bytes represented by the sequences
* in inSeqs, storing any (partial) sequences.
*
* Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
* avoid splitting a match, or to avoid splitting a match such that it would produce a match
* smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
*/
static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize) {
size_t
ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch)
{
{ U32 ll0 = (litLength == 0);
offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
{ U32 const ll0 = (litLength == 0);
offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0);
ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize,
cctx->appliedParams.cParams.minMatch),
FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer),
DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
const void* src, size_t blockSize);
static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
{
/* Discover the size of next block by searching for the delimiter.
* Note that a block delimiter **must** exist in this mode,
* otherwise it's an input error.
* The block size retrieved will be later compared to ensure it remains within bounds */
static size_t
blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos)
{
int end = 0;
size_t blockSize = 0;
size_t spos = seqPos.idx;
DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize);
assert(spos <= inSeqsSize);
while (spos < inSeqsSize) {
end = (inSeqs[spos].offset == 0);
blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength;
if (end) {
if (inSeqs[spos].matchLength != 0)
RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0");
break;
}
spos++;
}
if (!end)
RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter");
return blockSize;
}
/* More a "target" block size */
static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining)
{
int const lastBlock = (remaining <= blockSize);
return lastBlock ? remaining : blockSize;
}
static size_t determine_blockSize(ZSTD_sequenceFormat_e mode,
size_t blockSize, size_t remaining,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos)
{
DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining);
if (mode == ZSTD_sf_noBlockDelimiters)
return blockSize_noDelimiter(blockSize, remaining);
{ size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos);
FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters");
if (explicitBlockSize > blockSize)
RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block");
if (explicitBlockSize > remaining)
RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source");
return explicitBlockSize;
}
}
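/* Worked example (illustrative) : with explicit delimiters, the two sequences
 *   { litLength=5, matchLength=10, offset=100 }
 *   { litLength=3, matchLength=0,  offset=0 }    <- block delimiter
 * describe a block of 5+10+3 = 18 bytes. blockSize_explicitDelimiter() returns 18,
 * and determine_blockSize() then checks that 18 exceeds neither the target block size
 * nor the remaining source. */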
static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize) {
static size_t
ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
const void* src, size_t srcSize)
{
lastBlock = remaining <= cctx->blockSize;
blockSize = lastBlock ? (U32)remaining : (U32)cctx->blockSize;
size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters,
cctx->blockSize, remaining,
inSeqs, inSeqsSize, seqPos);
U32 const lastBlock = (blockSize == remaining);
FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size");
assert(blockSize <= remaining);
if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
/* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
* additional 1. We need to revisit and change this logic to be more consistent */
if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
compressedSeqsSize = ZSTD_entropyCompressSequences(&cctx->seqStore,
RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
DEBUGLOG(4, "Writing out nocompress block, size: %zu", cBlockSize);
FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed");
DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize);
FORWARD_IF_ERROR(cBlockSize, "RLE compress block failed");
DEBUGLOG(4, "Writing out RLE block, size: %zu", cBlockSize);
FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed");
DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize);
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
{ /* "default" - for any srcSize > 256 KB */
/* W, C, H, S, L, TL, strat */
{ 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */
{ 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */
{ 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */
{ 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */
{ 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */
{ 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */
{ 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */
{ 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */
{ 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
{ 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */
{ 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
{ 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
{ 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
{ 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */
{ 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */
{ 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */
{ 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */
{ 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */
{ 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */
{ 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */
{ 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */
{ 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */
{ 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */
},
{ /* for srcSize <= 256 KB */
/* W, C, H, S, L, T, strat */
{ 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
{ 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */
{ 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */
{ 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */
{ 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/
{ 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/
{ 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/
{ 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */
{ 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
{ 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
{ 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
{ 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/
{ 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/
{ 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */
{ 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
{ 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/
{ 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/
{ 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/
{ 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/
{ 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
{ 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/
{ 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/
{ 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/
},
{ /* for srcSize <= 128 KB */
/* W, C, H, S, L, T, strat */
{ 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
{ 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */
{ 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */
{ 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */
{ 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */
{ 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */
{ 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */
{ 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */
{ 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
{ 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
{ 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
{ 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */
{ 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */
{ 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/
{ 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/
{ 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/
{ 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/
{ 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/
{ 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/
{ 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/
{ 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/
{ 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
{ 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/
},
{ /* for srcSize <= 16 KB */
/* W, C, H, S, L, T, strat */
{ 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */
{ 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */
{ 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */
{ 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */
{ 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */
{ 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/
{ 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */
{ 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */
{ 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/
{ 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/
{ 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/
{ 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/
{ 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/
{ 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/
{ 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/
{ 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/
{ 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/
{ 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/
{ 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/
{ 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/
{ 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/
{ 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/
{ 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/
},
};
int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
void ZSTD_registerSequenceProducer(
ZSTD_CCtx* zc, void* mState,
ZSTD_sequenceProducer_F* mFinder
) {
if (mFinder != NULL) {
ZSTD_externalMatchCtx emctx;
emctx.mState = mState;
emctx.mFinder = mFinder;
emctx.seqBuffer = NULL;
emctx.seqBufferCapacity = 0;
zc->externalMatchCtx = emctx;
zc->requestedParams.useSequenceProducer = 1;
} else {
ZSTD_memset(&zc->externalMatchCtx, 0, sizeof(zc->externalMatchCtx));
zc->requestedParams.useSequenceProducer = 0;
}
}
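/* Illustrative sketch (not part of upstream zstd) : enabling and disabling an external
 * sequence producer. A real producer must match the ZSTD_sequenceProducer_F signature
 * declared in zstd.h; the helper names below are assumptions. */
MEM_STATIC void ZSTD_exampleEnableSequenceProducer(ZSTD_CCtx* cctx,
                                             void* producerState,
                                             ZSTD_sequenceProducer_F* producerFn)
{
    /* non-NULL producer : stored in cctx->externalMatchCtx, useSequenceProducer = 1 */
    ZSTD_registerSequenceProducer(cctx, producerState, producerFn);
}
MEM_STATIC void ZSTD_exampleDisableSequenceProducer(ZSTD_CCtx* cctx)
{
    /* NULL producer : clears the registration, useSequenceProducer = 0 */
    ZSTD_registerSequenceProducer(cctx, NULL, NULL);
}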
/* **************************************************************
* Utils
* Required declarations
****************************************************************/
typedef struct nodeElt_s {
U32 count;
U16 parent;
BYTE byte;
BYTE nbBits;
} nodeElt;
/* **************************************************************
* Debug Traces
****************************************************************/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
#if DEBUGLEVEL >= 2
static size_t showU32(const U32* arr, size_t size)
{
size_t u;
for (u=0; u<size; u++) {
RAWLOG(6, " %u", arr[u]); (void)arr;
}
RAWLOG(6, " \n");
return size;
}
static size_t HUF_getNbBits(HUF_CElt elt);
static size_t showCTableBits(const HUF_CElt* ctable, size_t size)
{
size_t u;
for (u=0; u<size; u++) {
RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable;
}
RAWLOG(6, " \n");
return size;
}
static size_t showHNodeSymbols(const nodeElt* hnode, size_t size)
{
size_t u;
for (u=0; u<size; u++) {
RAWLOG(6, " %u", hnode[u].byte); (void)hnode;
}
RAWLOG(6, " \n");
return size;
}
static size_t showHNodeBits(const nodeElt* hnode, size_t size)
{
size_t u;
for (u=0; u<size; u++) {
RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode;
}
RAWLOG(6, " \n");
return size;
}
#endif
#define HUF_WORKSPACE_MAX_ALIGNMENT 8
static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
{
size_t const mask = align - 1;
size_t const rem = (size_t)workspace & mask;
size_t const add = (align - rem) & mask;
BYTE* const aligned = (BYTE*)workspace + add;
assert((align & (align - 1)) == 0); /* pow 2 */
assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
if (*workspaceSizePtr >= add) {
assert(add < align);
assert(((size_t)aligned & mask) == 0);
*workspaceSizePtr -= add;
return aligned;
} else {
*workspaceSizePtr = 0;
return NULL;
}
}
static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
typedef struct {
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
unsigned count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;
static size_t
HUF_compressWeights(void* dst, size_t dstSize,
const void* weightTable, size_t wtSize,
void* workspace, size_t workspaceSize)
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) );
CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
/*! HUF_writeCTable() :
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
static size_t HUF_getValue(HUF_CElt elt)
{
return elt & ~(size_t)0xFF;
}
static size_t HUF_getValueFast(HUF_CElt elt)
{
return elt;
}
static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
{
assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
*elt = nbBits;
}
static void HUF_setValue(HUF_CElt* elt, size_t value)
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
* It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
* the tree so that it is a valid canonical Huffman tree.
* It attempts to convert all nodes with nbBits > @targetNbBits
* to employ @targetNbBits instead. Then it adjusts the tree
* so that it remains a valid canonical Huffman tree.
/* early exit : no elt > maxNbBits, so the tree is already valid. */
if (largestBits <= maxNbBits) return largestBits;
/* early exit : no elt > targetNbBits, so the tree is already valid. */
if (largestBits <= targetNbBits) return largestBits;
DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);
/* n stops at huffNode[n].nbBits <= maxNbBits */
assert(huffNode[n].nbBits <= maxNbBits);
/* n end at index of smallest symbol using < maxNbBits */
while (huffNode[n].nbBits == maxNbBits) --n;
/* n stops at huffNode[n].nbBits <= targetNbBits */
assert(huffNode[n].nbBits <= targetNbBits);
/* n end at index of smallest symbol using < targetNbBits */
while (huffNode[n].nbBits == targetNbBits) --n;
/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
* Strategy is to use as many buckets as possible for representing distinct
* counts while using the remainder to represent all "large" counts.
*
* To satisfy this requirement for 192 buckets, we can do the following:
* Let buckets 0-166 represent distinct counts of [0, 166]
* Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
*/
#define RANK_POSITION_MAX_COUNT_LOG 32
#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)
/* Return the appropriate bucket index for a given count. See definition of
* RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
*/
static U32 HUF_getIndex(U32 const count) {
return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
? count
: ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
}
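/* Worked example (illustrative) : a symbol count of 100 lies below
 * RANK_POSITION_DISTINCT_COUNT_CUTOFF, so it gets its own bucket (index 100).
 * A count of 1<<20 lies above the cutoff and falls into the log2 bucket
 * ZSTD_highbit32(1<<20) + RANK_POSITION_LOG_BUCKETS_BEGIN = 20 + RANK_POSITION_LOG_BUCKETS_BEGIN. */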
/* Helper swap function for HUF_quickSortPartition() */
static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
nodeElt tmp = *a;
*a = *b;
*b = tmp;
}
/* Returns 0 if the huffNode array is not sorted by descending count */
MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
U32 i;
for (i = 1; i < maxSymbolValue1; ++i) {
if (huffNode[i].count > huffNode[i-1].count) {
return 0;
}
}
return 1;
}
/* Insertion sort by descending order */
HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
int i;
int const size = high-low+1;
huffNode += low;
for (i = 1; i < size; ++i) {
nodeElt const key = huffNode[i];
int j = i - 1;
while (j >= 0 && huffNode[j].count < key.count) {
huffNode[j + 1] = huffNode[j];
j--;
}
huffNode[j + 1] = key;
}
}
/* Pivot helper function for quicksort. */
static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
/* Simply select rightmost element as pivot. "Better" selectors like
* median-of-three don't experimentally appear to have any benefit.
*/
U32 const pivot = arr[high].count;
int i = low - 1;
int j = low;
for ( ; j < high; j++) {
if (arr[j].count > pivot) {
i++;
HUF_swapNodes(&arr[i], &arr[j]);
}
}
HUF_swapNodes(&arr[i + 1], &arr[high]);
return i + 1;
}
/* Classic quicksort by descending with partially iterative calls
* to reduce worst case callstack size.
*/
static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
int const kInsertionSortThreshold = 8;
if (high - low < kInsertionSortThreshold) {
HUF_insertionSort(arr, low, high);
return;
}
while (low < high) {
int const idx = HUF_quickSortPartition(arr, low, high);
if (idx - low < high - idx) {
HUF_simpleQuickSort(arr, low, idx - 1);
low = idx + 1;
} else {
HUF_simpleQuickSort(arr, idx + 1, high);
high = idx - 1;
}
}
}
static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
{
int n;
int const maxSymbolValue1 = (int)maxSymbolValue + 1;
static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
U32 n;
U32 const maxSymbolValue1 = maxSymbolValue+1;
* For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
* Then 2^lowerRank <= count[n]+1 <= 2^rank.
* For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
* See HUF_getIndex to see bucketing strategy.
U32 const r = BIT_highbit32(c+1) + 1;
U32 pos = rankPosition[r].curr++;
/* Insert into the correct position in the rank.
* We have at most 256 symbols, so this insertion should be fine.
*/
while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
huffNode[pos] = huffNode[pos-1];
pos--;
}
U32 const r = HUF_getIndex(c) + 1;
U32 const pos = rankPosition[r].curr++;
assert(pos < maxSymbolValue1);
}
/* Sort each bucket. */
for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
int const bucketSize = rankPosition[n].curr - rankPosition[n].base;
U32 const bucketStartIdx = rankPosition[n].base;
if (bucketSize > 1) {
assert(bucketStartIdx < maxSymbolValue1);
HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
}
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
size_t
HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
void* workSpace, size_t wkspSize)
/** HUF_CStream_t:
* Huffman uses its own BIT_CStream_t implementation.
* There are three major differences from BIT_CStream_t:
* 1. HUF_addBits() takes a HUF_CElt (size_t) which is
* the pair (nbBits, value) in the format:
* - Bits [0, 4) = nbBits
* - Bits [4, 64 - nbBits) = 0
* - Bits [64 - nbBits, 64) = value
* 2. The bitContainer is built from the upper bits and
* right shifted. E.g. to add a new value of N bits
* you right shift the bitContainer by N, then or in
* the new value into the N upper bits.
* 3. The bitstream has two bit containers. You can add
* bits to the second container and merge them into
* the first container.
*/
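/* Worked example (illustrative, assuming a 64-bit size_t) : a symbol coded on
 * nbBits=3 with value 0b101 is stored as elt = (0b101 << 61) | 3 :
 * the low 4 bits hold nbBits, the top 3 bits hold the value, the bits in between are 0.
 * HUF_addBits() then shifts bitContainer right by 3 and ORs in the top-aligned value. */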
#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)
typedef struct {
size_t bitContainer[2];
size_t bitPos[2];
BYTE* startPtr;
BYTE* ptr;
BYTE* endPtr;
} HUF_CStream_t;
/**! HUF_initCStream():
* Initializes the bitstream.
* @returns 0 or an error code.
*/
static size_t HUF_initCStream(HUF_CStream_t* bitC,
void* startPtr, size_t dstCapacity)
{
ZSTD_memset(bitC, 0, sizeof(*bitC));
bitC->startPtr = (BYTE*)startPtr;
bitC->ptr = bitC->startPtr;
bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
return 0;
}
/*! HUF_addBits():
* Adds the symbol stored in HUF_CElt elt to the bitstream.
*
* @param elt The element we're adding. This is a (nbBits, value) pair.
* See the HUF_CStream_t docs for the format.
* @param idx Insert into the bitstream at this idx.
* @param kFast This is a template parameter. If the bitstream is guaranteed
* to have at least 4 unused bits after this call it may be 1,
* otherwise it must be 0. HUF_addBits() is faster when fast is set.
*/
FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
{
assert(idx <= 1);
assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
/* This is efficient on x86-64 with BMI2 because shrx
* only reads the low 6 bits of the register. The compiler
* knows this and elides the mask. When fast is set,
* every operation can use the same value loaded from elt.
*/
bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
/* We only read the low 8 bits of bitC->bitPos[idx] so it
* doesn't matter that the high bits have noise from the value.
*/
bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
/* The last 4-bits of elt are dirty if fast is set,
* so we must not be overwriting bits that have already been
* inserted into the bit container.
*/
#if DEBUGLEVEL >= 1
{
size_t const nbBits = HUF_getNbBits(elt);
size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1;
(void)dirtyBits;
/* Middle bits are 0. */
assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
/* We didn't overwrite any bits in the bit container. */
assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
(void)dirtyBits;
}
#endif
}
FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
{
bitC->bitContainer[1] = 0;
bitC->bitPos[1] = 0;
}
/*! HUF_mergeIndex1() :
* Merges the bit container @ index 1 into the bit container @ index 0
* and zeros the bit container @ index 1.
*/
FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
{
assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
bitC->bitContainer[0] |= bitC->bitContainer[1];
bitC->bitPos[0] += bitC->bitPos[1];
assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
}
/*! HUF_flushBits() :
* Flushes the bits in the bit container @ index 0.
*
* @post bitPos will be < 8.
* @param kFast If kFast is set then we must know a-priori that
* the bit container will not overflow.
*/
FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
{
/* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
size_t const nbBits = bitC->bitPos[0] & 0xFF;
size_t const nbBytes = nbBits >> 3;
/* The top nbBits bits of bitContainer are the ones we need. */
size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
/* Mask bitPos to account for the bytes we consumed. */
bitC->bitPos[0] &= 7;
assert(nbBits > 0);
assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
assert(bitC->ptr <= bitC->endPtr);
MEM_writeLEST(bitC->ptr, bitContainer);
bitC->ptr += nbBytes;
assert(!kFast || bitC->ptr <= bitC->endPtr);
if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
/* bitContainer doesn't need to be modified because the leftover
* bits are already the top bitPos bits. And we don't care about
* noise in the lower values.
*/
}
/*! HUF_endMark()
* @returns The Huffman stream end mark: A 1-bit value = 1.
*/
static HUF_CElt HUF_endMark(void)
{
HUF_CElt endMark;
HUF_setNbBits(&endMark, 1);
HUF_setValue(&endMark, 1);
return endMark;
}
/*! HUF_closeCStream() :
* @return Size of CStream, in bytes,
* or 0 if it could not fit into dstBuffer */
static size_t HUF_closeCStream(HUF_CStream_t* bitC)
{
HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
HUF_flushBits(bitC, /* kFast */ 0);
{
size_t const nbBits = bitC->bitPos[0] & 0xFF;
if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0);
}
}
#define HUF_FLUSHBITS(s) BIT_flushBits(s)
FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
const BYTE* ip, size_t srcSize,
const HUF_CElt* ct,
int kUnroll, int kFastFlush, int kLastFast)
{
/* Join to kUnroll */
int n = (int)srcSize;
int rem = n % kUnroll;
if (rem > 0) {
for (; rem > 0; --rem) {
HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
}
HUF_flushBits(bitC, kFastFlush);
}
assert(n % kUnroll == 0);
#define HUF_FLUSHBITS_1(stream) \
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
/* Join to 2 * kUnroll */
if (n % (2 * kUnroll)) {
int u;
for (u = 1; u < kUnroll; ++u) {
HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
}
HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
HUF_flushBits(bitC, kFastFlush);
n -= kUnroll;
}
assert(n % (2 * kUnroll) == 0);
#define HUF_FLUSHBITS_2(stream) \
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
for (; n>0; n-= 2 * kUnroll) {
/* Encode kUnroll symbols into the bitstream @ index 0. */
int u;
for (u = 1; u < kUnroll; ++u) {
HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
}
HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
HUF_flushBits(bitC, kFastFlush);
/* Encode kUnroll symbols into the bitstream @ index 1.
* This allows us to start filling the bit container
* without any data dependencies.
*/
HUF_zeroIndex1(bitC);
for (u = 1; u < kUnroll; ++u) {
HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
}
HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
/* Merge bitstream @ index 1 into the bitstream @ index 0 */
HUF_mergeIndex1(bitC);
HUF_flushBits(bitC, kFastFlush);
}
assert(n == 0);
}
/**
* Returns a tight upper bound on the output space needed by Huffman
* with an 8-byte buffer to handle over-writes. If the output is at least
* this large we don't need to do bounds checks during Huffman encoding.
*/
static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
{
return ((srcSize * tableLog) >> 3) + 8;
}
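/* Worked example (illustrative) : for srcSize = 1000 and tableLog = 11,
 * the bound is ((1000 * 11) >> 3) + 8 = 1375 + 8 = 1383 bytes :
 * at most 11 bits per symbol, rounded down to whole bytes, plus 8 spare bytes
 * so the 8-byte flushes in HUF_flushBits() can overshoot safely. */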
n = srcSize & ~3; /* join to mod 4 */
switch (srcSize & 3)
{
case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
HUF_FLUSHBITS_2(&bitC);
/* fall-through */
case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
HUF_FLUSHBITS_1(&bitC);
/* fall-through */
case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
HUF_FLUSHBITS(&bitC);
/* fall-through */
case 0 : /* fall-through */
default: break;
}
for (; n>0; n-=4) { /* note : n&3==0 at this stage */
HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
HUF_FLUSHBITS_1(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
HUF_FLUSHBITS_2(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
HUF_FLUSHBITS_1(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
HUF_FLUSHBITS(&bitC);
if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
else {
if (MEM_32bits()) {
switch (tableLog) {
case 11:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 10: ZSTD_FALLTHROUGH;
case 9: ZSTD_FALLTHROUGH;
case 8:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
case 7: ZSTD_FALLTHROUGH;
default:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
}
} else {
switch (tableLog) {
case 11:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 10:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
case 9:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 8:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 7:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
break;
case 6: ZSTD_FALLTHROUGH;
default:
HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
break;
}
}
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) );
if (cSize==0) return 0;
assert(cSize <= 65535);
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) );
if (cSize==0) return 0;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
if (cSize == 0 || cSize > 65535) return 0;
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) :
HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2);
HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
HUF_buildCTable_wksp_tables buildCTable_wksp;
HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
union {
HUF_buildCTable_wksp_tables buildCTable_wksp;
HUF_WriteCTableWksp writeCTable_wksp;
U32 hist_wksp[HIST_WKSP_SIZE_U32];
} wksps;
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */
unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
{
unsigned cardinality = 0;
unsigned i;
for (i = 0; i < maxSymbolValue + 1; i++) {
if (count[i] != 0) cardinality += 1;
}
return cardinality;
}
unsigned HUF_minTableLog(unsigned symbolCardinality)
{
U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
return minBitsSymbols;
}
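/* Worked example (illustrative) : with 40 distinct symbols present,
 * ZSTD_highbit32(40) = 5, so HUF_minTableLog() returns 6 :
 * probing table logs below 6 would not leave room for every present symbol. */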
unsigned HUF_optimalTableLog(
unsigned maxTableLog,
size_t srcSize,
unsigned maxSymbolValue,
void* workSpace, size_t wkspSize,
HUF_CElt* table,
const unsigned* count,
int flags)
{
assert(srcSize > 1); /* Not supported, RLE should be used instead */
assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));
if (!(flags & HUF_flags_optimalDepth)) {
/* cheap evaluation, based on FSE */
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}
{ BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
size_t maxBits, hSize, newSize;
const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
size_t optSize = ((size_t) ~0) - 1;
unsigned optLog = maxTableLog, optLogGuess;
DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);
/* Search until size increases */
for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
if (ERR_isError(maxBits)) continue;
if (maxBits < optLogGuess && optLogGuess > minTableLog) break;
hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
if (ERR_isError(hSize)) continue;
newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;
if (newSize > optSize + 1) {
break;
}
if (newSize < optSize) {
optSize = newSize;
optLog = optLogGuess;
}
}
assert(optLog <= HUF_TABLELOG_MAX);
return optLog;
}
}
HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */
DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);
nbStreams, oldHufTable, bmi2);
nbStreams, oldHufTable, flags);
}
/* If uncompressible data is suspected, do a smaller sampling first */
DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
size_t largestTotal = 0;
DEBUGLOG(5, "input suspected incompressible : sampling to check");
{ unsigned maxSymbolValueBegin = maxSymbolValue;
CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
largestTotal += largestBegin;
}
{ unsigned maxSymbolValueEnd = maxSymbolValue;
CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
largestTotal += largestEnd;
}
if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
{ CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
/* Zero unused symbols in CTable, so we can check it for validity */
ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
}
/* Zero unused symbols in CTable, so we can check it for validity */
{
size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue);
size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt);
ZSTD_memset(table->CTable + ctableSize, 0, unusedSize);
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) );
{ CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
&table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
nbStreams, table->CTable, bmi2);
}
size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_singleStream,
workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/);
nbStreams, table->CTable, flags);
repeat, preferRepeat, bmi2);
}
/* HUF_compress4X_repeat():
* compress input using 4 streams.
* provide workspace to generate compression tables */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize,
maxSymbolValue, huffLog, HUF_fourStreams,
workSpace, wkspSize,
NULL, NULL, 0, 0 /*bmi2*/);
repeat, flags);
hufTable, repeat, preferRepeat, bmi2);
}
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/** HUF_buildCTable() :
* @return : maxNbBits
* Note : count is used before tree is written, so they can safely overlap
*/
size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
{
HUF_buildCTable_wksp_tables workspace;
return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
}
size_t HUF_compress1X (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog)
{
unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}
size_t HUF_compress2 (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog)
{
unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
}
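/* Illustrative sketch (not part of upstream zstd) : typical use of the simple
 * 4-stream entry point defined above. The HUF_exampleCompressBlock name is an assumption. */
MEM_STATIC size_t HUF_exampleCompressBlock(void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize)
{
    /* maxSymbolValue = 255 (any byte value), default table log ;
     * returns 0 when the input is judged not compressible. */
    return HUF_compress2(dst, dstCapacity, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
}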
size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
hufTable, repeat, flags);
U32* cumul = (U32*)workSpace;
FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2));
U16* cumul = (U16*)workSpace; /* size = maxSV1 */
FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */
{ U32 position = 0;
if (highThreshold == tableSize - 1) {
/* Case for no low prob count symbols. Lay down 8 bytes at a time
* to reduce branch misses since we are operating on a small block
*/
BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */
{ U64 const add = 0x0101010101010101ull;
size_t pos = 0;
U64 sv = 0;
U32 s;
for (s=0; s<maxSV1; ++s, sv += add) {
int i;
int const n = normalizedCounter[s];
MEM_write64(spread + pos, sv);
for (i = 8; i < n; i += 8) {
MEM_write64(spread + pos + i, sv);
}
assert(n>=0);
pos += (size_t)n;
}
}
/* Spread symbols across the table. Lack of lowprob symbols means that
* we don't need variable sized inner loop, so we can unroll the loop and
* reduce branch misses.
*/
{ size_t position = 0;
size_t s;
size_t const unroll = 2; /* Experimentally determined optimal unroll */
assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
for (s = 0; s < (size_t)tableSize; s += unroll) {
size_t u;
for (u = 0; u < unroll; ++u) {
size_t const uPosition = (position + (u * step)) & tableMask;
tableSymbol[uPosition] = spread[s + u];
}
position = (position + (unroll * step)) & tableMask;
}
assert(position == 0); /* Must have initialized all positions */
}
} else {
U32 position = 0;
{
U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
assert(normalizedCounter[s] > 1);
{ U32 const maxBitsOut = tableLog - ZSTD_highbit32 ((U32)normalizedCounter[s]-1);
U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut;
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */
return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol));
}
#endif
size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog
+ 4 /* bitCount initialized at 4 */
+ 2 /* first two symbols may use one additional bit each */) / 8)
+ 1 /* round up to whole nb bytes */
+ 2 /* additional two bytes for bitstream flush */;
FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog)
{
size_t size;
if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
return (FSE_CTable*)ZSTD_malloc(size);
}
void FSE_freeCTable (FSE_CTable* ct) { ZSTD_free(ct); }
/* fake FSE_CTable, for raw (uncompressed) input */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* header */
tableU16[-2] = (U16) nbBits;
tableU16[-1] = (U16) maxSymbolValue;
/* Build table */
for (s=0; s<tableSize; s++)
tableU16[s] = (U16)(tableSize + s);
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` must be `(1<<tableLog)`.
*/
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
unsigned count[FSE_MAX_SYMBOL_VALUE+1];
S16 norm[FSE_MAX_SYMBOL_VALUE+1];
FSE_CTable* CTable = (FSE_CTable*)workSpace;
size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
void* scratchBuffer = (void*)(CTable + CTableSize);
size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
/* init conditions */
if (wkspSize < FSE_COMPRESS_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
if (srcSize <= 1) return 0; /* Not compressible */
if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
/* Scan input and build symbol stats */
{ CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
}
tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue, /* useLowProbCount */ srcSize >= 2048) );
/* Write table description header */
{ CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
op += nc_err;
}
/* Compress */
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
/* check compressibility */
if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
return op-ostart;
}
typedef struct {
FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
union {
U32 hist_wksp[HIST_WKSP_SIZE_U32];
BYTE scratchBuffer[1 << FSE_MAX_TABLELOG];
} workspace;
} fseWkspMax_t;
size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog)
{
fseWkspMax_t scratchBuffer;
DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_COMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* a compilation failure here means scratchBuffer is not large enough */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer));
}
size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG);
}
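/*
 * Usage sketch (illustrative only, not part of the library): interpreting
 * FSE_compress() return values. `dst`, `src` and their sizes are hypothetical;
 * `dstCapacity` is assumed to be >= FSE_compressBound(srcSize).
 *
 *   size_t const r = FSE_compress(dst, dstCapacity, src, srcSize);
 *   if (FSE_isError(r)) { ... }   // error, e.g. dstSize_tooSmall
 *   else if (r == 0)    { ... }   // not compressible : store src as raw bytes
 *   else if (r == 1)    { ... }   // src is a single repeated symbol : store as RLE
 *   else                { ... }   // r = number of compressed bytes written into dst
 */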
#endif
/* weak symbol support
* For now, enable conservatively:
* - Only GNUC
* - Only ELF
* - Only x86-64, i386 and aarch64
* Also, explicitly disable on platforms known not to work so they aren't
* forgotten in the future.
*/
#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \
defined(__GNUC__) && defined(__ELF__) && \
(defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86) || defined(__aarch64__)) && \
!defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(_AIX)
# define ZSTD_HAVE_WEAK_SYMBOLS 1
#else
# define ZSTD_HAVE_WEAK_SYMBOLS 0
#endif
/**
* Ignore: this is an internal helper.
*
* This is a helper function to help force C99-correctness during compilation.
* Under strict compilation modes, variadic macro arguments can't be empty.
* However, variadic function arguments can be. Using a function therefore lets
* us statically check that at least one (string) argument was passed,
* independent of the compilation flags.
*/
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...) {
(void)format;
}
/**
* Ignore: this is an internal helper.
*
* We want to force this function invocation to be syntactically correct, but
* we don't want to force runtime evaluation of its arguments.
*/
#define _FORCE_HAS_FORMAT_STRING(...) \
if (0) { \
_force_has_format_string(__VA_ARGS__); \
}
/**
* Return the specified error if the condition evaluates to true.
*
* In debug modes, prints additional information.
* In order to do that (particularly, printing the conditional that failed),
* this can't just wrap RETURN_ERROR().
*/
#define RETURN_ERROR_IF(cond, err, ...) \
if (cond) { \
RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
__FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
}
/**
* Unconditionally return the specified error.
*
* In debug modes, prints additional information.
*/
#define RETURN_ERROR(err, ...) \
do { \
RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
__FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
} while(0);
/**
* If the provided expression evaluates to an error code, returns that error code.
*
* In debug modes, prints additional information.
*/
#define FORWARD_IF_ERROR(err, ...) \
do { \
size_t const err_code = (err); \
if (ERR_isError(err_code)) { \
RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
__FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return err_code; \
} \
} while(0);
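/*
 * Usage sketch (hypothetical helper, for illustration only): a function
 * returning a size_t error code can combine both macros. `someNestedCall`
 * is an assumed name, not a real zstd function.
 *
 *   static size_t copyChecked(void* dst, size_t dstCapacity,
 *                             const void* src, size_t srcSize)
 *   {
 *       RETURN_ERROR_IF(dstCapacity < srcSize, dstSize_tooSmall,
 *                       "need %u bytes, have only %u",
 *                       (unsigned)srcSize, (unsigned)dstCapacity);
 *       ZSTD_memcpy(dst, src, srcSize);
 *       FORWARD_IF_ERROR(someNestedCall(dst, srcSize), "nested call failed");
 *       return srcSize;
 *   }
 */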
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */) /* for a non-null block */
#define MIN_LITERALS_FOR_4_STREAMS 6
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
/* Need to use memmove here since the literal buffer can now be located within
the dst buffer. In circumstances where the op "catches up" to where the
literal buffer is, there can be partial overlaps in this call on the final
copy if the literal is being shifted by less than 16 bytes. */
/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
typedef enum {
ZSTD_llt_none = 0, /* no longLengthType */
ZSTD_llt_literalLength = 1, /* represents a long literal */
ZSTD_llt_matchLength = 2 /* represents a long match */
} ZSTD_longLengthType_e;
ZSTD_longLengthType_e longLengthType;
U32 longLengthPos; /* Index of the sequence to apply long length modification to */
int ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */
MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */
{
assert(val != 0);
{
# if defined(_MSC_VER) /* Visual */
# if STATIC_BMI2 == 1
return _lzcnt_u32(val)^31;
# else
unsigned long r=0;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
return 31 - __CLZ(val);
# else /* Software version */
static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
# endif
}
}
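/*
 * For reference: ZSTD_highbit32(v) returns the position of the highest set bit,
 * i.e. (U32)floor(log2(v)) for v != 0. Sample values:
 *   ZSTD_highbit32(1)          == 0
 *   ZSTD_highbit32(32)         == 5
 *   ZSTD_highbit32(0x80000000) == 31
 * A typical (hedged) use is deriving a bit count, e.g. :
 *   U32 const nbBits = ZSTD_highbit32(range) + 1;   // `range` is hypothetical
 */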
/*
* You can contact the author at :
* - xxHash homepage: https://cyan4973.github.io/xxHash/
* - xxHash source repository : https://github.com/Cyan4973/xxHash
*/
#ifndef XXH_NO_XXH3
# define XXH_NO_XXH3
#endif
#ifndef XXH_NAMESPACE
# define XXH_NAMESPACE ZSTD_
#endif
/*!
* @mainpage xxHash
*
* @file xxhash.h
* xxHash prototypes and implementation
*/
/* TODO: update */
/* ****************************
*  Definitions
******************************/
#include "zstd_deps.h"
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
/* ****************************
* API modifier
******************************/
/** XXH_PRIVATE_API
* This is useful if you want to include xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
* Methodology :
* #define XXH_PRIVATE_API
* #include "xxhash.h"
* `xxhash.c` is automatically included.
* It's not useful to compile and link it as a separate module anymore.
*/
#ifdef XXH_PRIVATE_API
# ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
# endif
#endif /* XXH_PRIVATE_API */
/* ****************************
*  INLINE mode
******************************/
/*!
* XXH_INLINE_ALL (and XXH_PRIVATE_API)
* Use these build macros to inline xxhash into the target unit.
* Inlining improves performance on small inputs, especially when the length is
* expressed as a compile-time constant:
*
* https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
*
* It also keeps xxHash symbols private to the unit, so they are not exported.
*
* Usage:
* #define XXH_INLINE_ALL
* #include "xxhash.h"
*
* Do not compile and link xxhash.o as a separate object, as it is not useful.
*/
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
&& !defined(XXH_INLINE_ALL_31684351384)
/* this section should be traversed only once */
# define XXH_INLINE_ALL_31684351384
/* give access to the advanced API, required to compile implementations */
# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
# define XXH_STATIC_LINKING_ONLY
/* make all functions private */
# undef XXH_PUBLIC_API
/*
* This part deals with the special case where a unit wants to inline xxHash,
* but "xxhash.h" has previously been included without XXH_INLINE_ALL,
* such as part of some previously included *.h header file.
* Without further action, the new include would just be ignored,
* and functions would effectively _not_ be inlined (silent failure).
* The following macros solve this situation by prefixing all inlined names,
* avoiding naming collision with previous inclusions.
*/
/* Before that, we unconditionally #undef all symbols,
* in case they were already defined with XXH_NAMESPACE.
* They will then be redefined for XXH_INLINE_ALL
*/
# undef XXH_versionNumber
/* XXH32 */
# undef XXH32
# undef XXH32_createState
# undef XXH32_freeState
# undef XXH32_reset
# undef XXH32_update
# undef XXH32_digest
# undef XXH32_copyState
# undef XXH32_canonicalFromHash
# undef XXH32_hashFromCanonical
/* XXH64 */
# undef XXH64
# undef XXH64_createState
# undef XXH64_freeState
# undef XXH64_reset
# undef XXH64_update
# undef XXH64_digest
# undef XXH64_copyState
# undef XXH64_canonicalFromHash
# undef XXH64_hashFromCanonical
/* XXH3_64bits */
# undef XXH3_64bits
# undef XXH3_64bits_withSecret
# undef XXH3_64bits_withSeed
# undef XXH3_64bits_withSecretandSeed
# undef XXH3_createState
# undef XXH3_freeState
# undef XXH3_copyState
# undef XXH3_64bits_reset
# undef XXH3_64bits_reset_withSeed
# undef XXH3_64bits_reset_withSecret
# undef XXH3_64bits_update
# undef XXH3_64bits_digest
# undef XXH3_generateSecret
/* XXH3_128bits */
# undef XXH128
# undef XXH3_128bits
# undef XXH3_128bits_withSeed
# undef XXH3_128bits_withSecret
# undef XXH3_128bits_reset
# undef XXH3_128bits_reset_withSeed
# undef XXH3_128bits_reset_withSecret
# undef XXH3_128bits_reset_withSecretandSeed
# undef XXH3_128bits_update
# undef XXH3_128bits_digest
# undef XXH128_isEqual
# undef XXH128_cmp
# undef XXH128_canonicalFromHash
# undef XXH128_hashFromCanonical
/* Finally, free the namespace itself */
# undef XXH_NAMESPACE
/* employ the namespace for XXH_INLINE_ALL */
# define XXH_NAMESPACE XXH_INLINE_
/*
* Some identifiers (enums, type names) are not symbols,
* but they must nonetheless be renamed to avoid redeclaration.
* Alternative solution: do not redeclare them.
* However, this requires some #ifdefs, and has a more dispersed impact.
* Meanwhile, renaming can be achieved in a single place.
*/
# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
# define XXH_OK XXH_IPREF(XXH_OK)
# define XXH_ERROR XXH_IPREF(XXH_ERROR)
# define XXH_errorcode XXH_IPREF(XXH_errorcode)
# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
# define XXH32_state_s XXH_IPREF(XXH32_state_s)
# define XXH32_state_t XXH_IPREF(XXH32_state_t)
# define XXH64_state_s XXH_IPREF(XXH64_state_s)
# define XXH64_state_t XXH_IPREF(XXH64_state_t)
# define XXH3_state_s XXH_IPREF(XXH3_state_s)
# define XXH3_state_t XXH_IPREF(XXH3_state_t)
# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
/* Ensure the header is parsed again, even if it was previously included */
# undef XXHASH_H_5627135585666179
# undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
/* ****************************************************************
* Stable API
*****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
/*!
* @defgroup public Public API
* Contains details on the public xxHash functions.
* @{
*/
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
# ifdef XXH_EXPORT
# define XXH_PUBLIC_API __declspec(dllexport)
# elif XXH_IMPORT
# define XXH_PUBLIC_API __declspec(dllimport)
# endif
# else
# define XXH_PUBLIC_API /* do nothing */
# endif
#endif
#ifdef XXH_DOXYGEN
/*!
* @brief Emulate a namespace by transparently prefixing all symbols.
*
* If you want to include _and expose_ xxHash functions from within your own
* library, but also want to avoid symbol collisions with other libraries which
* may also include xxHash, you can use XXH_NAMESPACE to automatically prefix
* any public symbol from xxhash library with the value of XXH_NAMESPACE
* (therefore, avoid empty or numeric values).
*
* Note that no change is required within the calling program as long as it
* includes `xxhash.h`: Regular symbol names will be automatically translated
* by this header.
*/
# define XXH_NAMESPACE /* YOUR NAME HERE */
# undef XXH_NAMESPACE
#endif
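/*
 * Illustrative example (the prefix `MYLIB_` is hypothetical): building with
 *     -DXXH_NAMESPACE=MYLIB_
 * renames the exported symbol XXH64 to MYLIB_XXH64 at link time, while calling
 * code that includes this header keeps using the regular name:
 *
 *   #include "xxhash.h"
 *   XXH64_hash_t h = XXH64(buf, len, 0);   // actually links against MYLIB_XXH64
 */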
#ifdef XXH_NAMESPACE
# define XXH_CAT(A,B) A##B
# define XXH_NAME2(A,B) XXH_CAT(A,B)
/* XXH64 */
# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
/* XXH3_64bits */
# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif /* XXH_NAMESPACE */
/*-**********************************************************************
* 32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
* @brief An unsigned 32-bit integer.
*
* Not necessarily defined to `uint32_t` but functionally equivalent.
*/
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint32_t XXH32_hash_t;
#else
# include <limits.h>
# if UINT_MAX == 0xFFFFFFFFUL
typedef unsigned int XXH32_hash_t;
# else
# if ULONG_MAX == 0xFFFFFFFFUL
typedef unsigned long XXH32_hash_t;
# else
# error "unsupported platform: need a 32-bit type"
# endif
# endif
#endif
/*!
* @}
*
* @defgroup xxh32_family XXH32 family
* @ingroup public
* Contains functions used in the classic 32-bit xxHash algorithm.
*
* @note
* XXH32 is useful for older platforms, with no or poor 64-bit performance.
* Note that @ref xxh3_family provides competitive speed
* for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results.
*
* @see @ref xxh64_family, @ref xxh3_family : Other xxHash families
* @see @ref xxh32_impl for implementation details
* @{
*/
/*!
* @brief Calculates the 32-bit hash of @p input using xxHash32.
*
* Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
*
* @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
* @param seed The 32-bit seed to alter the hash's output predictably.
*
* @pre
* The memory between @p input and @p input + @p length must be valid,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
* @return The calculated 32-bit hash value.
*
* @see
* XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
* Direct equivalents for the other variants of xxHash.
* @see
* XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
*/
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
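/*
 * Minimal one-shot example (buffer and seed chosen for illustration):
 *
 *   const char data[] = "sample";
 *   XXH32_hash_t const h = XXH32(data, sizeof(data)-1, 0);   // seed = 0
 */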
/* ****************************
* Streaming Hash Functions
******************************/
/*!
* Streaming functions generate the xxHash value from an incremental input.
* This method is slower than single-call functions, due to state management.
* For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
*
* An XXH state must first be allocated using `XXH*_createState()`.
*
* Start a new hash by initializing the state with a seed using `XXH*_reset()`.
*
* Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
*
* The function returns an error code, with 0 meaning OK, and any other value
* meaning there is an error.
*
* Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
* This function returns the nn-bits hash as an int or long long.
*
* It's still possible to continue inserting input into the hash state after a
* digest, and generate new hash values later on by invoking `XXH*_digest()`.
*
* When done, release the state using `XXH*_freeState()`.
*
* Example code for incrementally hashing a file:
* @code{.c}
* #include <stdio.h>
* #include <xxhash.h>
* #define BUFFER_SIZE 256
*
* // Note: XXH64 and XXH3 use the same interface.
* XXH32_hash_t
* hashFile(FILE* stream)
* {
* XXH32_state_t* state;
* unsigned char buf[BUFFER_SIZE];
* size_t amt;
* XXH32_hash_t hash;
*
* state = XXH32_createState(); // Create a state
* assert(state != NULL); // Error check here
* XXH32_reset(state, 0xbaad5eed); // Reset state with our seed
* while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) {
* XXH32_update(state, buf, amt); // Hash the file in chunks
* }
* hash = XXH32_digest(state); // Finalize the hash
* XXH32_freeState(state); // Clean up
* return hash;
* }
* @endcode
*/
/*!
* @typedef struct XXH32_state_s XXH32_state_t
* @brief The opaque state struct for the XXH32 streaming API.
*
* @see XXH32_state_s for details.
*/
typedef struct XXH32_state_s XXH32_state_t;
/*! State allocation, compatible with dynamic libraries */
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
/*!
* @brief Copies one @ref XXH32_state_t to another.
*
* @param dst_state The state to copy to.
* @param src_state The state to copy from.
* @pre
* @p dst_state and @p src_state must not be `NULL` and must not overlap.
*/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
/*!
* @brief Resets an @ref XXH32_state_t to begin a new hash.
*
* This function resets and seeds a state. Call it before @ref XXH32_update().
*
* @param statePtr The state struct to reset.
* @param seed The 32-bit seed to alter the hash result predictably.
*
* @pre
* @p statePtr must not be `NULL`.
*
* @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
*/
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
/*!
* @brief Consumes a block of @p input to an @ref XXH32_state_t.
*
* Call this to incrementally consume blocks of data.
*
* @param statePtr The state struct to update.
* @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
*
* @pre
* @p statePtr must not be `NULL`.
* @pre
* The memory between @p input and @p input + @p length must be valid,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
* @return @ref XXH_OK on success, @ref XXH_ERROR on failure.
*/
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
/*!
* @brief Returns the calculated hash value from an @ref XXH32_state_t.
*
* @note
* Calling XXH32_digest() will not affect @p statePtr, so you can update,
* digest, and update again.
*
* @param statePtr The state struct to calculate the hash from.
*
* @pre
* @p statePtr must not be `NULL`.
*
* @return The calculated xxHash32 value from that state.
*/
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
/******* Canonical representation *******/
/*
* The default return values from XXH functions are unsigned 32 and 64 bit
* integers.
* This is the simplest and fastest format for further post-processing.
*
* However, this leaves open the question of what is the order on the byte level,
* since little and big endian conventions will store the same number differently.
*
* The canonical representation settles this issue by mandating big-endian
* convention, the same convention as human-readable numbers (large digits first).
*
* When writing hash values to storage, sending them over a network, or printing
* them, it's highly recommended to use the canonical representation to ensure
* portability across a wider range of systems, present and future.
*
* The following functions allow transformation of hash values to and from
* canonical format.
*/
/*!
* @brief Canonical (big endian) representation of @ref XXH32_hash_t.
*/
typedef struct {
unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;
/*!
* @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
*
* @param dst The @ref XXH32_canonical_t pointer to be stored to.
* @param hash The @ref XXH32_hash_t to be converted.
*
* @pre
* @p dst must not be `NULL`.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
/*!
* @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
*
* @param src The @ref XXH32_canonical_t to convert.
*
* @pre
* @p src must not be `NULL`.
*
* @return The converted hash.
*/
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
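/*
 * Round-trip sketch (illustrative): serialize a hash in canonical (big endian)
 * form, e.g. before writing it to disk, then restore the native value:
 *
 *   XXH32_canonical_t canon;
 *   XXH32_canonicalFromHash(&canon, h);               // canon.digest is portable
 *   XXH32_hash_t const back = XXH32_hashFromCanonical(&canon);
 *   assert(back == h);
 */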
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif
/*
* Define the XXH_FALLTHROUGH macro for annotating switch cases with the
* 'fallthrough' attribute introduced in C++17 and C23.
* C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
* C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
*/
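/* Conventional definition, mirroring upstream xxHash (provided here as an
 * assumption, since the comment above only describes the macro): */
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
# define XXH_FALLTHROUGH
#endif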
/* **************************
* Utils
****************************/
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */
# define restrict /* disable restrict */
#endif
#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
* 64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
* @brief An unsigned 64-bit integer.
*
* Not necessarily defined to `uint64_t` but functionally equivalent.
*/
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint64_t XXH64_hash_t;
#else
# include <limits.h>
# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
/* LP64 ABI says uint64_t is unsigned long */
typedef unsigned long XXH64_hash_t;
# else
/* the following type must have a width of 64-bit */
typedef unsigned long long XXH64_hash_t;
# endif
#endif
/*!
* @}
*
* @defgroup xxh64_family XXH64 family
* @ingroup public
* @{
* Contains functions used in the classic 64-bit xxHash algorithm.
*
* @note
* XXH3 provides competitive speed for both 32-bit and 64-bit systems,
* and offers true 64/128 bit hash results.
* It provides better speed for systems with vector processing capabilities.
*/
/*!
* @brief Calculates the 64-bit hash of @p input using xxHash64.
*
* This function usually runs faster on 64-bit systems, but slower on 32-bit
* systems (see benchmark).
*
* @param input The block of data to be hashed, at least @p length bytes in size.
* @param length The length of @p input, in bytes.
* @param seed The 64-bit seed to alter the hash's output predictably.
*
* @pre
* The memory between @p input and @p input + @p length must be valid,
* readable, contiguous memory. However, if @p length is `0`, @p input may be
* `NULL`. In C++, this also must be *TriviallyCopyable*.
*
* @return The calculated 64-bit hash.
*
* @see
* XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
* Direct equivalents for the other variants of xxHash.
* @see
* XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
*/
XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
/******* Streaming *******/
/*!
* @brief The opaque state struct for the XXH64 streaming API.
*
* @see XXH64_state_s for details.
*/
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
#ifndef XXH_NO_XXH3
/*!
* @}
* ************************************************************************
* @defgroup xxh3_family XXH3 family
* @ingroup public
* @{
*
* XXH3 is a more recent hash algorithm featuring:
* - Improved speed for both small and large inputs
* - True 64-bit and 128-bit outputs
* - SIMD acceleration
* - Improved 32-bit viability
*
* Speed analysis methodology is explained here:
*
* https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
*
* Compared to XXH64, expect XXH3 to run approximately 2x faster on large
* inputs and more than 3x faster on small ones; exact differences vary
* depending on platform.
*
* XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
* but does not require it.
* Any 32-bit and 64-bit targets that can run XXH32 smoothly
* can run XXH3 at competitive speeds, even without vector support.
* Further details are explained in the implementation.
*
* Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
* ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
*
* XXH3 implementation is portable:
* it has a generic C90 formulation that can be compiled on any platform,
* all implementations generate exactly the same hash value on all platforms.
* Starting from v0.8.0, it's also labelled "stable", meaning that
* any future version will also generate the same hash value.
*
* XXH3 offers 2 variants, _64bits and _128bits.
*
* When only 64 bits are needed, prefer invoking the _64bits variant, as it
* reduces the amount of mixing, resulting in faster speed on small inputs.
* It's also generally simpler to manipulate a scalar return type than a struct.
*
* The API supports one-shot hashing, streaming mode, and custom secrets.
*/
/*-**********************************************************************
* XXH3 64-bit variant
************************************************************************/
/* XXH3_64bits():
* default 64-bit variant, using default secret and default seed of 0.
* It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);
/*
* XXH3_64bits_withSeed():
* This variant generates a custom secret on the fly
* based on default secret altered using the `seed` value.
* While this operation is decently fast, note that it's not completely free.
* Note: seed==0 produces the same results as XXH3_64bits().
*/
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
/*!
* The bare minimum size for a custom secret.
*
* @see
* XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
* XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
*/
#define XXH3_SECRET_SIZE_MIN 136
/*
* XXH3_64bits_withSecret():
* It's possible to provide any blob of bytes as a "secret" to generate the hash.
* This makes it more difficult for an external actor to prepare an intentional collision.
* The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
* However, the quality of the secret impacts the dispersion of the hash algorithm.
* Therefore, the secret _must_ look like a bunch of random bytes.
* Avoid "trivial" or structured data such as repeated sequences or a text document.
* Whenever in doubt about the "randomness" of the blob of bytes,
* consider employing "XXH3_generateSecret()" instead (see below).
* It will generate a proper high entropy secret derived from the blob of bytes.
* Another advantage of using XXH3_generateSecret() is that
* it guarantees that all bits within the initial blob of bytes
* will impact every bit of the output.
* This is not necessarily the case when using the blob of bytes directly
* because, when hashing _small_ inputs, only a portion of the secret is employed.
*/
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
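/*
 * Illustrative flow (buffer names and sizes chosen for the example): derive a
 * proper secret from arbitrary user content with XXH3_generateSecret(), then
 * hash with it:
 *
 *   unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *   if (XXH3_generateSecret(secret, sizeof(secret), userBlob, userBlobSize) == XXH_OK) {
 *       XXH64_hash_t const h = XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *       // ... use h ...
 *   }
 */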
/******* Streaming *******/
/*
* Streaming requires state maintenance.
* This operation costs memory and CPU.
* As a consequence, streaming is slower than one-shot hashing.
* For better performance, prefer one-shot functions whenever applicable.
*/
/*!
* @brief The state struct for the XXH3 streaming API.
*
* @see XXH3_state_s for details.
*/
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);
/*
* XXH3_64bits_reset():
* Initialize with default parameters.
* digest will be equivalent to `XXH3_64bits()`.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
* XXH3_64bits_reset_withSeed():
* Generate a custom secret from `seed`, and store it into `statePtr`.
* digest will be equivalent to `XXH3_64bits_withSeed()`.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
* XXH3_64bits_reset_withSecret():
* `secret` is referenced, it _must outlive_ the hash streaming session.
* Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
* and the quality of produced hash values depends on secret's entropy
* (secret's content should look like a bunch of random bytes).
* When in doubt about the randomness of a candidate `secret`,
* consider employing `XXH3_generateSecret()` instead (see below).
*/
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr);
/* note : canonical representation of XXH3 is the same as XXH64
* since they both produce XXH64_hash_t values */
/*-**********************************************************************
* XXH3 128-bit variant
************************************************************************/
/*!
* @brief The return value from 128-bit hashes.
*
* Stored in little endian order, although the fields themselves are in native
* endianness.
*/
typedef struct {
XXH64_hash_t low64; /*!< `value & 0xFFFFFFFFFFFFFFFF` */
XXH64_hash_t high64; /*!< `value >> 64` */
} XXH128_hash_t;
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
/******* Streaming *******/
/*
* Streaming requires state maintenance.
* This operation costs memory and CPU.
* As a consequence, streaming is slower than one-shot hashing.
* For better performance, prefer one-shot functions whenever applicable.
*
* XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
* Use already declared XXH3_createState() and XXH3_freeState().
*
* All reset and streaming functions have same meaning as their 64-bit counterpart.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
/* Following helper functions make it possible to compare XXH128_hash_t values.
* Since XXH128_hash_t is a structure, this capability is not offered by the language.
* Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
/*!
* XXH128_isEqual():
* Return: 1 if `h1` and `h2` are equal, 0 if they are not.
*/
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
/*!
* XXH128_cmp():
*
* This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
*
* return: >0 if *h128_1 > *h128_2
* =0 if *h128_1 == *h128_2
* <0 if *h128_1 < *h128_2
*/
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
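/*
 * Example (array name and size are hypothetical): XXH128_cmp() follows the
 * qsort()/bsearch() comparator contract, so 128-bit hashes can be sorted directly:
 *
 *   XXH128_hash_t hashes[NB_HASHES];
 *   // ... fill hashes ...
 *   qsort(hashes, NB_HASHES, sizeof(hashes[0]), XXH128_cmp);
 */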
/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
#endif /* !XXH_NO_XXH3 */
#endif /* XXH_NO_LONG_LONG */
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
* This section contains declarations which are not guaranteed to remain stable.
* They may change in future versions, becoming incompatible with a different
* version of the library.
* These declarations should only be used with static linking.
* Never use them in association with dynamic linking!
***************************************************************************** */
/*
* These definitions are only present to allow static allocation
* of XXH states, on stack or in a struct, for example.
* Never **ever** access their members directly.
*/
/*!
* @internal
* @brief Structure for XXH32 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
* an opaque type. This allows fields to safely be changed.
*
* Typedef'd to @ref XXH32_state_t.
* Do not access the members of this struct directly.
* @see XXH64_state_s, XXH3_state_s
*/
struct XXH32_state_s {
XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
XXH32_hash_t v[4]; /*!< Accumulator lanes */
XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
}; /* typedef'd to XXH32_state_t */
#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
/*!
* @internal
* @brief Structure for XXH64 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
* an opaque type. This allows fields to safely be changed.
*
* Typedef'd to @ref XXH64_state_t.
* Do not access the members of this struct directly.
* @see XXH32_state_s, XXH3_state_s
*/
struct XXH64_state_s {
XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
XXH64_hash_t v[4]; /*!< Accumulator lanes */
XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyway */
XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
}; /* typedef'd to XXH64_state_t */
#ifndef XXH_NO_XXH3
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
# include <stdalign.h>
# define XXH_ALIGN(n) alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
# define XXH_ALIGN(n) alignas(n)
#elif defined(__GNUC__)
# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
# define XXH_ALIGN(n) __declspec(align(n))
#else
# define XXH_ALIGN(n) /* disabled */
#endif
/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
&& ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
&& defined(__GNUC__)
# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif
/*!
* @brief The size of the internal XXH3 buffer.
*
* This is the optimal update size for incremental hashing.
*
* @see XXH3_64b_update(), XXH3_128b_update().
*/
#define XXH3_INTERNALBUFFER_SIZE 256
/*!
* @brief Default size of the secret buffer (and @ref XXH3_kSecret).
*
* This is the size used in @ref XXH3_kSecret and the seeded functions.
*
* Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
*/
#define XXH3_SECRET_DEFAULT_SIZE 192
/*!
* @internal
* @brief Structure for XXH3 streaming API.
*
* @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
* @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
* Otherwise it is an opaque type.
* Never use this definition in combination with dynamic library.
* This allows fields to safely be changed in the future.
*
* @note ** This structure has a strict alignment requirement of 64 bytes!! **
* Do not allocate this with `malloc()` or `new`,
* it will not be sufficiently aligned.
* Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
*
* Typedef'd to @ref XXH3_state_t.
* Do never access the members of this struct directly.
*
* @see XXH3_INITSTATE() for stack initialization.
* @see XXH3_createState(), XXH3_freeState().
* @see XXH32_state_s, XXH64_state_s
*/
struct XXH3_state_s {
XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
/*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
/*!< Used to store a custom secret generated from a seed. */
XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
/*!< The internal buffer. @see XXH32_state_s::mem32 */
XXH32_hash_t bufferedSize;
/*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
XXH32_hash_t useSeed;
/*!< Reserved field. Needed for padding on 64-bit. */
size_t nbStripesSoFar;
/*!< Number of stripes processed. */
XXH64_hash_t totalLen;
/*!< Total length hashed. 64-bit even on 32-bit targets. */
size_t nbStripesPerBlock;
/*!< Number of stripes per block. */
size_t secretLimit;
/*!< Size of @ref customSecret or @ref extSecret */
XXH64_hash_t seed;
/*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
XXH64_hash_t reserved64;
/*!< Reserved field. */
const unsigned char* extSecret;
/*!< Reference to an external secret for the _withSecret variants, NULL
* for other variants. */
/* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */
#undef XXH_ALIGN_MEMBER
/*!
* @brief Initializes a stack-allocated `XXH3_state_s`.
*
* When the @ref XXH3_state_t structure is merely emplaced on stack,
* it should be initialized with XXH3_INITSTATE() or a memset()
* in case its first reset uses XXH3_NNbits_reset_withSeed().
* This init can be omitted if the first reset uses default or _withSecret mode.
* This operation isn't necessary when the state is created with XXH3_createState().
* Note that this doesn't prepare the state for a streaming operation,
* it's still necessary to use XXH3_NNbits_reset*() afterwards.
*/
#define XXH3_INITSTATE(XXH3_state_ptr) { (XXH3_state_ptr)->seed = 0; }
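/*
 * Stack-allocation sketch (illustrative; requires XXH_STATIC_LINKING_ONLY so
 * the struct definition is visible):
 *
 *   XXH3_state_t state;
 *   XXH3_INITSTATE(&state);                       // needed before a _withSeed reset
 *   XXH3_64bits_reset_withSeed(&state, 1234);
 *   XXH3_64bits_update(&state, data, len);
 *   XXH64_hash_t const h = XXH3_64bits_digest(&state);
 */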
/* XXH128() :
* simple alias to pre-selected XXH3_128bits variant
*/
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
/* === Experimental API === */
/* Symbols defined below must be considered tied to a specific library version. */
/*
* XXH3_generateSecret():
*
* Derive a high-entropy secret from any user-defined content, named customSeed.
* The generated secret can be used in combination with `*_withSecret()` functions.
* The `_withSecret()` variants are useful to provide a higher level of protection than 64-bit seed,
* as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
*
* The function accepts as input a custom seed of any length and any content,
* and derives from it a high-entropy secret of length @secretSize
* into an already allocated buffer @secretBuffer.
* @secretSize must be >= XXH3_SECRET_SIZE_MIN
*
* The generated secret can then be used with any `*_withSecret()` variant.
* Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
* `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
* are part of this list. They all accept a `secret` parameter
* which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
* _and_ feature very high entropy (consist of random-looking bytes).
* These conditions can be a high bar to meet, so
* XXH3_generateSecret() can be employed to ensure proper quality.
*
* customSeed can be anything. It can have any size, even small ones,
* and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
* The resulting `secret` will nonetheless provide all required qualities.
*
* When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
*/
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);
/*
* XXH3_generateSecret_fromSeed():
*
* Generate the same secret as the _withSeed() variants.
*
* The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily).
* @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
*
* The generated secret can be used in combination with
*`*_withSecret()` and `_withSecretandSeed()` variants.
* This generator is notably useful in combination with `_withSecretandSeed()`,
* as a way to emulate a faster `_withSeed()` variant.
*/
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);
/*
* *_withSecretandSeed() :
* These variants generate hash values using either
* @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
* or @secret for "large" keys (>= XXH3_MIDSIZE_MAX).
*
* This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
* `_withSeed()` has to generate the secret on the fly for "large" keys.
* It's fast, but can be perceptible for "not so large" keys (< 1 KB).
* `_withSecret()` has to generate the masks on the fly for "small" keys,
* which requires more instructions than _withSeed() variants.
* Therefore, _withSecretandSeed variant combines the best of both worlds.
*
* When @secret has been generated by XXH3_generateSecret_fromSeed(),
* this variant produces *exactly* the same results as `_withSeed()` variant,
* hence offering only a pure speed benefit on "large" input,
* by skipping the need to regenerate the secret for every large input.
*
* Another usage scenario is to hash the secret to a 64-bit hash value,
* for example with XXH3_64bits(), which then becomes the seed,
* and then employ both the seed and the secret in _withSecretandSeed().
* On top of speed, an added benefit is that each bit in the secret
* has a 50% chance to swap each bit in the output,
* via its impact to the seed.
* This is not guaranteed when using the secret directly in "small data" scenarios,
* because only portions of the secret are employed for small data.
*/
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
const void* secret, size_t secretSize,
XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
const void* secret, size_t secretSize,
XXH64_hash_t seed64);
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
const void* secret, size_t secretSize,
XXH64_hash_t seed64);
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
const void* secret, size_t secretSize,
XXH64_hash_t seed64);
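/*
 * Combined usage sketch (illustrative; `mySeed`, `data` and `len` are hypothetical):
 * derive the seed-matching secret once, then pass both secret and seed so that
 * large inputs skip secret regeneration:
 *
 *   unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *   XXH3_generateSecret_fromSeed(secret, mySeed);
 *   XXH64_hash_t const h = XXH3_64bits_withSecretandSeed(data, len,
 *                                                        secret, sizeof(secret), mySeed);
 *   // h is identical to XXH3_64bits_withSeed(data, len, mySeed)
 */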
#endif /* XXH_NO_XXH3 */
#endif /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
# define XXH_IMPLEMENTATION
#endif
#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */
/*-**********************************************************************
* xxHash implementation
*-**********************************************************************
* xxHash's implementation used to be hosted inside xxhash.c.
*
* However, inlining requires implementation to be visible to the compiler,
* hence be included alongside the header.
* Previously, implementation was hosted inside xxhash.c,
* which was then #included when inlining was activated.
* This construction created issues with a few build and install systems,
* as it required xxhash.c to be stored in /include directory.
*
* xxHash implementation is now directly integrated within xxhash.h.
* As a consequence, xxhash.c is no longer needed in /include.
*
* xxhash.c is still available and is still useful.
* In a "normal" setup, when xxhash is not inlined,
* xxhash.h only exposes the prototypes and public symbols,
* while xxhash.c can be built into an object file xxhash.o
* which can then be linked into the final binary.
************************************************************************/
#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
|| defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
# define XXH_IMPLEM_13a8737387
/* *************************************
* Tuning parameters
***************************************/
/*!
* @defgroup tuning Tuning parameters
* @{
*
* Various macros to control xxHash's behavior.
*/
#ifdef XXH_DOXYGEN
/*!
* @brief Define this to disable 64-bit code.
*
* Useful if only using the @ref xxh32_family and you have a strict C90 compiler.
*/
# define XXH_NO_LONG_LONG
# undef XXH_NO_LONG_LONG /* don't actually */
/*!
* @brief Controls how unaligned memory is accessed.
*
* By default, access to unaligned memory is controlled by `memcpy()`, which is
* safe and portable.
*
* Unfortunately, on some target/compiler combinations, the generated assembly
* is sub-optimal.
*
* The switch below allows selection of a different access method
* in the search for improved performance.
*
* @par Possible options:
*
* - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
* @par
* Use `memcpy()`. Safe and portable. Note that most modern compilers will
* eliminate the function call and treat it as an unaligned access.
*
* - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))`
* @par
* Depends on compiler extensions and is therefore not portable.
* This method is safe _if_ your compiler supports it,
* and *generally* as fast or faster than `memcpy`.
*
* - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
* @par
* Casts directly and dereferences. This method doesn't depend on the
* compiler, but it violates the C standard as it directly dereferences an
* unaligned pointer. It can generate buggy code on targets which do not
* support unaligned memory accesses, but in some circumstances, it's the
* only known way to get the most performance.
*
* - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
* @par
* Also portable. This can generate the best code on old compilers which don't
* inline small `memcpy()` calls, and it might also be faster on big-endian
* systems which lack a native byteswap instruction. However, some compilers
* will emit literal byteshifts even if the target supports unaligned access.
* .
*
* @warning
* Methods 1 and 2 rely on implementation-defined behavior. Use these with
* care, as what works on one compiler/platform/optimization level may cause
* another to read garbage data or even crash.
*
* See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
*
* Prefer these methods in priority order (0 > 3 > 1 > 2)
*/
# define XXH_FORCE_MEMORY_ACCESS 0
/*!
* @def XXH_FORCE_ALIGN_CHECK
* @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
* and XXH64() only).
*
* This is an important performance trick for architectures without decent
* unaligned memory access performance.
*
* It checks for input alignment, and when conditions are met, uses a "fast
* path" employing direct 32-bit/64-bit reads, resulting in _dramatically
* faster_ read speed.
*
* The check costs one initial branch per hash, which is generally negligible,
* but not zero.
*
* Moreover, it's not useful to generate an additional code path if memory
* access uses the same instruction for both aligned and unaligned
* addresses (e.g. x86 and aarch64).
*
* In these cases, the alignment check can be removed by setting this macro to 0.
* Then the code will always use unaligned memory access.
* Align check is automatically disabled on x86, x64 & arm64,
* which are platforms known to offer good unaligned memory accesses performance.
*
* This option does not affect XXH3 (only XXH32 and XXH64).
*/
# define XXH_FORCE_ALIGN_CHECK 0
/*!
* @def XXH_NO_INLINE_HINTS
* @brief When non-zero, sets all functions to `static`.
*
* By default, xxHash tries to force the compiler to inline almost all internal
* functions.
*
* This can usually improve performance due to reduced jumping and improved
* constant folding, but significantly increases the size of the binary which
* might not be favorable.
*
* Additionally, sometimes the forced inlining can be detrimental to performance,
* depending on the architecture.
*
* XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
* compiler full control on whether to inline or not.
*
* When not optimizing (-O0), optimizing for size (-Os, -Oz), or using
* -fno-inline with GCC or Clang, this will automatically be defined.
*/
# define XXH_NO_INLINE_HINTS 0
/*!
* @def XXH32_ENDJMP
* @brief Whether to use a jump for `XXH32_finalize`.
*
 * By default, `XXH32_finalize` uses multiple short branches in the finalizer,
 * which is generally the faster option; however, depending on the exact
 * architecture, a single jump may be preferable.
 *
 * This setting is only likely to make a difference for very small inputs.
*/
# define XXH32_ENDJMP 0
/*!
* @internal
* @brief Redefines old internal names.
*
* For compatibility with code that uses xxHash's internals before the names
* were changed to improve namespacing. There is no other reason to use this.
*/
# define XXH_OLD_NAMES
# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */
/*!
* @}
*/
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
/* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */
# if !defined(__clang__) && \
( \
(defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
( \
defined(__GNUC__) && ( \
(defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
( \
defined(__mips__) && \
(__mips <= 5 || __mips_isa_rev < 6) && \
(!defined(__mips16) || defined(__mips_mips16e2)) \
) \
) \
) \
)
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \
|| defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */
# define XXH_FORCE_ALIGN_CHECK 0
# else
# define XXH_FORCE_ALIGN_CHECK 1
# endif
#endif
#ifndef XXH_NO_INLINE_HINTS
# if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
|| defined(__NO_INLINE__) /* -O0, -fno-inline */
# define XXH_NO_INLINE_HINTS 1
# else
# define XXH_NO_INLINE_HINTS 0
# endif
#endif
#ifndef XXH32_ENDJMP
/* generally preferable for performance */
# define XXH32_ENDJMP 0
#endif
/*!
* @defgroup impl Implementation
* @{
*/
/* *************************************
* Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for ZSTD_malloc(), ZSTD_free() */
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h" /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
static void XXH_free (void* p) { ZSTD_free(p); }
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }
/* *************************************
* Compiler Specific Options
***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
#if XXH_NO_INLINE_HINTS /* disable inlining hints */
# if defined(__GNUC__) || defined(__clang__)
# define XXH_FORCE_INLINE static __attribute__((unused))
# else
# define XXH_FORCE_INLINE static
# endif
# define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
# define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER) /* Visual Studio */
# define XXH_FORCE_INLINE static __forceinline
# define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
# define XXH_FORCE_INLINE static inline
# define XXH_NO_INLINE static
#else
# define XXH_FORCE_INLINE static
# define XXH_NO_INLINE static
#endif
/* *************************************
* Debug
***************************************/
/*!
* @ingroup tuning
* @def XXH_DEBUGLEVEL
* @brief Sets the debugging level.
*
* XXH_DEBUGLEVEL is expected to be defined externally, typically via the
* compiler's command line options. The value must be a number.
*/
#ifndef XXH_DEBUGLEVEL
# ifdef DEBUGLEVEL /* backwards compat */
# define XXH_DEBUGLEVEL DEBUGLEVEL
# else
# define XXH_DEBUGLEVEL 0
# endif
#endif
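/*
 * Illustrative example (not from the original source): enabling the internal
 * assertions at build time, as documented above:
 *     cc -DXXH_DEBUGLEVEL=1 ...
 * XXH_ASSERT() then expands to assert(), which can still be silenced with NDEBUG.
 */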
#if (XXH_DEBUGLEVEL>=1)
# include <assert.h> /* note: can still be disabled with NDEBUG */
# define XXH_ASSERT(c) assert(c)
#else
# define XXH_ASSERT(c) ((void)0)
#endif
/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
# include <assert.h>
# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
# else
# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
# endif
# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif
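/*
 * Usage sketch (illustrative): as the note above says, place it after variable
 * declarations, e.g.
 *     XXH_STATIC_ASSERT(sizeof(xxh_u32) == 4);
 * which fails to compile if the condition is false.
 */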
/*!
* @internal
* @def XXH_COMPILER_GUARD(var)
* @brief Used to prevent unwanted optimizations for @p var.
*
* It uses an empty GCC inline assembly statement with a register constraint
* which forces @p var into a general purpose register (e.g. eax, ebx, ecx
* on x86) and marks it as modified.
*
* This is used in a few places to avoid unwanted autovectorization (e.g.
* XXH32_round()). All vectorization we want is explicit via intrinsics,
* and _usually_ isn't wanted elsewhere.
*
* We also use it to prevent unwanted constant folding for AArch64 in
* XXH3_initCustomSecret_scalar().
*/
#if defined(__GNUC__) || defined(__clang__)
# define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
# define XXH_COMPILER_GUARD(var) ((void)0)
#endif
/* *************************************
* Basic Types
***************************************/
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
typedef uint8_t xxh_u8;
#else
typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;
#ifdef XXH_OLD_NAMES
# define BYTE xxh_u8
# define U8 xxh_u8
# define U32 xxh_u32
#endif
/* *** Memory access *** */
/*!
* @internal
* @fn xxh_u32 XXH_read32(const void* ptr)
* @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
*
* @param ptr The pointer to read from.
* @return The 32-bit native endian integer from the bytes at @p ptr.
*/
/*!
* @internal
* @fn xxh_u32 XXH_readLE32(const void* ptr)
* @brief Reads an unaligned 32-bit little endian integer from @p ptr.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
*
* @param ptr The pointer to read from.
* @return The 32-bit little endian integer from the bytes at @p ptr.
*/
/*!
* @internal
* @fn xxh_u32 XXH_readBE32(const void* ptr)
* @brief Reads an unaligned 32-bit big endian integer from @p ptr.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
*
* @param ptr The pointer to read from.
* @return The 32-bit big endian integer from the bytes at @p ptr.
*/
/*!
* @internal
* @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
* @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
*
* Affected by @ref XXH_FORCE_MEMORY_ACCESS.
* Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
* always @ref XXH_alignment::XXH_unaligned.
*
* @param ptr The pointer to read from.
* @param align Whether @p ptr is aligned.
* @pre
* If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
* aligned.
* @return The 32-bit little endian integer from the bytes at @p ptr.
*/
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
* Manual byteshift. Best for old compilers which don't inline memcpy.
* We actually directly use XXH_readLE32 and XXH_readBE32.
*/
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/*
 * Force direct memory access. Only works on CPUs which support unaligned memory
* access in hardware.
*/
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/*
* __pack instructions are safer but compiler specific, hence potentially
* problematic for some compilers.
*
* Currently only defined for GCC and ICC.
*/
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
return ((const xxh_unalign*)ptr)->u32;
}
#else
/*
* Portable and safe solution. Generally efficient.
* see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
*/
static xxh_u32 XXH_read32(const void* memPtr)
{
xxh_u32 val;
XXH_memcpy(&val, memPtr, sizeof(val));
return val;
}
#endif /* XXH_FORCE_MEMORY_ACCESS */
/* *** Endianness *** */
/*!
* @ingroup tuning
* @def XXH_CPU_LITTLE_ENDIAN
* @brief Whether the target is little endian.
*
* Defined to 1 if the target is little endian, or 0 if it is big endian.
* It can be defined externally, for example on the compiler command line.
*
* If it is not defined,
* a runtime check (which is usually constant folded) is used instead.
*
* @note
* This is not necessarily defined to an integer constant.
*
* @see XXH_isLittleEndian() for the runtime check.
*/
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
* Try to detect endianness automatically, to avoid the nonstandard behavior
* in `XXH_isLittleEndian()`
*/
# if defined(_WIN32) /* Windows is always little endian */ \
|| defined(__LITTLE_ENDIAN__) \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
# define XXH_CPU_LITTLE_ENDIAN 1
# elif defined(__BIG_ENDIAN__) \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
# define XXH_CPU_LITTLE_ENDIAN 0
# else
/*!
* @internal
* @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
*
* Most compilers will constant fold this.
*/
static int XXH_isLittleEndian(void)
{
/*
* Portable and well-defined behavior.
* Don't use static: it is detrimental to performance.
*/
const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
return one.c[0];
}
# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
# endif
#endif
/* ****************************************
* Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
#ifdef __has_builtin
# define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
# define XXH_HAS_BUILTIN(x) 0
#endif
/*!
* @internal
* @def XXH_rotl32(x,r)
* @brief 32-bit rotate left.
*
* @param x The 32-bit integer to be rotated.
* @param r The number of bits to rotate.
* @pre
* @p r > 0 && @p r < 32
* @note
* @p x and @p r may be evaluated multiple times.
* @return The rotated result.
*/
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
&& XXH_HAS_BUILTIN(__builtin_rotateleft64)
# define XXH_rotl32 __builtin_rotateleft32
# define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
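/*
 * Worked example (illustrative): XXH_rotl32(0x80000001, 1) == 0x00000003,
 * since the top bit wraps around into bit 0. All three definitions above agree
 * for 0 < r < 32, the precondition documented above.
 */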
/*!
* @internal
* @fn xxh_u32 XXH_swap32(xxh_u32 x)
* @brief A 32-bit byteswap.
*
* @param x The 32-bit integer to byteswap.
* @return @p x, byteswapped.
*/
#if defined(_MSC_VER) /* Visual Studio */
# define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
((x >> 8) & 0x0000ff00 ) |
((x >> 24) & 0x000000ff );
}
#endif
/* ***************************
* Memory reads
*****************************/
/*!
* @internal
* @brief Enum to indicate whether a pointer is aligned.
*/
typedef enum {
XXH_aligned, /*!< Aligned */
XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;
/*
* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
*
* This is ideal for older compilers which don't inline memcpy.
*/
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
return bytePtr[0]
| ((xxh_u32)bytePtr[1] << 8)
| ((xxh_u32)bytePtr[2] << 16)
| ((xxh_u32)bytePtr[3] << 24);
}
XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
return bytePtr[3]
| ((xxh_u32)bytePtr[2] << 8)
| ((xxh_u32)bytePtr[1] << 16)
| ((xxh_u32)bytePtr[0] << 24);
}
#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}
static xxh_u32 XXH_readBE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif
XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
if (align==XXH_unaligned) {
return XXH_readLE32(ptr);
} else {
return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
}
}
/* *************************************
* Misc
***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* *******************************************************************
* 32-bit hash functions
*********************************************************************/
/*!
* @}
* @defgroup xxh32_impl XXH32 implementation
* @ingroup impl
* @{
*/
/* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
#ifdef XXH_OLD_NAMES
# define PRIME32_1 XXH_PRIME32_1
# define PRIME32_2 XXH_PRIME32_2
# define PRIME32_3 XXH_PRIME32_3
# define PRIME32_4 XXH_PRIME32_4
# define PRIME32_5 XXH_PRIME32_5
#endif
/*!
* @internal
* @brief Normal stripe processing routine.
*
* This shuffles the bits so that any bit from @p input impacts several bits in
* @p acc.
*
* @param acc The accumulator lane.
* @param input The stripe of input to mix.
* @return The mixed accumulator lane.
*/
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
acc += input * XXH_PRIME32_2;
acc = XXH_rotl32(acc, 13);
acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
/*
* UGLY HACK:
* A compiler fence is the only thing that prevents GCC and Clang from
* autovectorizing the XXH32 loop (pragmas and attributes don't work for some
* reason) without globally disabling SSE4.1.
*
* The reason we want to avoid vectorization is because despite working on
* 4 integers at a time, there are multiple factors slowing XXH32 down on
* SSE4:
* - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
* newer chips!) making it slightly slower to multiply four integers at
 * once compared to four integers independently. Even on Sandy/Ivy Bridge,
 * where pmulld was fastest, it is still not worth going into SSE just to
 * multiply unless doing a long operation.
*
* - Four instructions are required to rotate,
 * movdqa tmp, v // not required with VEX encoding
* pslld tmp, 13 // tmp <<= 13
* psrld v, 19 // x >>= 19
* por v, tmp // x |= tmp
* compared to one for scalar:
* roll v, 13 // reliably fast across the board
* shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
*
* - Instruction level parallelism is actually more beneficial here because
* the SIMD actually serializes this operation: While v1 is rotating, v2
* can load data, while v3 can multiply. SSE forces them to operate
* together.
*
* This is also enabled on AArch64, as Clang autovectorizes it incorrectly
* and it is pointless writing a NEON implementation that is basically the
* same speed as scalar for XXH32.
*/
XXH_COMPILER_GUARD(acc);
#endif
return acc;
}
/*!
* @internal
* @brief Mixes all bits to finalize the hash.
*
* The final mix ensures that all input bits have a chance to impact any bit in
* the output digest, resulting in an unbiased distribution.
*
* @param h32 The hash to avalanche.
* @return The avalanched hash.
*/
static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
h32 ^= h32 >> 15;
h32 *= XXH_PRIME32_2;
h32 ^= h32 >> 13;
h32 *= XXH_PRIME32_3;
h32 ^= h32 >> 16;
return(h32);
}
#define XXH_get32bits(p) XXH_readLE32_align(p, align)
/*!
* @internal
* @brief Processes the last 0-15 bytes of @p ptr.
*
* There may be up to 15 bytes remaining to consume from the input.
* This final stage will digest them to ensure that all input bytes are present
* in the final mix.
*
* @param h32 The hash to finalize.
* @param ptr The pointer to the remaining input.
* @param len The remaining length, modulo 16.
* @param align Whether @p ptr is aligned.
* @return The finalized hash.
*/
static xxh_u32
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do { \
h32 += (*ptr++) * XXH_PRIME32_5; \
h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \
} while (0)
#define XXH_PROCESS4 do { \
h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \
ptr += 4; \
h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \
} while (0)
if (ptr==NULL) XXH_ASSERT(len == 0);
/* Compact rerolled version; generally faster */
if (!XXH32_ENDJMP) {
len &= 15;
while (len >= 4) {
XXH_PROCESS4;
len -= 4;
}
while (len > 0) {
XXH_PROCESS1;
--len;
}
return XXH32_avalanche(h32);
} else {
switch(len&15) /* or switch(bEnd - p) */ {
case 12: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 8: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 4: XXH_PROCESS4;
return XXH32_avalanche(h32);
case 13: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 9: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 5: XXH_PROCESS4;
XXH_PROCESS1;
return XXH32_avalanche(h32);
case 14: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 10: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 6: XXH_PROCESS4;
XXH_PROCESS1;
XXH_PROCESS1;
return XXH32_avalanche(h32);
case 15: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 11: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 7: XXH_PROCESS4;
XXH_FALLTHROUGH;
case 3: XXH_PROCESS1;
XXH_FALLTHROUGH;
case 2: XXH_PROCESS1;
XXH_FALLTHROUGH;
case 1: XXH_PROCESS1;
XXH_FALLTHROUGH;
case 0: return XXH32_avalanche(h32);
}
XXH_ASSERT(0);
return h32; /* reaching this point is deemed impossible */
}
}
#ifdef XXH_OLD_NAMES
# define PROCESS1 XXH_PROCESS1
# define PROCESS4 XXH_PROCESS4
#else
# undef XXH_PROCESS1
# undef XXH_PROCESS4
#endif
/*!
* @internal
* @brief The implementation for @ref XXH32().
*
* @param input , len , seed Directly passed from @ref XXH32().
* @param align Whether @p input is aligned.
* @return The calculated hash.
*/
XXH_FORCE_INLINE xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
xxh_u32 h32;
if (input==NULL) XXH_ASSERT(len == 0);
if (len>=16) {
const xxh_u8* const bEnd = input + len;
const xxh_u8* const limit = bEnd - 15;
xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
xxh_u32 v2 = seed + XXH_PRIME32_2;
xxh_u32 v3 = seed + 0;
xxh_u32 v4 = seed - XXH_PRIME32_1;
do {
v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
} while (input < limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
} else {
h32 = seed + XXH_PRIME32_5;
}
h32 += (xxh_u32)len;
return XXH32_finalize(h32, input, len&15, align);
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH32_state_t state;
XXH32_reset(&state, seed);
XXH32_update(&state, (const xxh_u8*)input, len);
return XXH32_digest(&state);
#else
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
} }
return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
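/*
 * Usage sketch (illustrative; buffer names are hypothetical):
 *     XXH32_hash_t const h = XXH32(buffer, bufferSize, 0);   // seed = 0
 * This one-shot entry point is the fastest path when the whole input is
 * already in memory; the streaming API below handles chunked input.
 */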
/******* Hash streaming *******/
/*!
* @ingroup xxh32_family
*/
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
XXH_ASSERT(statePtr != NULL);
memset(statePtr, 0, sizeof(*statePtr));
statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
statePtr->v[1] = seed + XXH_PRIME32_2;
statePtr->v[2] = seed + 0;
statePtr->v[3] = seed - XXH_PRIME32_1;
return XXH_OK;
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
if (input==NULL) {
XXH_ASSERT(len == 0);
return XXH_OK;
}
{ const xxh_u8* p = (const xxh_u8*)input;
const xxh_u8* const bEnd = p + len;
state->total_len_32 += (XXH32_hash_t)len;
state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
if (state->memsize + len < 16) { /* fill in tmp buffer */
XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
state->memsize += (XXH32_hash_t)len;
return XXH_OK;
}
if (state->memsize) { /* some data left from previous update */
XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
{ const xxh_u32* p32 = state->mem32;
state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
}
p += 16-state->memsize;
state->memsize = 0;
}
if (p <= bEnd-16) {
const xxh_u8* const limit = bEnd - 16;
do {
state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
} while (p<=limit);
}
if (p < bEnd) {
XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
}
return XXH_OK;
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
xxh_u32 h32;
if (state->large_len) {
h32 = XXH_rotl32(state->v[0], 1)
+ XXH_rotl32(state->v[1], 7)
+ XXH_rotl32(state->v[2], 12)
+ XXH_rotl32(state->v[3], 18);
} else {
h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
}
h32 += state->total_len_32;
return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
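/*
 * Streaming usage sketch (illustrative; chunk names are hypothetical):
 *     XXH32_state_t* const s = XXH32_createState();
 *     XXH32_reset(s, 0);
 *     XXH32_update(s, chunk1, chunk1Size);
 *     XXH32_update(s, chunk2, chunk2Size);
 *     XXH32_hash_t const h = XXH32_digest(s);  // equals XXH32() over the whole input
 *     XXH32_freeState(s);
 */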
/******* Canonical representation *******/
/*!
* @ingroup xxh32_family
* The default return values from XXH functions are unsigned 32 and 64 bit
* integers.
*
* The canonical representation uses big endian convention, the same convention
* as human-readable numbers (large digits first).
*
* This way, hash values can be written into a file or buffer, remaining
* comparable across different systems.
*
* The following functions allow transformation of hash values to and from their
* canonical format.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
/* XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); */
if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
XXH_memcpy(dst, &hash, sizeof(*dst));
}
/*! @ingroup xxh32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
return XXH_readBE32(src);
}
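/*
 * Canonical round-trip sketch (illustrative): storing a hash in portable
 * big-endian form and reading it back, as described above:
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);
 *     // the bytes of c can now be written to a file or network buffer
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&c);   // h2 == h
 */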
#ifndef XXH_NO_LONG_LONG
/* *******************************************************************
* 64-bit hash functions
*********************************************************************/
/*!
* @}
* @ingroup impl
* @{
*/
/******* Memory access *******/
typedef XXH64_hash_t xxh_u64;
#ifdef XXH_OLD_NAMES
# define U64 xxh_u64
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
* Manual byteshift. Best for old compilers which don't inline memcpy.
* We actually directly use XXH_readLE64 and XXH_readBE64.
*/
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
return *(const xxh_u64*) memPtr;
}
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/*
* __pack instructions are safer, but compiler specific, hence potentially
* problematic for some compilers.
*
* Currently only defined for GCC and ICC.
*/
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
return ((const xxh_unalign64*)ptr)->u64;
}
#else
/*
* Portable and safe solution. Generally efficient.
* see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
*/
static xxh_u64 XXH_read64(const void* memPtr)
{
xxh_u64 val;
XXH_memcpy(&val, memPtr, sizeof(val));
return val;
}
#endif /* XXH_FORCE_MEMORY_ACCESS */
#if defined(_MSC_VER) /* Visual Studio */
# define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
# define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
return ((x << 56) & 0xff00000000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
((x << 24) & 0x0000ff0000000000ULL) |
((x << 8) & 0x000000ff00000000ULL) |
((x >> 8) & 0x00000000ff000000ULL) |
((x >> 24) & 0x0000000000ff0000ULL) |
((x >> 40) & 0x000000000000ff00ULL) |
((x >> 56) & 0x00000000000000ffULL);
}
#endif
/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
return bytePtr[0]
| ((xxh_u64)bytePtr[1] << 8)
| ((xxh_u64)bytePtr[2] << 16)
| ((xxh_u64)bytePtr[3] << 24)
| ((xxh_u64)bytePtr[4] << 32)
| ((xxh_u64)bytePtr[5] << 40)
| ((xxh_u64)bytePtr[6] << 48)
| ((xxh_u64)bytePtr[7] << 56);
}
XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
return bytePtr[7]
| ((xxh_u64)bytePtr[6] << 8)
| ((xxh_u64)bytePtr[5] << 16)
| ((xxh_u64)bytePtr[4] << 24)
| ((xxh_u64)bytePtr[3] << 32)
| ((xxh_u64)bytePtr[2] << 40)
| ((xxh_u64)bytePtr[1] << 48)
| ((xxh_u64)bytePtr[0] << 56);
}
#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}
static xxh_u64 XXH_readBE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif
XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
if (align==XXH_unaligned)
return XXH_readLE64(ptr);
else
return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}
/******* xxh64 *******/
/*!
* @}
* @defgroup xxh64_impl XXH64 implementation
* @ingroup impl
* @{
*/
/* #define rather than static const, to be used as initializers */
#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
#ifdef XXH_OLD_NAMES
# define PRIME64_1 XXH_PRIME64_1
# define PRIME64_2 XXH_PRIME64_2
# define PRIME64_3 XXH_PRIME64_3
# define PRIME64_4 XXH_PRIME64_4
# define PRIME64_5 XXH_PRIME64_5
#endif
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
acc += input * XXH_PRIME64_2;
acc = XXH_rotl64(acc, 31);
acc *= XXH_PRIME64_1;
return acc;
}
static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
val = XXH64_round(0, val);
acc ^= val;
acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
return acc;
}
static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
h64 ^= h64 >> 33;
h64 *= XXH_PRIME64_2;
h64 ^= h64 >> 29;
h64 *= XXH_PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
#define XXH_get64bits(p) XXH_readLE64_align(p, align)
static xxh_u64
XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
if (ptr==NULL) XXH_ASSERT(len == 0);
len &= 31;
while (len >= 8) {
xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
ptr += 8;
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
len -= 8;
}
if (len >= 4) {
h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
ptr += 4;
h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
len -= 4;
}
while (len > 0) {
h64 ^= (*ptr++) * XXH_PRIME64_5;
h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
--len;
}
return XXH64_avalanche(h64);
}
#ifdef XXH_OLD_NAMES
# define PROCESS1_64 XXH_PROCESS1_64
# define PROCESS4_64 XXH_PROCESS4_64
# define PROCESS8_64 XXH_PROCESS8_64
#else
# undef XXH_PROCESS1_64
# undef XXH_PROCESS4_64
# undef XXH_PROCESS8_64
#endif
XXH_FORCE_INLINE xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
xxh_u64 h64;
if (input==NULL) XXH_ASSERT(len == 0);
if (len>=32) {
const xxh_u8* const bEnd = input + len;
const xxh_u8* const limit = bEnd - 31;
xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
xxh_u64 v2 = seed + XXH_PRIME64_2;
xxh_u64 v3 = seed + 0;
xxh_u64 v4 = seed - XXH_PRIME64_1;
do {
v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
} while (input<limit);
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
h64 = XXH64_mergeRound(h64, v2);
h64 = XXH64_mergeRound(h64, v3);
h64 = XXH64_mergeRound(h64, v4);
} else {
h64 = seed + XXH_PRIME64_5;
}
h64 += (xxh_u64) len;
return XXH64_finalize(h64, input, len, align);
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH64_state_t state;
XXH64_reset(&state, seed);
XXH64_update(&state, (const xxh_u8*)input, len);
return XXH64_digest(&state);
#else
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
} }
return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
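/*
 * Usage sketch (illustrative): same pattern as XXH32(), with 64-bit seed and result:
 *     XXH64_hash_t const h = XXH64(buffer, bufferSize, 0);
 */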
/******* Hash Streaming *******/
/*! @ingroup xxh64_family*/
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
XXH_memcpy(dstState, srcState, sizeof(*dstState));
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
{
XXH_ASSERT(statePtr != NULL);
memset(statePtr, 0, sizeof(*statePtr));
statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
statePtr->v[1] = seed + XXH_PRIME64_2;
statePtr->v[2] = seed + 0;
statePtr->v[3] = seed - XXH_PRIME64_1;
return XXH_OK;
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH64_state_t* state, const void* input, size_t len)
{
if (input==NULL) {
XXH_ASSERT(len == 0);
return XXH_OK;
}
{ const xxh_u8* p = (const xxh_u8*)input;
const xxh_u8* const bEnd = p + len;
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
state->memsize += (xxh_u32)len;
return XXH_OK;
}
if (state->memsize) { /* tmp buffer is full */
XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
p += 32 - state->memsize;
state->memsize = 0;
}
if (p+32 <= bEnd) {
const xxh_u8* const limit = bEnd - 32;
do {
state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
} while (p<=limit);
}
if (p < bEnd) {
XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
}
return XXH_OK;
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
{
xxh_u64 h64;
if (state->total_len >= 32) {
h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
h64 = XXH64_mergeRound(h64, state->v[0]);
h64 = XXH64_mergeRound(h64, state->v[1]);
h64 = XXH64_mergeRound(h64, state->v[2]);
h64 = XXH64_mergeRound(h64, state->v[3]);
} else {
h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
}
h64 += (xxh_u64) state->total_len;
return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}
/******* Canonical representation *******/
/*! @ingroup xxh64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
/* XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); */
if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
XXH_memcpy(dst, &hash, sizeof(*dst));
}
/*! @ingroup xxh64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
return XXH_readBE64(src);
}
#ifndef XXH_NO_XXH3
/* *********************************************************************
* XXH3
* New generation hash designed for speed on small keys and vectorization
************************************************************************ */
/*!
* @}
* @defgroup xxh3_impl XXH3 implementation
* @ingroup impl
* @{
*/
/* === Compiler specifics === */
#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
# define XXH_RESTRICT /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
# define XXH_RESTRICT restrict
#else
/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
# define XXH_RESTRICT /* disable */
#endif
#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
|| (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
|| defined(__clang__)
# define XXH_likely(x) __builtin_expect(x, 1)
# define XXH_unlikely(x) __builtin_expect(x, 0)
#else
# define XXH_likely(x) (x)
# define XXH_unlikely(x) (x)
#endif
#if defined(__GNUC__) || defined(__clang__)
# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
|| defined(__aarch64__) || defined(_M_ARM) \
|| defined(_M_ARM64) || defined(_M_ARM64EC)
# define inline __inline__ /* circumvent a clang bug */
# include <arm_neon.h>
# undef inline
# elif defined(__AVX2__)
# include <immintrin.h>
# elif defined(__SSE2__)
# include <emmintrin.h>
# endif
#endif
#if defined(_MSC_VER)
# include <intrin.h>
#endif
/*
* One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
* remaining a true 64-bit/128-bit hash function.
*
* This is done by prioritizing a subset of 64-bit operations that can be
* emulated without too many steps on the average 32-bit machine.
*
* For example, these two lines seem similar, and run equally fast on 64-bit:
*
* xxh_u64 x;
* x ^= (x >> 47); // good
* x ^= (x >> 13); // bad
*
* However, to a 32-bit machine, there is a major difference.
*
* x ^= (x >> 47) looks like this:
*
* x.lo ^= (x.hi >> (47 - 32));
*
* while x ^= (x >> 13) looks like this:
*
* // note: funnel shifts are not usually cheap.
* x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
* x.hi ^= (x.hi >> 13);
*
* The first one is significantly faster than the second, simply because the
* shift is larger than 32. This means:
* - All the bits we need are in the upper 32 bits, so we can ignore the lower
* 32 bits in the shift.
* - The shift result will always fit in the lower 32 bits, and therefore,
* we can ignore the upper 32 bits in the xor.
*
* Thanks to this optimization, XXH3 only requires these features to be efficient:
*
* - Usable unaligned access
* - A 32-bit or 64-bit ALU
* - If 32-bit, a decent ADC instruction
* - A 32 or 64-bit multiply with a 64-bit result
* - For the 128-bit variant, a decent byteswap helps short inputs.
*
* The first two are already required by XXH32, and almost all 32-bit and 64-bit
* platforms which can run XXH32 can run XXH3 efficiently.
*
* Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
* notable exception.
*
* First of all, Thumb-1 lacks support for the UMULL instruction which
* performs the important long multiply. This means numerous __aeabi_lmul
* calls.
*
* Second of all, the 8 functional registers are just not enough.
* Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
* Lo registers, and this shuffling results in thousands more MOVs than A32.
*
* A32 and T32 don't have this limitation. They can access all 14 registers,
* do a 32->64 multiply with UMULL, and the flexible operand allowing free
* shifts is helpful, too.
*
* Therefore, we do a quick sanity check.
*
* If compiling Thumb-1 for a target which supports ARM instructions, we will
* emit a warning, as it is not a "sane" platform to compile for.
*
* Usually, if this happens, it is because of an accident and you probably need
* to specify -march, as you likely meant to compile for a newer architecture.
*
* Credit: large sections of the vectorial and asm source code paths
* have been contributed by @easyaspi314
*/
#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
# warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif
/* ==========================================
* Vectorization detection
* ========================================== */
#ifdef XXH_DOXYGEN
/*!
* @ingroup tuning
* @brief Overrides the vectorization implementation chosen for XXH3.
*
* Can be defined to 0 to disable SIMD or any of the values mentioned in
* @ref XXH_VECTOR_TYPE.
*
* If this is not defined, it uses predefined macros to determine the best
* implementation.
*/
# define XXH_VECTOR XXH_SCALAR
/*!
* @ingroup tuning
* @brief Possible values for @ref XXH_VECTOR.
*
* Note that these are actually implemented as macros.
*
* If this is not defined, it is detected automatically.
* @ref XXH_X86DISPATCH overrides this.
*/
enum XXH_VECTOR_TYPE /* fake enum */ {
XXH_SCALAR = 0, /*!< Portable scalar version */
XXH_SSE2 = 1, /*!<
* SSE2 for Pentium 4, Opteron, all x86_64.
*
* @note SSE2 is also guaranteed on Windows 10, macOS, and
* Android x86.
*/
XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */
XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
};
/*!
* @ingroup tuning
* @brief Selects the minimum alignment for XXH3's accumulators.
*
* When using SIMD, this should match the alignment required for said vector
* type, so, for example, 32 for AVX2.
*
* Default: Auto detected.
*/
# define XXH_ACC_ALIGN 8
#endif
/* Actual definition */
#ifndef XXH_DOXYGEN
# define XXH_SCALAR 0
# define XXH_SSE2 1
# define XXH_AVX2 2
# define XXH_AVX512 3
# define XXH_NEON 4
# define XXH_VSX 5
#endif
#ifndef XXH_VECTOR /* can be defined on command line */
# if ( \
defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
|| defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
) && ( \
defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
)
# define XXH_VECTOR XXH_NEON
# elif defined(__AVX512F__)
# define XXH_VECTOR XXH_AVX512
# elif defined(__AVX2__)
# define XXH_VECTOR XXH_AVX2
# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
# define XXH_VECTOR XXH_SSE2
# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
|| (defined(__s390x__) && defined(__VEC__)) \
&& defined(__GNUC__) /* TODO: IBM XL */
# define XXH_VECTOR XXH_VSX
# else
# define XXH_VECTOR XXH_SCALAR
# endif
#endif
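/*
 * Illustrative override (not from the original source): as documented above,
 * XXH_VECTOR can also be set on the command line to bypass auto-detection,
 * e.g. -DXXH_VECTOR=0 to force the portable scalar code path (XXH_SCALAR).
 */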
/* These definitions are only meant to allow allocation of XXH state
statically, on stack, or in a struct for example.
Do not use members directly. */
/*
* Controls the alignment of the accumulator,
* for compatibility with aligned vector loads, which are usually faster.
*/
#ifndef XXH_ACC_ALIGN
# if defined(XXH_X86DISPATCH)
# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
# elif XXH_VECTOR == XXH_SCALAR /* scalar */
# define XXH_ACC_ALIGN 8
# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
# define XXH_ACC_ALIGN 16
# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
# define XXH_ACC_ALIGN 32
# elif XXH_VECTOR == XXH_NEON /* neon */
# define XXH_ACC_ALIGN 16
# elif XXH_VECTOR == XXH_VSX /* vsx */
# define XXH_ACC_ALIGN 16
# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
# define XXH_ACC_ALIGN 64
# endif
#endif
struct XXH32_state_s {
unsigned total_len_32;
unsigned large_len;
unsigned v[4]; /* accumulator lanes */
unsigned mem32[4]; /* buffer defined as U32 for alignment */
unsigned memsize;
unsigned reserved; /* never read nor written, will be removed in a future version */
}; /* typedef'd to XXH32_state_t */
#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
|| XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
# define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
# define XXH_SEC_ALIGN 8
#endif
struct XXH64_state_s {
unsigned long long total_len;
unsigned long long v[4]; /* accumulator lanes */
unsigned long long mem64[4]; /* buffer defined as U64 for alignment */
unsigned memsize;
unsigned reserved[2]; /* never read nor written, will be removed in a future version */
}; /* typedef'd to XXH64_state_t */
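/*
 * Illustrative note (not from the original source): because these structures
 * are fully defined, a state can also live on the stack instead of going
 * through XXH32_createState()/XXH64_createState(), e.g.
 *     XXH32_state_t st;
 *     XXH32_reset(&st, 0);
 * Members must still never be accessed directly, as stated above.
 */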
/*
* UGLY HACK:
* GCC usually generates the best code with -O3 for xxHash.
*
* However, when targeting AVX2, it is overzealous in its unrolling resulting
* in code roughly 3/4 the speed of Clang.
*
* There are other issues, such as GCC splitting _mm256_loadu_si256 into
* _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
* only applies to Sandy and Ivy Bridge... which don't even support AVX2.
*
* That is why when compiling the AVX2 version, it is recommended to use either
* -O2 -mavx2 -march=haswell
* or
* -O2 -mavx2 -mno-avx256-split-unaligned-load
* for decent performance, or to use Clang instead.
*
* Fortunately, we can control the first one with a pragma that forces GCC into
* -O2, but the other one we can't control without "failed to inline always
* inline function due to target mismatch" warnings.
*/
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
&& defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
# pragma GCC push_options
# pragma GCC optimize("-O2")
#endif
#if XXH_VECTOR == XXH_NEON
/*
* NEON's setup for vmlal_u32 is a little more complicated than it is on
* SSE2, AVX2, and VSX.
*
* While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
*
* To do the same operation, the 128-bit 'Q' register needs to be split into
 * two 64-bit 'D' registers, performing this operation:
*
* [ a | b ]
* | '---------. .--------' |
* | x |
* | .---------' '--------. |
* [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ]
*
* Due to significant changes in aarch64, the fastest method for aarch64 is
* completely different than the fastest method for ARMv7-A.
*
* ARMv7-A treats D registers as unions overlaying Q registers, so modifying
* D11 will modify the high half of Q5. This is similar to how modifying AH
* will only affect bits 8-15 of AX on x86.
*
* VZIP takes two registers, and puts even lanes in one register and odd lanes
* in the other.
*
* On ARMv7-A, this strangely modifies both parameters in place instead of
* taking the usual 3-operand form.
*
* Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
* lower and upper halves of the Q register to end up with the high and low
* halves where we want - all in one instruction.
*
* vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
*
 * Unfortunately we need inline assembly for this: instructions that modify two
 * registers at once are not representable in GCC or Clang's IR, so the
 * compilers would have to create a copy.
*
* aarch64 requires a different approach.
*
* In order to make it easier to write a decent compiler for aarch64, many
* quirks were removed, such as conditional execution.
*
* NEON was also affected by this.
*
* aarch64 cannot access the high bits of a Q-form register, and writes to a
* D-form register zero the high bits, similar to how writes to W-form scalar
* registers (or DWORD registers on x86_64) work.
*
* The formerly free vget_high intrinsics now require a vext (with a few
* exceptions)
*
* Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
* of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
* operand.
*
* The equivalent of the VZIP.32 on the lower and upper halves would be this
* mess:
*
* ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
* zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] }
* zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
*
* Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
*
* shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
* xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
*
* This is available on ARMv7-A, but is less efficient than a single VZIP.32.
*/
/*!
* Function-like macro:
* void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi)
* {
* outLo = (uint32x2_t)(in & 0xFFFFFFFF);
* outHi = (uint32x2_t)(in >> 32);
* in = UNDEFINED;
* }
*/
# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
&& (defined(__GNUC__) || defined(__clang__)) \
&& (defined(__arm__) || defined(__thumb__) || defined(_M_ARM))
# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
do { \
/* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
/* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
/* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
__asm__("vzip.32 %e0, %f0" : "+w" (in)); \
(outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \
(outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
} while (0)
# else
# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
do { \
(outLo) = vmovn_u64 (in); \
(outHi) = vshrn_n_u64 ((in), 32); \
} while (0)
# endif
/*!
* @ingroup tuning
* @brief Controls the NEON to scalar ratio for XXH3
*
* On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and
* 2 lanes on scalar by default.
*
* This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the
* emulated 64-bit arithmetic is too slow.
*
* Modern ARM CPUs are _very_ sensitive to how their pipelines are used.
*
* For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't
* have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
* you are only using 2/3 of the CPU bandwidth.
*
* This is even more noticeable on the more advanced cores like the A76 which
* can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
*
* Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the
* remaining lanes will use scalar instructions. This improves the bandwidth
* and also gives the integer pipelines something to do besides twiddling loop
* counters and pointers.
*
* This change benefits CPUs with large micro-op buffers without negatively affecting
* other CPUs:
*
* | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
* |:----------------------|:--------------------|----------:|-----------:|------:|
* | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
* | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
* | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
*
* It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
*
* @see XXH3_accumulate_512_neon()
*/
# ifndef XXH3_NEON_LANES
# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
&& !defined(__OPTIMIZE_SIZE__)
# define XXH3_NEON_LANES 6
# else
# define XXH3_NEON_LANES XXH_ACC_NB
# endif
# endif
#endif /* XXH_VECTOR == XXH_NEON */
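/*
 * Illustrative override (not from the original source): the NEON/scalar lane
 * split documented above can be pinned at build time, e.g. -DXXH3_NEON_LANES=8
 * to run every lane on NEON, or 4 where the 6:2 hybrid does not pay off.
 */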
/*
* VSX and Z Vector helpers.
*
* This is very messy, and any pull requests to clean this up are welcome.
*
* There are a lot of problems with supporting VSX and s390x, due to
* inconsistent intrinsics, spotty coverage, and multiple endiannesses.
*/
#if XXH_VECTOR == XXH_VSX
# if defined(__s390x__)
# include <s390intrin.h>
# else
/* gcc's altivec.h can have the unwanted consequence of unconditionally
 * #defining the bool, vector, and pixel keywords,
 * with bad consequences for programs already using these identifiers for other purposes.
* The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
* __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
* but it seems that, in some cases, it isn't.
* Force the build macro to be defined, so that keywords are not altered.
*/
# if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
# define __APPLE_ALTIVEC__
# endif
# include <altivec.h>
# endif
typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;
# ifndef XXH_VSX_BE
# if defined(__BIG_ENDIAN__) \
|| (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
# define XXH_VSX_BE 1
# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
# warning "-maltivec=be is not recommended. Please use native endianness."
# define XXH_VSX_BE 1
# else
# define XXH_VSX_BE 0
# endif
# endif /* !defined(XXH_VSX_BE) */
# if XXH_VSX_BE
# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
# define XXH_vec_revb vec_revb
# else
/*!
* A polyfill for POWER9's vec_revb().
*/
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
return vec_perm(val, val, vByteSwap);
}
# endif
# endif /* XXH_VSX_BE */
/*!
* Performs an unaligned vector load and byte swaps it on big endian.
*/
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
xxh_u64x2 ret;
XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
# if XXH_VSX_BE
ret = XXH_vec_revb(ret);
# endif
return ret;
}
/*
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
 *
 * These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on the
 * version.
 */
# if defined(__s390x__)
/* s390x is always big endian, no issue on this platform */
# define XXH_vec_mulo vec_mulo
# define XXH_vec_mule vec_mule
# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
# define XXH_vec_mulo __builtin_altivec_vmulouw
# define XXH_vec_mule __builtin_altivec_vmuleuw
# else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
xxh_u64x2 result;
__asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
xxh_u64x2 result;
__asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
return result;
}
# endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */
/* prefetch
* can be disabled, by declaring XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
#else
# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# else
# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
# endif
#endif /* XXH_NO_PREFETCH */
/* ==========================================
* XXH3 default settings
* ========================================== */
#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
# error "default keyset is not large enough"
#endif
/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};
#ifdef XXH_OLD_NAMES
# define kSecret XXH3_kSecret
#endif
#ifdef XXH_DOXYGEN
/*!
* @brief Calculates a 32-bit to 64-bit long multiply.
*
* Implemented as a macro.
*
* Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
* need to (but it shouldn't need to anyways, it is about 7 instructions to do
* a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
* use that instead of the normal method.
*
* If you are compiling for platforms like Thumb-1 and don't have a better option,
* you may also want to write your own long multiply routine here.
*
* @param x, y Numbers to be multiplied
* @return 64-bit product of the low 32 bits of @p x and @p y.
*/
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
* Downcast + upcast is usually better than masking on older compilers like
* GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
*
* The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
* and perform a full 64x64 multiply -- entirely redundant on 32-bit.
*/
# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
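/*
 * Worked example: only the low 32 bits of each argument participate, so
 * XXH_mult32to64(0x1FFFFFFFFULL, 3) == 0xFFFFFFFFULL * 3 == 0x2FFFFFFFDULL.
 */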
/*!
* @brief Calculates a 64->128-bit long multiply.
*
* Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
* version.
*
* @param lhs , rhs The 64-bit integers to be multiplied
* @return The 128-bit result represented in an @ref XXH128_hash_t.
*/
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
/*
* GCC/Clang __uint128_t method.
*
* On most 64-bit targets, GCC and Clang define a __uint128_t type.
* This is usually the best way, as it typically compiles to a native long
* multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
*
* Usually.
*
* However, even on some 32-bit platforms, Clang (and Emscripten) define this type
* despite not having native arithmetic for it. The multiply then lowers to a slow
* compiler builtin call which computes a full 128-bit product.
* In that case it is best to use the portable implementation.
* https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
*/
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
&& defined(__SIZEOF_INT128__) \
|| (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
__uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
XXH128_hash_t r128;
r128.low64 = (xxh_u64)(product);
r128.high64 = (xxh_u64)(product >> 64);
return r128;
/*
* MSVC for x64's _umul128 method.
*
* xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
*
* This compiles to single operand MUL on x64.
*/
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
#ifndef _MSC_VER
# pragma intrinsic(_umul128)
#endif
xxh_u64 product_high;
xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
XXH128_hash_t r128;
r128.low64 = product_low;
r128.high64 = product_high;
return r128;
/*
* MSVC for ARM64's __umulh method.
*
* This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
*/
#elif defined(_M_ARM64) || defined(_M_ARM64EC)
#ifndef _MSC_VER
# pragma intrinsic(__umulh)
#endif
XXH128_hash_t r128;
r128.low64 = lhs * rhs;
r128.high64 = __umulh(lhs, rhs);
return r128;
#else
/*
* Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
*
* This is a fast and simple grade school multiply, which is shown below
* with base 10 arithmetic instead of base 0x100000000.
*
* 9 3 // D2 lhs = 93
* x 7 5 // D2 rhs = 75
* ----------
* 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
* 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
* 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
* + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
* ---------
* 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
* + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
* ---------
* 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
*
* The reasons for adding the products like this are:
* 1. It avoids manual carry tracking. Just like how
* (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
* This avoids a lot of complexity.
*
* 2. It hints for, and on Clang, compiles to, the powerful UMAAL
* instruction available in ARM's Digital Signal Processing extension
* in 32-bit ARMv6 and later, which is shown below:
*
* void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
* {
* xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
* *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
* *RdHi = (xxh_u32)(product >> 32);
* }
*
* This instruction was designed for efficient long multiplication, and
* allows this to be calculated in only 4 instructions at speeds
* comparable to some 64-bit ALUs.
*
* 3. It isn't terrible on other platforms. Usually this will be a couple
* of 32-bit ADD/ADCs.
*/
/* First calculate all of the cross products. */
xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
/* Now add the products together. These will never overflow. */
xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
XXH128_hash_t r128;
r128.low64 = lower;
r128.high64 = upper;
return r128;
#endif
}
/*!
* @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
*
* The reason for the separate function is to prevent passing too many structs
* around by value. This will hopefully inline the multiply, but we don't force it.
*
* @param lhs , rhs The 64-bit integers to multiply
* @return The low 64 bits of the product XOR'd by the high 64 bits.
* @see XXH_mult64to128()
*/
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
return product.low64 ^ product.high64;
}
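/*
 * Worked example: for lhs = rhs = (1ULL << 32), the full product is 2^64,
 * so low64 == 0 and high64 == 1, and the folded result is 0 ^ 1 == 1.
 */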
/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
XXH_ASSERT(0 <= shift && shift < 64);
return v64 ^ (v64 >> shift);
}
/*
* This is a fast avalanche stage,
* suitable when input bits are already partially mixed
*/
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
h64 = XXH_xorshift64(h64, 37);
h64 *= 0x165667919E3779F9ULL;
h64 = XXH_xorshift64(h64, 32);
return h64;
}
/*
* This is a stronger avalanche,
* inspired by Pelle Evensen's rrmxmx
* preferable when input has not been previously mixed
*/
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
/* this mix is inspired by Pelle Evensen's rrmxmx */
h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
h64 *= 0x9FB21C651E98DF25ULL;
h64 ^= (h64 >> 35) + len ;
h64 *= 0x9FB21C651E98DF25ULL;
return XXH_xorshift64(h64, 28);
}
/* ==========================================
* Short keys
* ==========================================
* One of the shortcomings of XXH32 and XXH64 was that their performance was
* sub-optimal on short lengths. They used an iterative algorithm which strongly
* favored lengths that were a multiple of 4 or 8.
*
* Instead of iterating over individual inputs, we use a set of single shot
* functions which piece together a range of lengths and operate in constant time.
*
* Additionally, the number of multiplies has been significantly reduced. This
* reduces latency, especially when emulating 64-bit multiplies on 32-bit.
*
* Depending on the platform, this may or may not be faster than XXH32, but it
* is almost guaranteed to be faster than XXH64.
*/
/*
* At very short lengths, there isn't enough input to fully hide secrets, or use
* the entire secret.
*
* There is also only a limited amount of mixing we can do before significantly
* impacting performance.
*
* Therefore, we use different sections of the secret and always mix two secret
* samples with an XOR. This should have no effect on performance on the
* seedless or withSeed variants because everything _should_ be constant folded
* by modern compilers.
*
* The XOR mixing hides individual parts of the secret and increases entropy.
*
* This adds an extra layer of strength for custom secrets.
*/
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(1 <= len && len <= 3);
XXH_ASSERT(secret != NULL);
/*
* len = 1: combined = { input[0], 0x01, input[0], input[0] }
* len = 2: combined = { input[1], 0x02, input[0], input[1] }
* len = 3: combined = { input[2], 0x03, input[0], input[1] }
*/
{ xxh_u8 const c1 = input[0];
xxh_u8 const c2 = input[len >> 1];
xxh_u8 const c3 = input[len - 1];
xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
| ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
return XXH64_avalanche(keyed);
}
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(secret != NULL);
XXH_ASSERT(4 <= len && len <= 8);
seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
{ xxh_u32 const input1 = XXH_readLE32(input);
xxh_u32 const input2 = XXH_readLE32(input + len - 4);
xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
xxh_u64 const keyed = input64 ^ bitflip;
return XXH3_rrmxmx(keyed, len);
}
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(secret != NULL);
XXH_ASSERT(9 <= len && len <= 16);
{ xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
xxh_u64 const acc = len
+ XXH_swap64(input_lo) + input_hi
+ XXH3_mul128_fold64(input_lo, input_hi);
return XXH3_avalanche(acc);
}
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(len <= 16);
{ if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
}
}
/*
* DISCLAIMER: There are known *seed-dependent* multicollisions here due to
* multiplication by zero, affecting hashes of lengths 17 to 240.
*
* However, they are very unlikely.
*
* Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
* unseeded non-cryptographic hashes, it does not attempt to defend itself
* against specially crafted inputs, only random inputs.
*
* Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
* cancelling out the secret is taken an arbitrary number of times (addressed
* in XXH3_accumulate_512), this collision is very unlikely with random inputs
* and/or proper seeding:
*
* This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
* function that is only called up to 16 times per hash with up to 240 bytes of
* input.
*
* This is not too bad for a non-cryptographic hash function, especially with
* only 64 bit outputs.
*
* The 128-bit variant (which trades some speed for strength) is NOT affected
* by this, although it is always a good idea to use a proper seed if you care
* about strength.
*/
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
&& !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
/*
* UGLY HACK:
* GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
* slower code.
*
* By forcing seed64 into a register, we disrupt the cost model and
* cause it to scalarize. See `XXH32_round()`
*
* FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
* XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
* GCC 9.2, despite both emitting scalar code.
*
* GCC generates much better scalar code than Clang for the rest of XXH3,
* which is why finding a more optimal codepath is an interest.
*/
XXH_COMPILER_GUARD(seed64);
#endif
{ xxh_u64 const input_lo = XXH_readLE64(input);
xxh_u64 const input_hi = XXH_readLE64(input+8);
return XXH3_mul128_fold64(
input_lo ^ (XXH_readLE64(secret) + seed64),
input_hi ^ (XXH_readLE64(secret+8) - seed64)
);
}
}
/* For mid range keys, XXH3 uses a Mum-hash variant. */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(16 < len && len <= 128);
{ xxh_u64 acc = len * XXH_PRIME64_1;
if (len > 32) {
if (len > 64) {
if (len > 96) {
acc += XXH3_mix16B(input+48, secret+96, seed);
acc += XXH3_mix16B(input+len-64, secret+112, seed);
}
acc += XXH3_mix16B(input+32, secret+64, seed);
acc += XXH3_mix16B(input+len-48, secret+80, seed);
}
acc += XXH3_mix16B(input+16, secret+32, seed);
acc += XXH3_mix16B(input+len-32, secret+48, seed);
}
acc += XXH3_mix16B(input+0, secret+0, seed);
acc += XXH3_mix16B(input+len-16, secret+16, seed);
return XXH3_avalanche(acc);
}
}
#define XXH3_MIDSIZE_MAX 240
XXH_NO_INLINE XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
#define XXH3_MIDSIZE_STARTOFFSET 3
#define XXH3_MIDSIZE_LASTOFFSET 17
{ xxh_u64 acc = len * XXH_PRIME64_1;
int const nbRounds = (int)len / 16;
int i;
for (i=0; i<8; i++) {
acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
}
acc = XXH3_avalanche(acc);
XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__) /* Clang */ \
&& (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
&& !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
/*
* UGLY HACK:
* Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
* Everywhere else, it uses scalar code.
*
* For 64->128-bit multiplies, even if the NEON was 100% optimal, it
* would still be slower than UMAAL (see XXH_mult64to128).
*
* Unfortunately, Clang doesn't handle the long multiplies properly and
* converts them to the nonexistent "vmulq_u64" intrinsic, which is then
* scalarized into an ugly mess of VMOV.32 instructions.
*
* This mess is difficult to avoid without turning autovectorization
* off completely, but these issues are usually relatively minor and/or not
* worth fixing.
*
* This loop is the easiest to fix, as unlike XXH32, this pragma
* _actually works_ because it is a loop vectorization instead of an
* SLP vectorization.
*/
#pragma clang loop vectorize(disable)
#endif
for (i=8 ; i < nbRounds; i++) {
acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
}
/* last bytes */
acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
return XXH3_avalanche(acc);
}
}
/* ======= Long Keys ======= */
#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
#ifdef XXH_OLD_NAMES
# define STRIPE_LEN XXH_STRIPE_LEN
# define ACC_NB XXH_ACC_NB
#endif
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
XXH_memcpy(dst, &v64, sizeof(v64));
}
/* Several intrinsic functions below are supposed to accept __int64 as argument,
* as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
* However, several environments do not define __int64 type,
* requiring a workaround.
*/
#if !defined (__VMS) \
&& (defined (__cplusplus) \
|| (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
typedef int64_t xxh_i64;
#else
/* the following type must have a width of 64-bit */
typedef long long xxh_i64;
#endif
/*
* XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
*
* It is a hardened version of UMAC, based off of FARSH's implementation.
*
* This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
* implementations, and it is ridiculously fast.
*
* We harden it by mixing the original input into the accumulators as well as the product.
*
* This means that in the (relatively likely) case of a multiply by zero, the
* original input is preserved.
*
* On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
* cross-pollination, as otherwise the upper and lower halves would be
* essentially independent.
*
* This doesn't matter on 64-bit hashes since they all get merged together in
* the end, so we skip the extra step.
*
* Both XXH3_64bits and XXH3_128bits use this subroutine.
*/
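/*
 * Every vectorized variant below performs, per 64-bit lane, essentially the
 * same work as the portable XXH3_scalarRound() defined further down:
 *
 *   data_key     = input64 ^ secret64;
 *   acc[lane^1] += input64;                      (input mixed into the swapped lane)
 *   acc[lane]   += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 *
 * Only the register width changes between implementations.
 */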
#if (XXH_VECTOR == XXH_AVX512) \
|| (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512 /* disable attribute target */
#endif
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
__m512i* const xacc = (__m512i *) acc;
XXH_ASSERT((((size_t)acc) & 63) == 0);
XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
{
/* data_vec = input[0]; */
__m512i const data_vec = _mm512_loadu_si512 (input);
/* key_vec = secret[0]; */
__m512i const key_vec = _mm512_loadu_si512 (secret);
/* data_key = data_vec ^ key_vec; */
__m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
/* data_key_lo = data_key >> 32; */
__m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
/* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
__m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
/* xacc[0] += swap(data_vec); */
__m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
__m512i const sum = _mm512_add_epi64(*xacc, data_swap);
/* xacc[0] += product; */
*xacc = _mm512_add_epi64(product, sum);
}
}
/*
* XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
*
* Multiplication isn't perfect, as explained by Google in HighwayHash:
*
* // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
* // varying degrees. In descending order of goodness, bytes
* // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
* // As expected, the upper and lower bytes are much worse.
*
* Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
*
* Since our algorithm uses a pseudorandom secret to add some variance into the
* mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
*
* This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
* extraction.
*
* Both XXH3_64bits and XXH3_128bits use this subroutine.
*/
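/*
 * Per 64-bit lane, each vectorized scramble below matches the portable
 * XXH3_scalarScrambleRound() defined further down:
 *
 *   acc64  = acc[lane];
 *   acc64 ^= acc64 >> 47;
 *   acc64 ^= secret64;
 *   acc64 *= XXH_PRIME32_1;     (the SIMD code splits this into two 32x32->64 multiplies)
 *   acc[lane] = acc64;
 */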
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 63) == 0);
XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
{ __m512i* const xacc = (__m512i*) acc;
const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
/* xacc[0] ^= (xacc[0] >> 47) */
__m512i const acc_vec = *xacc;
__m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
__m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted);
/* xacc[0] ^= secret; */
__m512i const key_vec = _mm512_loadu_si512 (secret);
__m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
/* xacc[0] *= XXH_PRIME32_1; */
__m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
__m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
__m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
*xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
}
}
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
XXH_ASSERT(((size_t)customSecret & 63) == 0);
(void)(&XXH_writeLE64);
{ int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
__m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
__m512i* const dest = ( __m512i*) customSecret;
int i;
XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
XXH_ASSERT(((size_t)dest & 63) == 0);
for (i=0; i < nbRounds; ++i) {
/* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
* this will warn "discards 'const' qualifier". */
union {
const __m512i* cp;
void* p;
} remote_const_void;
remote_const_void.cp = src + i;
dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
} }
}
#endif
#if (XXH_VECTOR == XXH_AVX2) \
|| (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2 /* disable attribute target */
#endif
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 31) == 0);
{ __m256i* const xacc = (__m256i *) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
* _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
const __m256i* const xinput = (const __m256i *) input;
/* Unaligned. This is mainly for pointer arithmetic, and because
* _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
const __m256i* const xsecret = (const __m256i *) secret;
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
/* data_vec = xinput[i]; */
__m256i const data_vec = _mm256_loadu_si256 (xinput+i);
/* key_vec = xsecret[i]; */
__m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
/* data_key = data_vec ^ key_vec; */
__m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
/* data_key_lo = data_key >> 32; */
__m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
/* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
__m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
/* xacc[i] += swap(data_vec); */
__m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
__m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
/* xacc[i] += product; */
xacc[i] = _mm256_add_epi64(product, sum);
} }
}
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 31) == 0);
{ __m256i* const xacc = (__m256i*) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
* _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
const __m256i* const xsecret = (const __m256i *) secret;
const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
/* xacc[i] ^= (xacc[i] >> 47) */
__m256i const acc_vec = xacc[i];
__m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
__m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
/* xacc[i] ^= xsecret; */
__m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
__m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
/* xacc[i] *= XXH_PRIME32_1; */
__m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
__m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
__m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
}
}
}
XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
(void)(&XXH_writeLE64);
XXH_PREFETCH(customSecret);
{ __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
__m256i* dest = ( __m256i*) customSecret;
# if defined(__GNUC__) || defined(__clang__)
/*
* On GCC & Clang, marking 'dest' as modified causes the compiler to:
* - not extract the secret from SSE registers in the internal loop
* - use fewer registers, and avoid pushing these registers onto the stack
*/
XXH_COMPILER_GUARD(dest);
# endif
XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
XXH_ASSERT(((size_t)dest & 31) == 0);
/* GCC -O2 needs the loop unrolled manually */
dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
}
}
#endif
/* x86dispatch always generates SSE2 */
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
#ifndef XXH_TARGET_SSE2
# define XXH_TARGET_SSE2 /* disable attribute target */
#endif
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
/* SSE2 is just a half-scale version of the AVX2 version. */
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ __m128i* const xacc = (__m128i *) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
* _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
const __m128i* const xinput = (const __m128i *) input;
/* Unaligned. This is mainly for pointer arithmetic, and because
* _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
const __m128i* const xsecret = (const __m128i *) secret;
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
/* data_vec = xinput[i]; */
__m128i const data_vec = _mm_loadu_si128 (xinput+i);
/* key_vec = xsecret[i]; */
__m128i const key_vec = _mm_loadu_si128 (xsecret+i);
/* data_key = data_vec ^ key_vec; */
__m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
/* data_key_lo = data_key >> 32; */
__m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
/* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
__m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
/* xacc[i] += swap(data_vec); */
__m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
__m128i const sum = _mm_add_epi64(xacc[i], data_swap);
/* xacc[i] += product; */
xacc[i] = _mm_add_epi64(product, sum);
} }
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ __m128i* const xacc = (__m128i*) acc;
/* Unaligned. This is mainly for pointer arithmetic, and because
* _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
const __m128i* const xsecret = (const __m128i *) secret;
const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
size_t i;
for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
/* xacc[i] ^= (xacc[i] >> 47) */
__m128i const acc_vec = xacc[i];
__m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
__m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
/* xacc[i] ^= xsecret[i]; */
__m128i const key_vec = _mm_loadu_si128 (xsecret+i);
__m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
/* xacc[i] *= XXH_PRIME32_1; */
__m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
__m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
__m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
}
}
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
(void)(&XXH_writeLE64);
{ int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
/* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
__m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
# else
__m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
# endif
int i;
const void* const src16 = XXH3_kSecret;
__m128i* dst16 = (__m128i*) customSecret;
# if defined(__GNUC__) || defined(__clang__)
/*
* On GCC & Clang, marking 'dest' as modified causes the compiler to:
* - not extract the secret from SSE registers in the internal loop
* - use fewer registers, and avoid pushing these registers onto the stack
*/
XXH_COMPILER_GUARD(dst16);
# endif
XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
XXH_ASSERT(((size_t)dst16 & 15) == 0);
for (i=0; i < nbRounds; ++i) {
dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
} }
}
#endif
#if (XXH_VECTOR == XXH_NEON)
/* forward declarations for the scalar routines */
XXH_FORCE_INLINE void
XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
void const* XXH_RESTRICT secret, size_t lane);
XXH_FORCE_INLINE void
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
void const* XXH_RESTRICT secret, size_t lane);
/*!
* @internal
* @brief The bulk processing loop for NEON.
*
* The NEON code path is actually partially scalar when running on AArch64. This
* is to optimize the pipelining and can yield up to a 15% speedup depending on the
* CPU, and it also mitigates some GCC codegen issues.
*
* @see XXH3_NEON_LANES for configuring this and details about this optimization.
*/
XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
{
uint64x2_t* const xacc = (uint64x2_t *) acc;
/* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
uint8_t const* const xinput = (const uint8_t *) input;
uint8_t const* const xsecret = (const uint8_t *) secret;
size_t i;
/* NEON for the first few lanes (these loops are normally interleaved) */
for (i=0; i < XXH3_NEON_LANES / 2; i++) {
/* data_vec = xinput[i]; */
uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
/* key_vec = xsecret[i]; */
uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
uint64x2_t data_key;
uint32x2_t data_key_lo, data_key_hi;
/* xacc[i] += swap(data_vec); */
uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
uint64x2_t const swapped = vextq_u64(data64, data64, 1);
xacc[i] = vaddq_u64 (xacc[i], swapped);
/* data_key = data_vec ^ key_vec; */
data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
/* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
* data_key_hi = (uint32x2_t) (data_key >> 32);
* data_key = UNDEFINED; */
XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
/* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
}
/* Scalar for the remainder. This may be a zero iteration loop. */
for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
XXH3_scalarRound(acc, input, secret, i);
}
}
}
XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ uint64x2_t* xacc = (uint64x2_t*) acc;
uint8_t const* xsecret = (uint8_t const*) secret;
uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
size_t i;
/* NEON for the first few lanes (these loops are normally interleaved) */
for (i=0; i < XXH3_NEON_LANES / 2; i++) {
/* xacc[i] ^= (xacc[i] >> 47); */
uint64x2_t acc_vec = xacc[i];
uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
/* xacc[i] ^= xsecret[i]; */
uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16));
uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec));
/* xacc[i] *= XXH_PRIME32_1 */
uint32x2_t data_key_lo, data_key_hi;
/* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF);
* data_key_hi = (uint32x2_t) (xacc[i] >> 32);
* xacc[i] = UNDEFINED; */
XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
{ /*
* prod_hi = (data_key >> 32) * XXH_PRIME32_1;
*
* Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
* incorrectly "optimize" this:
* tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
* shifted = vshll_n_u32(tmp, 32);
* to this:
* tmp = "vmulq_u64"(a, b); // no such thing!
* shifted = vshlq_n_u64(tmp, 32);
*
* However, unlike SSE, Clang lacks a 64-bit multiply routine
* for NEON, and it scalarizes two 64-bit multiplies instead.
*
* vmull_u32 has the same timing as vmul_u32, and it avoids
* this bug completely.
* See https://bugs.llvm.org/show_bug.cgi?id=39967
*/
uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
/* xacc[i] = prod_hi << 32; */
xacc[i] = vshlq_n_u64(prod_hi, 32);
/* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */
xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
}
}
/* Scalar for the remainder. This may be a zero iteration loop. */
for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
XXH3_scalarScrambleRound(acc, secret, i);
}
}
}
#endif
#if (XXH_VECTOR == XXH_VSX)
XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
/* presumed aligned */
unsigned int* const xacc = (unsigned int*) acc;
xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */
xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
xxh_u64x2 const v32 = { 32, 32 };
size_t i;
for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
/* data_vec = xinput[i]; */
xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
/* key_vec = xsecret[i]; */
xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
xxh_u64x2 const data_key = data_vec ^ key_vec;
/* shuffled = (data_key << 32) | (data_key >> 32); */
xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
/* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
/* acc_vec = xacc[i]; */
xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 * i);
acc_vec += product;
/* swap high and low halves */
#ifdef __s390x__
acc_vec += vec_permi(data_vec, data_vec, 2);
#else
acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
/* xacc[i] = acc_vec; */
vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i);
}
}
XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
XXH_ASSERT((((size_t)acc) & 15) == 0);
{ xxh_u64x2* const xacc = (xxh_u64x2*) acc;
const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
/* constants */
xxh_u64x2 const v32 = { 32, 32 };
xxh_u64x2 const v47 = { 47, 47 };
xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
size_t i;
for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
/* xacc[i] ^= (xacc[i] >> 47); */
xxh_u64x2 const acc_vec = xacc[i];
xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
/* xacc[i] ^= xsecret[i]; */
xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
xxh_u64x2 const data_key = data_vec ^ key_vec;
/* xacc[i] *= XXH_PRIME32_1 */
/* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
/* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
xacc[i] = prod_odd + (prod_even << v32);
} }
}
#endif
/* scalar variants - universal */
/*!
* @internal
* @brief Scalar round for @ref XXH3_accumulate_512_scalar().
*
* This is extracted to its own function because the NEON path uses a combination
* of NEON and scalar.
*/
XXH_FORCE_INLINE void
XXH3_scalarRound(void* XXH_RESTRICT acc,
void const* XXH_RESTRICT input,
void const* XXH_RESTRICT secret,
size_t lane)
{
xxh_u64* xacc = (xxh_u64*) acc;
xxh_u8 const* xinput = (xxh_u8 const*) input;
xxh_u8 const* xsecret = (xxh_u8 const*) secret;
XXH_ASSERT(lane < XXH_ACC_NB);
XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
{
xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
}
}
/*!
* @internal
* @brief Processes a 64 byte block of data using the scalar path.
*/
XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
const void* XXH_RESTRICT input,
const void* XXH_RESTRICT secret)
{
size_t i;
for (i=0; i < XXH_ACC_NB; i++) {
XXH3_scalarRound(acc, input, secret, i);
}
}
/*!
* @internal
* @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
*
* This is extracted to its own function because the NEON path uses a combination
* of NEON and scalar.
*/
XXH_FORCE_INLINE void
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
void const* XXH_RESTRICT secret,
size_t lane)
{
xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
XXH_ASSERT(lane < XXH_ACC_NB);
{
xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
xxh_u64 acc64 = xacc[lane];
acc64 = XXH_xorshift64(acc64, 47);
acc64 ^= key64;
acc64 *= XXH_PRIME32_1;
xacc[lane] = acc64;
}
}
/*!
* @internal
* @brief Scrambles the accumulators after a large chunk has been read
*/
XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
size_t i;
for (i=0; i < XXH_ACC_NB; i++) {
XXH3_scalarScrambleRound(acc, secret, i);
}
}
XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
/*
* We need a separate pointer for the hack below,
* which requires a non-const pointer.
* Any decent compiler will optimize this out otherwise.
*/
const xxh_u8* kSecretPtr = XXH3_kSecret;
XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
#if defined(__clang__) && defined(__aarch64__)
/*
* UGLY HACK:
* Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
* placed sequentially, in order, at the top of the unrolled loop.
*
* While MOVK is great for generating constants (2 cycles for a 64-bit
* constant compared to 4 cycles for LDR), it fights for bandwidth with
* the arithmetic instructions.
*
* I L S
* MOVK
* MOVK
* MOVK
* MOVK
* ADD
* SUB STR
* STR
* By forcing loads from memory (as the asm line causes Clang to assume
* that kSecretPtr has been changed), the pipelines are used more
* efficiently:
* I L S
* LDR
* ADD LDR
* SUB STR
* STR
*
* See XXH3_NEON_LANES for details on the pipeline.
*
* XXH3_64bits_withSeed, len == 256, Snapdragon 835
* without hack: 2654.4 MB/s
* with hack: 3202.9 MB/s
*/
XXH_COMPILER_GUARD(kSecretPtr);
#endif
/*
* Note: in debug mode, this overrides the asm optimization
* and Clang will emit MOVK chains again.
*/
XXH_ASSERT(kSecretPtr == XXH3_kSecret);
{ int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
int i;
for (i=0; i < nbRounds; i++) {
/*
* The asm hack causes Clang to assume that kSecretPtr aliases with
* customSecret, and on aarch64, this prevented LDP from merging two
* loads together for free. Putting the loads together before the stores
* properly generates LDP.
*/
xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
} }
}
typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
#if (XXH_VECTOR == XXH_AVX512)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
#elif (XXH_VECTOR == XXH_AVX2)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
#elif (XXH_VECTOR == XXH_SSE2)
#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
#elif (XXH_VECTOR == XXH_NEON)
#define XXH3_accumulate_512 XXH3_accumulate_512_neon
#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#elif (XXH_VECTOR == XXH_VSX)
#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#else /* scalar */
#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#endif
#ifndef XXH_PREFETCH_DIST
# ifdef __clang__
# define XXH_PREFETCH_DIST 320
# else
# if (XXH_VECTOR == XXH_AVX512)
# define XXH_PREFETCH_DIST 512
# else
# define XXH_PREFETCH_DIST 384
# endif
# endif /* __clang__ */
#endif /* XXH_PREFETCH_DIST */
/*
* XXH3_accumulate()
* Loops over XXH3_accumulate_512().
* Assumption: nbStripes will not overflow the secret size
*/
XXH_FORCE_INLINE void
XXH3_accumulate( xxh_u64* XXH_RESTRICT acc,
const xxh_u8* XXH_RESTRICT input,
const xxh_u8* XXH_RESTRICT secret,
size_t nbStripes,
XXH3_f_accumulate_512 f_acc512)
{
size_t n;
for (n = 0; n < nbStripes; n++ ) {
const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
XXH_PREFETCH(in + XXH_PREFETCH_DIST);
f_acc512(acc,
in,
secret + n*XXH_SECRET_CONSUME_RATE);
}
}
XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble)
{
size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
size_t const nb_blocks = (len - 1) / block_len;
size_t n;
for (n = 0; n < nb_blocks; n++) {
XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
}
/* last partial block */
XXH_ASSERT(len > XXH_STRIPE_LEN);
{ size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
/* last stripe */
{ const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
} }
}
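/*
 * With the default 192-byte secret, this gives:
 *   nbStripesPerBlock = (192 - 64) / 8 = 16 stripes,
 *   block_len         = 64 * 16       = 1024 bytes,
 * i.e. the accumulators are scrambled once per KB of input.
 */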
XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
return XXH3_mul128_fold64(
acc[0] ^ XXH_readLE64(secret),
acc[1] ^ XXH_readLE64(secret+8) );
}
static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
xxh_u64 result64 = start;
size_t i = 0;
for (i = 0; i < 4; i++) {
result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__) /* Clang */ \
&& (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
&& (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
&& !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
/*
* UGLY HACK:
* Prevent autovectorization on Clang ARMv7-a. Exact same problem as
* the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
* XXH3_64bits, len == 256, Snapdragon 835:
* without hack: 2063.7 MB/s
* with hack: 2560.7 MB/s
*/
XXH_COMPILER_GUARD(result64);
#endif
}
return XXH3_avalanche(result64);
}
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
const void* XXH_RESTRICT secret, size_t secretSize,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble)
{
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
/* converge into final hash */
XXH_STATIC_ASSERT(sizeof(acc) == 64);
/* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
* It's important for performance to transmit secret's size (when it's static)
* so that the compiler can properly optimize the vectorized loop.
* This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
*/
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
(void)seed64;
return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
* It's preferable for performance that XXH3_hashLong is not inlined,
* as it results in a smaller function for small data, which is easier on the instruction cache.
* Note that inside this no_inline function, we do inline the internal loop,
* and provide a statically defined secret size to allow optimization of vector loop.
*/
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
(void)seed64; (void)secret; (void)secretLen;
return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
* XXH3_hashLong_64b_withSeed():
* Generate a custom key based on alteration of default XXH3_kSecret with the seed,
* and then use this key for long mode hashing.
*
* This operation is decently fast but nonetheless costs a little bit of time.
* Try to avoid it whenever possible (typically when seed==0).
*
* It's important for performance that XXH3_hashLong is not inlined. Not sure
* why (uop cache maybe?), but the difference is large and easily measurable.
*/
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
XXH64_hash_t seed,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble,
XXH3_f_initCustomSecret f_initSec)
{
if (seed == 0)
return XXH3_hashLong_64b_internal(input, len,
XXH3_kSecret, sizeof(XXH3_kSecret),
f_acc512, f_scramble);
{ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
f_initSec(secret, seed);
return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
f_acc512, f_scramble);
}
}
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* input, size_t len,
XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
{
(void)secret; (void)secretLen;
return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
XXH3_hashLong64_f f_hashLong)
{
XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
/*
* If an action is to be taken when the `secretLen` condition is not respected,
* it should be done here.
* For now, it's a contract pre-condition.
* Adding a check and a branch here would cost performance at every hash.
* Also, note that function signature doesn't offer room to return an error.
*/
if (len <= 16)
return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
if (len <= 128)
return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
if (len <= XXH3_MIDSIZE_MAX)
return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}
/* === Public entry point === */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
{
return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}
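/*
 * Minimal one-shot usage sketch (`buffer` and `bufferSize` are caller-provided):
 *
 *   XXH64_hash_t const h = XXH3_64bits(buffer, bufferSize);
 */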
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
if (len <= XXH3_MIDSIZE_MAX)
return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
}
/* === XXH3 streaming === */
/*
* Malloc's a pointer that is always aligned to align.
*
* This must be freed with `XXH_alignedFree()`.
*
* malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
* alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2,
* or, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON.
*
* This underalignment previously caused a rather obvious crash which went
* completely unnoticed due to XXH3_createState() not actually being tested.
* Credit to RedSpah for noticing this bug.
*
* The alignment is done manually: Functions like posix_memalign or _mm_malloc
* are avoided: To maintain portability, we would have to write a fallback
* like this anyways, and besides, testing for the existence of library
* functions without relying on external build tools is impossible.
*
* The method is simple: Overallocate, manually align, and store the offset
* to the original behind the returned pointer.
*
* Align must be a power of 2 and 8 <= align <= 128.
*/
static void* XXH_alignedMalloc(size_t s, size_t align)
{
XXH_ASSERT(align <= 128 && align >= 8); /* range check */
XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
{ /* Overallocate to make room for manual realignment and an offset byte */
xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
if (base != NULL) {
/*
* Get the offset needed to align this pointer.
*
* Even if the returned pointer is aligned, there will always be
* at least one byte to store the offset to the original pointer.
*/
size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
/* Add the offset for the now-aligned pointer */
xxh_u8* ptr = base + offset;
XXH_ASSERT((size_t)ptr % align == 0);
/* Store the offset immediately before the returned pointer. */
ptr[-1] = (xxh_u8)offset;
return ptr;
}
return NULL;
}
}
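/*
 * Worked example, assuming XXH_malloc() returns base = 0x1003 with align = 8:
 *   offset = 8 - (0x1003 & 7) = 5, so ptr = 0x1008 (8-byte aligned) and ptr[-1] = 5.
 *   XXH_alignedFree() later recovers base as 0x1008 - 5 = 0x1003.
 */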
/*
* Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
* normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
*/
static void XXH_alignedFree(void* p)
{
if (p != NULL) {
xxh_u8* ptr = (xxh_u8*)p;
/* Get the offset byte we added in XXH_alignedMalloc(). */
xxh_u8 offset = ptr[-1];
/* Free the original malloc'd pointer */
xxh_u8* base = ptr - offset;
XXH_free(base);
}
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
if (state==NULL) return NULL;
XXH3_INITSTATE(state);
return state;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
XXH_alignedFree(statePtr);
return XXH_OK;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}
static void
XXH3_reset_internal(XXH3_state_t* statePtr,
XXH64_hash_t seed,
const void* secret, size_t secretSize)
{
size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
XXH_ASSERT(statePtr != NULL);
/* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
memset((char*)statePtr + initStart, 0, initLength);
statePtr->acc[0] = XXH_PRIME32_3;
statePtr->acc[1] = XXH_PRIME64_1;
statePtr->acc[2] = XXH_PRIME64_2;
statePtr->acc[3] = XXH_PRIME64_3;
statePtr->acc[4] = XXH_PRIME64_4;
statePtr->acc[5] = XXH_PRIME32_2;
statePtr->acc[6] = XXH_PRIME64_5;
statePtr->acc[7] = XXH_PRIME32_1;
statePtr->seed = seed;
statePtr->useSeed = (seed != 0);
statePtr->extSecret = (const unsigned char*)secret;
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
if (statePtr == NULL) return XXH_ERROR;
XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
return XXH_OK;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
if (statePtr == NULL) return XXH_ERROR;
XXH3_reset_internal(statePtr, 0, secret, secretSize);
if (secret == NULL) return XXH_ERROR;
if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
return XXH_OK;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
if (statePtr == NULL) return XXH_ERROR;
if (seed==0) return XXH3_64bits_reset(statePtr);
if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
XXH3_initCustomSecret(statePtr->customSecret, seed);
XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
return XXH_OK;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
if (statePtr == NULL) return XXH_ERROR;
if (secret == NULL) return XXH_ERROR;
if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
XXH3_reset_internal(statePtr, seed64, secret, secretSize);
statePtr->useSeed = 1; /* always, even if seed64==0 */
return XXH_OK;
}
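/*
 * Minimal streaming usage sketch (error handling omitted; `chunk`, `chunkSize`
 * and `seed` are caller-provided, and XXH3_64bits_update() / XXH3_64bits_digest()
 * are declared in the public header):
 *
 *   XXH3_state_t* const st = XXH3_createState();
 *   XXH3_64bits_reset_withSeed(st, seed);
 *   XXH3_64bits_update(st, chunk, chunkSize);     (repeat for each chunk)
 *   { XXH64_hash_t const h = XXH3_64bits_digest(st); (void)h; }
 *   XXH3_freeState(st);
 */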
/* Note : when XXH3_consumeStripes() is invoked,
* the caller must guarantee that at least one more byte will be consumed from input,
* so that the function can blindly consume all stripes using the "normal" secret segment */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble)
{
XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */
XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
/* need a scrambling operation */
size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
f_scramble(acc, secret + secretLimit);
XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
*nbStripesSoFarPtr = nbStripesAfterBlock;
} else {
XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
*nbStripesSoFarPtr += nbStripes;
}
}
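/*
 * Worked example: with nbStripesPerBlock == 16, *nbStripesSoFarPtr == 14 and
 * nbStripes == 4, the first branch consumes 2 stripes to reach the end of the
 * block, scrambles the accumulators, consumes the remaining 2 stripes from the
 * start of the secret, and leaves *nbStripesSoFarPtr == 2.
 */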
#ifndef XXH3_STREAM_USE_STACK
# ifndef __clang__ /* clang doesn't need additional stack space */
# define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
* Both XXH3_64bits_update and XXH3_128bits_update use this routine.
*/
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
const xxh_u8* XXH_RESTRICT input, size_t len,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble)
{
if (input==NULL) {
XXH_ASSERT(len == 0);
return XXH_OK;
}
XXH_ASSERT(state != NULL);
{ const xxh_u8* const bEnd = input + len;
const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
/* For some reason, gcc and MSVC seem to suffer greatly
* when operating on accumulators directly in the state structure.
* Operating on a stack copy seems to enable proper optimization.
* clang, on the other hand, doesn't seem to need this trick */
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
#else
xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
state->totalLen += len;
XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
/* small input : just fill in tmp buffer */
if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
XXH_memcpy(state->buffer + state->bufferedSize, input, len);
state->bufferedSize += (XXH32_hash_t)len;
return XXH_OK;
}
/* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
/*
* Internal buffer is partially filled (always, except at beginning)
* Complete it, then consume it.
*/
if (state->bufferedSize) {
size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
input += loadSize;
XXH3_consumeStripes(acc,
&state->nbStripesSoFar, state->nbStripesPerBlock,
state->buffer, XXH3_INTERNALBUFFER_STRIPES,
secret, state->secretLimit,
f_acc512, f_scramble);
state->bufferedSize = 0;
}
XXH_ASSERT(input < bEnd);
/* large input to consume : ingest per full block */
if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
/* join to current block's end */
{ size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
XXH_ASSERT(nbStripesToEnd <= nbStripes);
XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
f_scramble(acc, secret + state->secretLimit);
state->nbStripesSoFar = 0;
input += nbStripesToEnd * XXH_STRIPE_LEN;
nbStripes -= nbStripesToEnd;
}
/* consume per entire blocks */
while(nbStripes >= state->nbStripesPerBlock) {
XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
f_scramble(acc, secret + state->secretLimit);
input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
nbStripes -= state->nbStripesPerBlock;
}
/* consume last partial block */
XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
input += nbStripes * XXH_STRIPE_LEN;
XXH_ASSERT(input < bEnd); /* at least some bytes left */
state->nbStripesSoFar = nbStripes;
/* buffer predecessor of last partial stripe */
XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
} else {
/* content to consume <= block size */
/* Consume input by a multiple of internal buffer size */
if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
do {
XXH3_consumeStripes(acc,
&state->nbStripesSoFar, state->nbStripesPerBlock,
input, XXH3_INTERNALBUFFER_STRIPES,
secret, state->secretLimit,
f_acc512, f_scramble);
input += XXH3_INTERNALBUFFER_SIZE;
} while (input<limit);
/* buffer predecessor of last partial stripe */
XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
}
}
/* Some remaining input (always) : buffer it */
XXH_ASSERT(input < bEnd);
XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
XXH_ASSERT(state->bufferedSize == 0);
XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
/* save stack accumulators into state */
memcpy(state->acc, acc, sizeof(acc));
#endif
}
return XXH_OK;
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
return XXH3_update(state, (const xxh_u8*)input, len,
XXH3_accumulate_512, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
const XXH3_state_t* state,
const unsigned char* secret)
{
/*
* Digest on a local copy. This way, the state remains unaltered, and it can
* continue ingesting more input afterwards.
*/
XXH_memcpy(acc, state->acc, sizeof(state->acc));
if (state->bufferedSize >= XXH_STRIPE_LEN) {
size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
size_t nbStripesSoFar = state->nbStripesSoFar;
XXH3_consumeStripes(acc,
&nbStripesSoFar, state->nbStripesPerBlock,
state->buffer, nbStripes,
secret, state->secretLimit,
XXH3_accumulate_512, XXH3_scrambleAcc);
/* last stripe */
XXH3_accumulate_512(acc,
state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
secret + state->secretLimit - XXH_SECRET_LASTACC_START);
} else { /* bufferedSize < XXH_STRIPE_LEN */
xxh_u8 lastStripe[XXH_STRIPE_LEN];
size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
XXH3_accumulate_512(acc,
lastStripe,
secret + state->secretLimit - XXH_SECRET_LASTACC_START);
}
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
if (state->totalLen > XXH3_MIDSIZE_MAX) {
XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
XXH3_digest_long(acc, state, secret);
return XXH3_mergeAccs(acc,
secret + XXH_SECRET_MERGEACCS_START,
(xxh_u64)state->totalLen * XXH_PRIME64_1);
}
/* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
if (state->useSeed)
return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
secret, state->secretLimit + XXH_STRIPE_LEN);
}
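/* Typical use of the streaming API above (illustrative sketch, not part of the library) :
 * the same reset/update/digest cycle also applies to the seeded and secret variants.
 * `chunk1`, `chunk1Size`, `chunk2`, `chunk2Size` are caller-provided placeholders.
 *
 *     XXH3_state_t* const st = XXH3_createState();            // heap-allocated, reusable state
 *     if (st != NULL && XXH3_64bits_reset(st) == XXH_OK) {
 *         XXH3_64bits_update(st, chunk1, chunk1Size);         // feed data in any number of chunks
 *         XXH3_64bits_update(st, chunk2, chunk2Size);
 *         {   XXH64_hash_t const h = XXH3_64bits_digest(st);  // state stays valid for further updates
 *             (void)h;
 *         }
 *     }
 *     XXH3_freeState(st);
 */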
/* ==========================================
* XXH3 128 bits (a.k.a XXH128)
* ==========================================
* XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
* even without counting the significantly larger output size.
*
* For example, extra steps are taken to avoid the seed-dependent collisions
* in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
*
* This strength naturally comes at the cost of some speed, especially on short
* lengths. Note that longer hashes are about as fast as the 64-bit version
* due to it using only a slight modification of the 64-bit loop.
*
* XXH128 is also more oriented towards 64-bit machines. It is still extremely
* fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
*/
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
/* A doubled version of 1to3_64b with different constants. */
XXH_ASSERT(input != NULL);
XXH_ASSERT(1 <= len && len <= 3);
XXH_ASSERT(secret != NULL);
/*
* len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
* len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
* len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
*/
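/* Worked example (illustrative) : for len = 2 and input = { 0x41, 0x42 } :
 * c1 = 0x41, c2 = input[1] = 0x42, c3 = input[1] = 0x42, so
 * combinedl = (0x41<<16) | (0x42<<24) | (0x42<<0) | (2<<8) = 0x42410242,
 * i.e. bytes { 0x42, 0x02, 0x41, 0x42 } = { input[1], 0x02, input[0], input[1] }, as documented above. */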
{ xxh_u8 const c1 = input[0];
xxh_u8 const c2 = input[len >> 1];
xxh_u8 const c3 = input[len - 1];
xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
| ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
XXH128_hash_t h128;
h128.low64 = XXH64_avalanche(keyed_lo);
h128.high64 = XXH64_avalanche(keyed_hi);
return h128;
}
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(secret != NULL);
XXH_ASSERT(4 <= len && len <= 8);
seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
{ xxh_u32 const input_lo = XXH_readLE32(input);
xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
xxh_u64 const keyed = input_64 ^ bitflip;
/* Shift len to the left to ensure it is even, this avoids even multiplies. */
XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
m128.high64 += (m128.low64 << 1);
m128.low64 ^= (m128.high64 >> 3);
m128.low64 = XXH_xorshift64(m128.low64, 35);
m128.low64 *= 0x9FB21C651E98DF25ULL;
m128.low64 = XXH_xorshift64(m128.low64, 28);
m128.high64 = XXH3_avalanche(m128.high64);
return m128;
}
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(input != NULL);
XXH_ASSERT(secret != NULL);
XXH_ASSERT(9 <= len && len <= 16);
{ xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
xxh_u64 const input_lo = XXH_readLE64(input);
xxh_u64 input_hi = XXH_readLE64(input + len - 8);
XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
/*
* Put len in the middle of m128 to ensure that the length gets mixed to
* both the low and high bits in the 128x64 multiply below.
*/
m128.low64 += (xxh_u64)(len - 1) << 54;
input_hi ^= bitfliph;
/*
* Add the high 32 bits of input_hi to the high 32 bits of m128, then
* add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
* the high 64 bits of m128.
*
* The best approach to this operation is different on 32-bit and 64-bit.
*/
if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
/*
* 32-bit optimized version, which is more readable.
*
* On 32-bit, it removes an ADC and delays a dependency between the two
* halves of m128.high64, but it generates an extra mask on 64-bit.
*/
m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
} else {
/*
* 64-bit optimized (albeit more confusing) version.
*
* Uses some properties of addition and multiplication to remove the mask:
*
* Let:
* a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
* b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
* c = XXH_PRIME32_2
*
* a + (b * c)
* Inverse Property: x + y - x == y
* a + (b * (1 + c - 1))
* Distributive Property: x * (y + z) == (x * y) + (x * z)
* a + (b * 1) + (b * (c - 1))
* Identity Property: x * 1 == x
* a + b + (b * (c - 1))
*
* Substitute a, b, and c:
* input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
*
* Since input_hi.hi + input_hi.lo == input_hi, we get this:
* input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
*/
m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
}
/* m128 ^= XXH_swap64(m128 >> 64); */
m128.low64 ^= XXH_swap64(m128.high64);
{ /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
h128.high64 += m128.high64 * XXH_PRIME64_2;
h128.low64 = XXH3_avalanche(h128.low64);
h128.high64 = XXH3_avalanche(h128.high64);
return h128;
} }
}
/*
* Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
*/
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
XXH_ASSERT(len <= 16);
{ if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
{ XXH128_hash_t h128;
xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
h128.low64 = XXH64_avalanche(seed ^ bitflipl);
h128.high64 = XXH64_avalanche( seed ^ bitfliph);
return h128;
} }
}
/*
* A bit slower than XXH3_mix16B, but handles multiply by zero better.
*/
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
const xxh_u8* secret, XXH64_hash_t seed)
{
acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
return acc;
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(16 < len && len <= 128);
{ XXH128_hash_t acc;
acc.low64 = len * XXH_PRIME64_1;
acc.high64 = 0;
if (len > 32) {
if (len > 64) {
if (len > 96) {
acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
}
acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
}
acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
}
acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
{ XXH128_hash_t h128;
h128.low64 = acc.low64 + acc.high64;
h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ (acc.high64 * XXH_PRIME64_4)
+ ((len - seed) * XXH_PRIME64_2);
h128.low64 = XXH3_avalanche(h128.low64);
h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
return h128;
}
}
}
XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH64_hash_t seed)
{
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
{ XXH128_hash_t acc;
int const nbRounds = (int)len / 32;
int i;
acc.low64 = len * XXH_PRIME64_1;
acc.high64 = 0;
for (i=0; i<4; i++) {
acc = XXH128_mix32B(acc,
input + (32 * i),
input + (32 * i) + 16,
secret + (32 * i),
seed);
}
acc.low64 = XXH3_avalanche(acc.low64);
acc.high64 = XXH3_avalanche(acc.high64);
XXH_ASSERT(nbRounds >= 4);
for (i=4 ; i < nbRounds; i++) {
acc = XXH128_mix32B(acc,
input + (32 * i),
input + (32 * i) + 16,
secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
seed);
}
/* last bytes */
acc = XXH128_mix32B(acc,
input + len - 16,
input + len - 32,
secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
0ULL - seed);
{ XXH128_hash_t h128;
h128.low64 = acc.low64 + acc.high64;
h128.high64 = (acc.low64 * XXH_PRIME64_1)
+ (acc.high64 * XXH_PRIME64_4)
+ ((len - seed) * XXH_PRIME64_2);
h128.low64 = XXH3_avalanche(h128.low64);
h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
return h128;
}
}
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble)
{
XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);
/* converge into final hash */
XXH_STATIC_ASSERT(sizeof(acc) == 64);
XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
{ XXH128_hash_t h128;
h128.low64 = XXH3_mergeAccs(acc,
secret + XXH_SECRET_MERGEACCS_START,
(xxh_u64)len * XXH_PRIME64_1);
h128.high64 = XXH3_mergeAccs(acc,
secret + secretSize
- sizeof(acc) - XXH_SECRET_MERGEACCS_START,
~((xxh_u64)len * XXH_PRIME64_2));
return h128;
}
}
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64,
const void* XXH_RESTRICT secret, size_t secretLen)
{
(void)seed64; (void)secret; (void)secretLen;
return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
* It's important for performance to pass @secretLen (when it's static)
* to the compiler, so that it can properly optimize the vectorized loop.
*/
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64,
const void* XXH_RESTRICT secret, size_t secretLen)
{
(void)seed64;
return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
XXH3_accumulate_512, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
XXH64_hash_t seed64,
XXH3_f_accumulate_512 f_acc512,
XXH3_f_scrambleAcc f_scramble,
XXH3_f_initCustomSecret f_initSec)
{
if (seed64 == 0)
return XXH3_hashLong_128b_internal(input, len,
XXH3_kSecret, sizeof(XXH3_kSecret),
f_acc512, f_scramble);
{ XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
f_initSec(secret, seed64);
return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
f_acc512, f_scramble);
}
}
/*
* It's important for performance that XXH3_hashLong is not inlined.
*/
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
(void)secret; (void)secretLen;
return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
XXH64_hash_t, const void* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
XXH3_hashLong128_f f_hl128)
{
XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
/*
* If an action is to be taken if `secret` conditions are not respected,
* it should be done here.
* For now, it's a contract pre-condition.
* Adding a check and a branch here would cost performance at every hash.
*/
if (len <= 16)
return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
if (len <= 128)
return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
if (len <= XXH3_MIDSIZE_MAX)
return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
return f_hl128(input, len, seed64, secret, secretLen);
}
/* === Public XXH128 API === */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
return XXH3_128bits_internal(input, len, 0,
XXH3_kSecret, sizeof(XXH3_kSecret),
XXH3_hashLong_128b_default);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
return XXH3_128bits_internal(input, len, 0,
(const xxh_u8*)secret, secretSize,
XXH3_hashLong_128b_withSecret);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
return XXH3_128bits_internal(input, len, seed,
XXH3_kSecret, sizeof(XXH3_kSecret),
XXH3_hashLong_128b_withSeed);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
if (len <= XXH3_MIDSIZE_MAX)
return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(const void* input, size_t len, XXH64_hash_t seed)
{
return XXH3_128bits_withSeed(input, len, seed);
}
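/* One-shot usage of the 128-bit variant (illustrative sketch) :
 * XXH128() is simply XXH3_128bits_withSeed(); pass seed 0, or call XXH3_128bits(), for the unseeded form.
 * `buf` and `bufSize` are caller-provided placeholders.
 *
 *     XXH128_hash_t const h = XXH128(buf, bufSize, 0);
 *     // h.low64 and h.high64 together form the 128-bit result
 */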
/* === XXH3 128-bit streaming === */
/*
* All initialization and update functions are identical to 64-bit streaming variant.
* The only difference is the finalization routine.
*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH3_state_t* statePtr)
{
return XXH3_64bits_reset(statePtr);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
return XXH3_64bits_reset_withSeed(statePtr, seed);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
{
return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
return XXH3_update(state, (const xxh_u8*)input, len,
XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
if (state->totalLen > XXH3_MIDSIZE_MAX) {
XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
XXH3_digest_long(acc, state, secret);
XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
{ XXH128_hash_t h128;
h128.low64 = XXH3_mergeAccs(acc,
secret + XXH_SECRET_MERGEACCS_START,
(xxh_u64)state->totalLen * XXH_PRIME64_1);
h128.high64 = XXH3_mergeAccs(acc,
secret + state->secretLimit + XXH_STRIPE_LEN
- sizeof(acc) - XXH_SECRET_MERGEACCS_START,
~((xxh_u64)state->totalLen * XXH_PRIME64_2));
return h128;
}
}
/* len <= XXH3_MIDSIZE_MAX : short code */
if (state->seed)
return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
secret, state->secretLimit + XXH_STRIPE_LEN);
}
/* 128-bit utility functions */
#include <string.h> /* memcmp, memcpy */
/* return : 1 if equal, 0 if different */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
/* note : XXH128_hash_t is compact, it has no padding byte */
return !(memcmp(&h1, &h2, sizeof(h1)));
}
/* This prototype is compatible with stdlib's qsort().
* return : >0 if *h128_1 > *h128_2
* <0 if *h128_1 < *h128_2
* =0 if *h128_1 == *h128_2 */
/*! @ingroup xxh3_family */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
/* note : bets that, in most cases, hash values are different */
if (hcmp) return hcmp;
return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
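/* Since XXH128_cmp() matches the qsort() comparator contract, an array of hashes can be sorted
 * (or searched with bsearch()) directly. Illustrative sketch; `hashes` and `nbHashes` are caller-provided :
 *
 *     qsort(hashes, nbHashes, sizeof(XXH128_hash_t), XXH128_cmp);
 */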
/*====== Canonical representation ======*/
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
if (XXH_CPU_LITTLE_ENDIAN) {
hash.high64 = XXH_swap64(hash.high64);
hash.low64 = XXH_swap64(hash.low64);
}
XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
XXH128_hash_t h;
h.high64 = XXH_readBE64(src);
h.low64 = XXH_readBE64(src->digest + 8);
return h;
}
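/* Canonical form round-trip (illustrative sketch) : the canonical representation is big-endian,
 * so it can be stored or transmitted and re-read identically on any platform.
 * `h128` stands for a previously computed XXH128_hash_t.
 *
 *     XXH128_canonical_t c;
 *     XXH128_canonicalFromHash(&c, h128);
 *     // ... write the 16 bytes of c.digest to disk or network, read them back later ...
 *     {   XXH128_hash_t const back = XXH128_hashFromCanonical(&c);
 *         XXH_ASSERT(XXH128_isEqual(back, h128));
 *     }
 */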
/* ==========================================
* Secret generators
* ==========================================
*/
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}
/*! @ingroup xxh3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
XXH_ASSERT(secretBuffer != NULL);
XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
/* production mode, assert() are disabled */
if (secretBuffer == NULL) return XXH_ERROR;
if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif
if (customSeedSize == 0) {
customSeed = XXH3_kSecret;
customSeedSize = XXH_SECRET_DEFAULT_SIZE;
}
#if (XXH_DEBUGLEVEL >= 1)
XXH_ASSERT(customSeed != NULL);
#else
if (customSeed == NULL) return XXH_ERROR;
#endif
/* Fill secretBuffer with a copy of customSeed - repeat as needed */
{ size_t pos = 0;
while (pos < secretSize) {
size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
memcpy((char*)secretBuffer + pos, customSeed, toCopy);
pos += toCopy;
} }
{ size_t const nbSeg16 = secretSize / 16;
size_t n;
XXH128_canonical_t scrambler;
XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
for (n=0; n<nbSeg16; n++) {
XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
XXH3_combine16((char*)secretBuffer + n*16, h128);
}
/* last segment */
XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
}
return XXH_OK;
}
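/* Typical use of XXH3_generateSecret() (illustrative sketch) : derive a full-entropy secret from
 * low-entropy user material, then hash with the *_withSecret() variants.
 * `userKey`/`userKeySize` and `data`/`dataSize` are caller-provided placeholders.
 *
 *     unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *     if (XXH3_generateSecret(secret, sizeof(secret), userKey, userKeySize) == XXH_OK) {
 *         XXH64_hash_t const h = XXH3_64bits_withSecret(data, dataSize, secret, sizeof(secret));
 *         (void)h;
 *     }
 */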
/*! @ingroup xxh3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
XXH3_initCustomSecret(secret, seed);
XXH_ASSERT(secretBuffer != NULL);
memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}
/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
&& defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
&& defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
# pragma GCC pop_options
#endif
#endif /* XXH_NO_LONG_LONG */
#endif /* XXH_NO_XXH3 */
/*!
* @}
*/
#endif /* XXH_IMPLEMENTATION */
/* *************************************
* Tuning parameters
***************************************/
/*!XXH_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method doesn't depend on the compiler, but violates the C standard.
 * It can generate buggy code on targets which do not support unaligned memory accesses.
 * In some circumstances, however, it's the only known way to get the most performance (e.g. GCC + ARMv6).
 * See http://stackoverflow.com/a/32095106/646947 for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
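/* For example (illustrative; exact flags depend on your build system), the access method can be
 * forced from the compiler command line when building this translation unit :
 *
 *     cc -O3 -DXXH_FORCE_MEMORY_ACCESS=2 -c <this file>
 */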
#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
# define XXH_FORCE_MEMORY_ACCESS 2
# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
(defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
defined(__ICCARM__)
# define XXH_FORCE_MEMORY_ACCESS 1
# endif
#endif
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
 * If the input pointer is a null pointer, xxHash's default behavior is to trigger a memory access error, since it is a bad pointer.
 * When this option is enabled, xxHash output for a null input pointer will be the same as for a zero-length input.
 * By default, this option is disabled. To enable it, uncomment the define below :
 */
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/*!XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1,
 * to improve speed on big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
# define XXH_FORCE_NATIVE_FORMAT 0
#endif
#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
# define XXH_FORCE_NATIVE_FORMAT 0
#endif
/*!XXH_FORCE_ALIGN_CHECK :
 * This is a minor performance trick, only useful with lots of very small keys.
 * It means : check for aligned/unaligned input.
 * The check costs one initial branch per hash; set to 0 when the input data
 * is guaranteed to be aligned.
 */
#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
# define XXH_FORCE_ALIGN_CHECK 1
#endif
/*
 * xxhash.c instantiates functions defined in xxhash.h
 */
/* *************************************
* Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for ZSTD_malloc(), ZSTD_free() */
#define ZSTD_DEPS_NEED_MALLOC
#include "zstd_deps.h" /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
static void XXH_free (void* p) { ZSTD_free(p); }
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }
#ifndef XXH_STATIC_LINKING_ONLY
# define XXH_STATIC_LINKING_ONLY
#endif
/* *************************************
* Compiler Specific Options
***************************************/
#include "compiler.h"
/* *************************************
* Basic Types
***************************************/
#include "mem.h" /* BYTE, U32, U64, size_t */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; }
static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; }
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign;
static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
#else
/* portable and safe solution. Generally efficient.
* see : http://stackoverflow.com/a/32095106/646947
*/
static U32 XXH_read32(const void* memPtr)
{
U32 val;
ZSTD_memcpy(&val, memPtr, sizeof(val));
return val;
}
static U64 XXH_read64(const void* memPtr)
{
U64 val;
ZSTD_memcpy(&val, memPtr, sizeof(val));
return val;
}
#endif /* XXH_FORCE_MEMORY_ACCESS */
/* ****************************************
* Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
# define XXH_rotl64(x,r) _rotl64(x,r)
#else
#if defined(__ICCARM__)
# include <intrinsics.h>
# define XXH_rotl32(x,r) __ROR(x,(32 - r))
#else
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif
#if defined(_MSC_VER) /* Visual Studio */
# define XXH_swap32 _byteswap_ulong
# define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
# define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
((x >> 8) & 0x0000ff00 ) |
((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
return ((x << 56) & 0xff00000000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
((x << 24) & 0x0000ff0000000000ULL) |
((x << 8) & 0x000000ff00000000ULL) |
((x >> 8) & 0x00000000ff000000ULL) |
((x >> 24) & 0x0000000000ff0000ULL) |
((x >> 40) & 0x000000000000ff00ULL) |
((x >> 56) & 0x00000000000000ffULL);
}
#endif
/* *************************************
* Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
static const int g_one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one))
#endif
/* ***************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
else
return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
}
FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
static U32 XXH_readBE32(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
else
return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
}
FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
static U64 XXH_readBE64(const void* ptr)
{
return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
/* *************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* *************************************
* Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 = 668265263U;
static const U32 PRIME32_5 = 374761393U;
static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 = 1609587929392839161ULL;
static const U64 PRIME64_4 = 9650029242287828579ULL;
static const U64 PRIME64_5 = 2870177450012600261ULL;
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* **************************
* Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
{
ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
}
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
{
ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
}
/* ***************************
* Simple Hash Functions
*****************************/
static U32 XXH32_round(U32 seed, U32 input)
{
seed += input * PRIME32_2;
seed = XXH_rotl32(seed, 13);
seed *= PRIME32_1;
return seed;
}
FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL) {
len=0;
bEnd=p=(const BYTE*)(size_t)16;
}
#endif
if (len>=16) {
const BYTE* const limit = bEnd - 16;
U32 v1 = seed + PRIME32_1 + PRIME32_2;
U32 v2 = seed + PRIME32_2;
U32 v3 = seed + 0;
U32 v4 = seed - PRIME32_1;
do {
v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
} while (p<=limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
} else {
h32 = seed + PRIME32_5;
}
h32 += (U32) len;
while (p+4<=bEnd) {
h32 += XXH_get32bits(p) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
p+=4;
}
while (p<bEnd) {
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH32_CREATESTATE_STATIC(state);
XXH32_reset(state, seed);
XXH32_update(state, input, len);
return XXH32_digest(state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
} }
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
static U64 XXH64_round(U64 acc, U64 input)
{
acc += input * PRIME64_2;
acc = XXH_rotl64(acc, 31);
acc *= PRIME64_1;
return acc;
}
static U64 XXH64_mergeRound(U64 acc, U64 val)
{
val = XXH64_round(0, val);
acc ^= val;
acc = acc * PRIME64_1 + PRIME64_4;
return acc;
}
FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL) {
len=0;
bEnd=p=(const BYTE*)(size_t)32;
}
#endif
if (len>=32) {
const BYTE* const limit = bEnd - 32;
U64 v1 = seed + PRIME64_1 + PRIME64_2;
U64 v2 = seed + PRIME64_2;
U64 v3 = seed + 0;
U64 v4 = seed - PRIME64_1;
do {
v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
} while (p<=limit);
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
h64 = XXH64_mergeRound(h64, v2);
h64 = XXH64_mergeRound(h64, v3);
h64 = XXH64_mergeRound(h64, v4);
} else {
h64 = seed + PRIME64_5;
}
h64 += (U64) len;
while (p+8<=bEnd) {
U64 const k1 = XXH64_round(0, XXH_get64bits(p));
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd) {
h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd) {
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH64_CREATESTATE_STATIC(state);
XXH64_reset(state, seed);
XXH64_update(state, input, len);
return XXH64_digest(state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
} }
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
/* **************************************************
* Advanced Hash Functions
****************************************************/
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
}
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
{
return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
{
XXH_free(statePtr);
return XXH_OK;
}
/*** Hash feed ***/
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
ZSTD_memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
state.v1 = seed + PRIME32_1 + PRIME32_2;
state.v2 = seed + PRIME32_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME32_1;
ZSTD_memcpy(statePtr, &state, sizeof(state));
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
ZSTD_memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
state.v1 = seed + PRIME64_1 + PRIME64_2;
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
ZSTD_memcpy(statePtr, &state, sizeof(state));
return XXH_OK;
}
FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len_32 += (unsigned)len;
state->large_len |= (len>=16) | (state->total_len_32>=16);
if (state->memsize + len < 16) { /* fill in tmp buffer */
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
state->memsize += (unsigned)len;
return XXH_OK;
}
if (state->memsize) { /* some data left from previous update */
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
{ const U32* p32 = state->mem32;
state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
}
p += 16-state->memsize;
state->memsize = 0;
}
if (p <= bEnd-16) {
const BYTE* const limit = bEnd - 16;
U32 v1 = state->v1;
U32 v2 = state->v2;
U32 v3 = state->v3;
U32 v4 = state->v4;
do {
v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
} while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd) {
XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
const BYTE * p = (const BYTE*)state->mem32;
const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
U32 h32;
if (state->large_len) {
h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
} else {
h32 = state->v3 /* == seed */ + PRIME32_5;
}
h32 += state->total_len_32;
while (p+4<=bEnd) {
h32 += XXH_readLE32(p, endian) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4;
p+=4;
}
while (p<bEnd) {
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_digest_endian(state_in, XXH_littleEndian);
else
return XXH32_digest_endian(state_in, XXH_bigEndian);
}
/* **** XXH64 **** */
FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
if (input != NULL) {
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
}
state->memsize += (U32)len;
return XXH_OK;
}
if (state->memsize) { /* tmp buffer is full */
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
p += 32-state->memsize;
state->memsize = 0;
}
if (p+32 <= bEnd) {
const BYTE* const limit = bEnd - 32;
U64 v1 = state->v1;
U64 v2 = state->v2;
U64 v3 = state->v3;
U64 v4 = state->v4;
do {
v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
} while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd) {
XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
const BYTE * p = (const BYTE*)state->mem64;
const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
U64 h64;
if (state->total_len >= 32) {
U64 const v1 = state->v1;
U64 const v2 = state->v2;
U64 const v3 = state->v3;
U64 const v4 = state->v4;
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
h64 = XXH64_mergeRound(h64, v2);
h64 = XXH64_mergeRound(h64, v3);
h64 = XXH64_mergeRound(h64, v4);
} else {
h64 = state->v3 + PRIME64_5;
}
h64 += (U64) state->total_len;
while (p+8<=bEnd) {
U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd) {
h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd) {
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_digest_endian(state_in, XXH_littleEndian);
else
return XXH64_digest_endian(state_in, XXH_bigEndian);
}
/* **************************
* Canonical representation
****************************/
/*! Default XXH result types are basic unsigned 32 and 64 bits.
* The canonical representation follows human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
ZSTD_memcpy(dst, &hash, sizeof(*dst));
}
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
ZSTD_memcpy(dst, &hash, sizeof(*dst));
}
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
return XXH_readBE32(src);
}
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
return XXH_readBE64(src);
}
/* Windows (_beginthreadex) implementation of the minimal pthread shim used by the thread pool */
static unsigned __stdcall worker(void *arg)
{
void* (*start_routine)(void*);
void* thread_arg;
/* Initialize thread_arg and start_routine and signal the main thread that we don't need it
 * to wait any longer.
 */
{
ZSTD_thread_params_t* thread_param = (ZSTD_thread_params_t*)arg;
thread_arg = thread_param->arg;
start_routine = thread_param->start_routine;
/* Signal main thread that we are running and do not depend on its memory anymore */
ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex);
thread_param->initialized = 1;
ZSTD_pthread_cond_signal(&thread_param->initialized_cond);
ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex);
}
start_routine(thread_arg);
return 0;
}
int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
void* (*start_routine)(void*), void* arg)
{
ZSTD_thread_params_t thread_param;
(void)unused;
thread_param.start_routine = start_routine;
thread_param.arg = arg;
thread_param.initialized = 0;
*thread = NULL;
/* Setup thread initialization synchronization */
if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) {
/* Should never happen on Windows */
return -1;
}
if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) {
/* Should never happen on Windows */
ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
return -1;
}
/* Spawn thread */
*thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL);
if (!*thread) {
ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
return -1;
}
/* Wait for thread to be initialized */
ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex);
while(!thread_param.initialized) {
ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex);
}
ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex);
ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
return 0;
}
/*! POOL_joinJobs() :
* Waits for all queued jobs to finish executing.
*/
void POOL_joinJobs(POOL_ctx* ctx) {
ZSTD_pthread_mutex_lock(&ctx->queueMutex);
while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
}
ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}
{ ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 : always use `memcpy()`. Safe and portable.
 * Method 1 : use a compiler extension (`__packed` / packed structs) to allow unaligned access.
 * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. Doesn't depend on the compiler, but violates the C standard.
 * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 * Default : method 1 if supported, else method 0.
 */
#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#  define MEM_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
#  define MEM_FORCE_MEMORY_ACCESS 1
# endif
#endif
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
return 1;
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
return 0;
#elif defined(__clang__) && __LITTLE_ENDIAN__
return 1;
#elif defined(__clang__) && __BIG_ENDIAN__
return 0;
#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86)
return 1;
#elif defined(__DMC__) && defined(_M_IX86)
return 1;
#else
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
#endif
}
/* MEM_FORCE_MEMORY_ACCESS == 1 : use compiler-specific packed types for unaligned access */
/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
__pragma( pack(push, 1) )
typedef struct { U16 v; } unalign16;
typedef struct { U32 v; } unalign32;
typedef struct { U64 v; } unalign64;
typedef struct { size_t v; } unalignArch;
__pragma( pack(pop) )
#else
typedef struct { U16 v; } __attribute__((packed)) unalign16;
typedef struct { U32 v; } __attribute__((packed)) unalign32;
typedef struct { U64 v; } __attribute__((packed)) unalign64;
typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif
MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }
MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }
MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER) /* Visual Studio */
return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
|| (defined(__clang__) && __has_builtin(__builtin_bswap64))
return __builtin_bswap64(in);
#else
return ((in << 56) & 0xff00000000000000ULL) |
((in << 40) & 0x00ff000000000000ULL) |
((in << 24) & 0x0000ff0000000000ULL) |
((in << 8) & 0x000000ff00000000ULL) |
((in >> 8) & 0x00000000ff000000ULL) |
((in >> 24) & 0x0000000000ff0000ULL) |
((in >> 40) & 0x000000000000ff00ULL) |
((in >> 56) & 0x00000000000000ffULL);
#endif
}
/* *** library symbols visibility *** */
/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
* HUF symbols remain "private" (internal symbols for library only).
* Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
# define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */
# define HUF_PUBLIC_API __declspec(dllexport)
#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
#else
# define HUF_PUBLIC_API
#endif
/* ========================== */
/* *** simple functions *** */
/* ========================== */
/** HUF_compress() :
* Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'.
* 'dst' buffer must be already allocated.
* Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize).
* `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB.
* @return : size of compressed data (<= `dstCapacity`).
* Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
* if HUF_isError(return), compression failed (more details using HUF_getErrorName())
*/
HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
/** HUF_decompress() :
* Decompress HUF data from buffer 'cSrc', of size 'cSrcSize',
* into already allocated buffer 'dst', of minimum size 'dstSize'.
* `originalSize` : **must** be the ***exact*** size of original (uncompressed) data.
* Note : in contrast with FSE, HUF_decompress can regenerate
* RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data,
* because it knows size to regenerate (originalSize).
* @return : size of regenerated data (== originalSize),
* or an error code, which can be tested using HUF_isError()
*/
HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize);
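#if 0   /* Usage sketch (illustrative only, not part of this header) :
 * round-trips `src` through HUF_compress()/HUF_decompress().
 * Assumes : srcSize <= HUF_BLOCKSIZE_MAX, dstCapacity >= HUF_compressBound(srcSize),
 * and `regen` has room for srcSize bytes. The caller must remember srcSize,
 * since HUF_decompress() needs the exact original size. */
static size_t example_HUF_roundTrip(void* dst, size_t dstCapacity,
                                    void* regen,
                                    const void* src, size_t srcSize)
{
    size_t const cSize = HUF_compress(dst, dstCapacity, src, srcSize);
    if (HUF_isError(cSize)) return cSize;    /* details via HUF_getErrorName(cSize) */
    if (cSize == 0) return 0;                /* not compressible : store src uncompressed */
    return HUF_decompress(regen, srcSize, dst, cSize);   /* returns srcSize on success */
}
#endif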
#include "mem.h" /* U32 */
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
/* *** Advanced function *** */
/** HUF_compress2() :
* Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`.
* `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX .
* `tableLog` must be `<= HUF_TABLELOG_MAX` . */
HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog);
/** HUF_compress4X_wksp() :
* Same as HUF_compress2(), but uses externally allocated `workSpace`.
* `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned tableLog,
void* workSpace, size_t wkspSize);
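#if 0   /* Usage sketch (illustrative only) : compression without any internal
 * allocation, using a stack workspace of HUF_WORKSPACE_SIZE bytes.
 * 255 and 11 are illustrative values (<= HUF_SYMBOLVALUE_MAX and <= HUF_TABLELOG_MAX). */
static size_t example_HUF_compress_noAlloc(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
{
    U32 workSpace[HUF_WORKSPACE_SIZE_U32];   /* U32-aligned, HUF_WORKSPACE_SIZE bytes */
    return HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                               255 /* maxSymbolValue */, 11 /* tableLog */,
                               workSpace, sizeof(workSpace));
}
#endif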
#endif /* HUF_H_298734234 */
/* ******************************************************************
* WARNING !!
* The following section contains advanced and experimental definitions
* which shall never be used in the context of a dynamic library,
* because they are not guaranteed to remain stable in the future.
* Only consider them in association with static linking.
* *****************************************************************/
#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY)
#define HUF_H_HUF_STATIC_LINKING_ONLY
unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
typedef size_t HUF_CElt; /* consider it an incomplete type */
#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */
#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
#endif
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
#endif
/**
* Huffman flags bitset.
* For all flags, 0 is the default value.
*/
typedef enum {
/**
* If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
* Otherwise: Ignored.
*/
HUF_flags_bmi2 = (1 << 0),
/**
* If set: Test possible table depths to find the one that produces the smallest header + encoded size.
* If unset: Use heuristic to find the table depth.
*/
HUF_flags_optimalDepth = (1 << 1),
/**
* If set: If the previous table can encode the input, always reuse the previous table.
* If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
*/
HUF_flags_preferRepeat = (1 << 2),
/**
* If set: Sample the input and check if the sample is incompressible; if so, don't attempt to compress.
* If unset: Always histogram the entire input.
*/
HUF_flags_suspectUncompressible = (1 << 3),
/**
* If set: Don't use assembly implementations
* If unset: Allow using assembly implementations
*/
HUF_flags_disableAsm = (1 << 4),
/**
* If set: Don't use the fast decoding loop, always use the fallback decoding loop.
* If unset: Use the fast decoding loop when possible.
*/
HUF_flags_disableFast = (1 << 5)
} HUF_flags_e;
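#if 0   /* Usage sketch (illustrative only) : flags form a bitset and are combined
 * with bitwise OR before being passed as the `flags` argument of the functions below. */
int const exampleFlags = HUF_flags_bmi2 | HUF_flags_optimalDepth;
#endif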
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
unsigned HUF_minTableLog(unsigned symbolCardinality);
unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
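#if 0   /* Usage sketch (illustrative only) : explicit pipeline
 * histogram -> HUF_buildCTable() -> HUF_writeCTable() -> HUF_compress4X_usingCTable().
 * Error handling is kept minimal; a real caller should also handle the
 * "not compressible" cases. */
static size_t example_HUF_compressWithCTable(void* dst, size_t dstCapacity,
                                             const BYTE* src, size_t srcSize)
{
    unsigned count[256] = { 0 };
    HUF_CElt cTable[HUF_CTABLE_SIZE_ST(255)];
    unsigned const maxSymbolValue = 255;
    size_t i;
    for (i = 0; i < srcSize; i++) count[src[i]]++;   /* histogram */
    {   unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_MAX, srcSize, maxSymbolValue);
        size_t const maxNbBits = HUF_buildCTable(cTable, count, maxSymbolValue, tableLog);
        if (HUF_isError(maxNbBits)) return maxNbBits;
        {   size_t const hSize = HUF_writeCTable(dst, dstCapacity, cTable, maxSymbolValue, (unsigned)maxNbBits);
            size_t cSize;
            if (HUF_isError(hSize)) return hSize;
            cSize = HUF_compress4X_usingCTable((BYTE*)dst + hSize, dstCapacity - hSize,
                                               src, srcSize, cTable);
            if (HUF_isError(cSize)) return cSize;
            return hSize + cSize;
        }
    }
}
#endif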
/** HUF_getNbBits() :
 * Note 1 : is not inlined, as HUF_CElt definition is private
 * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
/** HUF_getNbBitsFromCTable() :
 * Note 1 : is not inlined, as HUF_CElt definition is private */
U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
#endif
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif
size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
#endif
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
#endif
HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
#ifndef HUF_FORCE_DECOMPRESS_X2
size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif
size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */
size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
#endif
#ifndef HUF_FORCE_DECOMPRESS_X1
size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSV1 = tableMask+1;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<maxSV1; s++) {
    dinfo[s].newState = 0;
    dinfo[s].symbol = (BYTE)s;
    dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
const void* ptr = dt;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
{
size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
if (FSE_isError(NCountLength)) return NCountLength;
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
assert(NCountLength <= cSrcSize);
ip += NCountLength;
cSrcSize -= NCountLength;
}
assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
#ifndef ZSTD_NO_UNUSED_FUNCTIONS
size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) {
U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)];
return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp));
}
size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
{
/* Static analyzer seems unable to understand this table will be properly initialized later */
U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp));
}
#endif
/*-****************************************
* FSE simple functions
******************************************/
/*! FSE_compress() :
Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'.
'dst' buffer must be already allocated. Compression runs faster if dstCapacity >= FSE_compressBound(srcSize).
@return : size of compressed data (<= dstCapacity).
Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead.
if FSE_isError(return), compression failed (more details using FSE_getErrorName())
*/
FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity,
const void* src, size_t srcSize);
/*! FSE_decompress():
Decompress FSE data from buffer 'cSrc', of size 'cSrcSize',
into already allocated destination buffer 'dst', of size 'dstCapacity'.
@return : size of regenerated data (<= dstCapacity),
or an error code, which can be tested using FSE_isError() .
** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!!
Why ? : making this distinction requires a header.
Header management is intentionally delegated to the user layer, which can better manage special cases.
*/
FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity,
const void* cSrc, size_t cSrcSize);
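#if 0   /* Usage sketch (illustrative only) : dispatch on the special return values
 * of FSE_compress(). The chosen block type must be recorded by the caller's own
 * framing, since FSE_decompress() only handles the fully compressed case.
 * All `example_*` names are hypothetical. */
typedef enum { example_block_compressed, example_block_raw, example_block_rle, example_block_error } example_blockType_e;
static example_blockType_e example_FSE_compressBlock(void* dst, size_t dstCapacity,
                                                     const void* src, size_t srcSize,
                                                     size_t* cSizePtr)
{
    size_t const cSize = FSE_compress(dst, dstCapacity, src, srcSize);
    *cSizePtr = cSize;
    if (FSE_isError(cSize)) return example_block_error;   /* details : FSE_getErrorName(cSize) */
    if (cSize == 0) return example_block_raw;             /* not compressible : store src as-is */
    if (cSize == 1) return example_block_rle;             /* single repeated symbol : RLE-encode */
    return example_block_compressed;
}
#endif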
/*-*****************************************
* FSE advanced functions
******************************************/
/*! FSE_compress2() :
Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog'
Both parameters can be defined as '0' to mean : use default value
@return : size of compressed data
Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!!
if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression.
if FSE_isError(return), it's an error code.
*/
FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
/*-*****************************************
*  FSE detailed API
******************************************/
FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog);
FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt);
/*! FSE_buildDTable():
Builds 'dt', which must be already allocated, using FSE_createDTable().
return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! FSE_decompress_usingDTable():
Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= `dstCapacity`),
or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
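#if 0   /* Usage sketch (illustrative only) : detailed decoding path.
 * Read the normalized counter, build a DTable, then decode the bitstream that
 * follows the header. Assumes the standard FSE_readNCount() declaration from
 * fse.h and maxSymbolValue <= 255. */
static size_t example_FSE_decodeWithDTable(void* dst, size_t dstCapacity,
                                           const void* cSrc, size_t cSrcSize)
{
    short ncount[256];
    unsigned maxSymbolValue = 255;
    unsigned tableLog = 0;
    size_t const hSize = FSE_readNCount(ncount, &maxSymbolValue, &tableLog, cSrc, cSrcSize);
    if (FSE_isError(hSize)) return hSize;
    {   FSE_DTable* const dt = FSE_createDTable(tableLog);
        size_t result;
        if (dt == NULL) return (size_t)-1;   /* allocation failure */
        result = FSE_buildDTable(dt, ncount, maxSymbolValue, tableLog);
        if (!FSE_isError(result))
            result = FSE_decompress_usingDTable(dst, dstCapacity,
                                                (const char*)cSrc + hSize, cSrcSize - hSize, dt);
        FSE_freeDTable(dt);
        return result;
    }
}
#endif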
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* FSE_COMPRESS_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
*/
#define FSE_COMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */)
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
#define FSE_DECOMPRESS_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) (FSE_DTABLE_SIZE_U32(maxTableLog) + 1 + FSE_BUILD_DTABLE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) + (FSE_MAX_SYMBOL_VALUE + 1) / 2 + 1)
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)` */
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DECOMPRESS_WKSP_SIZE_U32(maxLog, maxSymbolValue)`.
* Set bmi2 to 1 if your CPU supports BMI2 or 0 if it doesn't */
}
/**
* Ignore: this is an internal helper.
*
* This is a helper function to help force C99-correctness during compilation.
* Under strict compilation modes, variadic macro arguments can't be empty.
* However, variadic function arguments can be. Using a function therefore lets
* us statically check that at least one (string) argument was passed,
* independent of the compilation flags.
*/
static INLINE_KEYWORD UNUSED_ATTR
void _force_has_format_string(const char *format, ...) {
(void)format;
}
/**
* Ignore: this is an internal helper.
*
* We want to force this function invocation to be syntactically correct, but
* we don't want to force runtime evaluation of its arguments.
*/
#define _FORCE_HAS_FORMAT_STRING(...) \
if (0) { \
_force_has_format_string(__VA_ARGS__); \
}
#define ERR_QUOTE(str) #str
/**
* Return the specified error if the condition evaluates to true.
*
* In debug modes, prints additional information.
* In order to do that (particularly, printing the conditional that failed),
* this can't just wrap RETURN_ERROR().
*/
#define RETURN_ERROR_IF(cond, err, ...) \
if (cond) { \
RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \
__FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
}
/**
* Unconditionally return the specified error.
*
* In debug modes, prints additional information.
*/
#define RETURN_ERROR(err, ...) \
do { \
RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \
__FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return ERROR(err); \
} while(0);
/**
* If the provided expression evaluates to an error code, returns that error code.
*
* In debug modes, prints additional information.
*/
#define FORWARD_IF_ERROR(err, ...) \
do { \
size_t const err_code = (err); \
if (ERR_isError(err_code)) { \
RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \
__FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \
_FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \
RAWLOG(3, ": " __VA_ARGS__); \
RAWLOG(3, "\n"); \
return err_code; \
} \
} while(0);
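#if 0   /* Usage sketch (illustrative only) : a hypothetical parameter check using
 * the macros above. `GENERIC` and `dstSize_tooSmall` are existing zstd error names;
 * example_internalStep() is a made-up helper standing in for any size_t-returning call. */
static size_t example_internalStep(void* dst, size_t dstCapacity) { (void)dst; return dstCapacity; }
static size_t example_checkParams(void* dst, size_t dstCapacity, size_t contentSize)
{
    RETURN_ERROR_IF(dst == NULL, GENERIC, "destination buffer is NULL");
    RETURN_ERROR_IF(dstCapacity < contentSize, dstSize_tooSmall,
                    "need %u bytes, have %u", (unsigned)contentSize, (unsigned)dstCapacity);
    FORWARD_IF_ERROR(example_internalStep(dst, dstCapacity), "internal step failed");
    return contentSize;
}
#endif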
static U32 FSE_ctz(U32 val)
{
assert(val != 0);
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
return _BitScanForward(&r, val) ? (unsigned)r : 0;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return __builtin_ctz(val);
# elif defined(__ICCARM__) /* IAR Intrinsic */
return __CTZ(val);
# else /* Software version */
U32 count = 0;
while ((val & 1) == 0) {
val >>= 1;
++count;
}
return count;
# endif
}
}
return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
/* Enable runtime BMI2 dispatch based on the CPU.
 * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. */
#ifndef DYNAMIC_BMI2
#if ((defined(__clang__) && __has_attribute(__target__)) \
|| (defined(__GNUC__) \
&& (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
&& (defined(__x86_64__) || defined(_M_X86)) \
&& !defined(__BMI2__)
# define DYNAMIC_BMI2 1
#else
# define DYNAMIC_BMI2 0
#endif
#endif
/* Target attribute for BMI2 dynamic dispatch.
 * Enable lzcnt, bmi, and bmi2.
 * We test for bmi1 & bmi2. lzcnt is included in bmi1. */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
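#if 0   /* Usage sketch (illustrative only) : the dynamic-dispatch shape enabled by
 * DYNAMIC_BMI2 -- the same body is compiled twice, once with BMI2_TARGET_ATTRIBUTE,
 * and selected at runtime. All names here are made up for illustration. */
static size_t example_body(const void* src, size_t srcSize) { (void)src; return srcSize; }
static size_t example_default(const void* src, size_t srcSize) { return example_body(src, srcSize); }
#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t example_bmi2(const void* src, size_t srcSize) { return example_body(src, srcSize); }
#endif
static size_t example_dispatch(const void* src, size_t srcSize, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) return example_bmi2(src, srcSize);
#endif
    (void)bmi2;
    return example_default(src, srcSize);
}
#endif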
/* older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax,
 * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
/* compat. with non-clang compilers */
#ifndef __has_builtin
# define __has_builtin(x) 0
#endif
/* compile time determination of SIMD support */
#if !defined(ZSTD_NO_INTRINSICS)
# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
# define ZSTD_ARCH_X86_SSE2
# endif
# if defined(__ARM_NEON) || defined(_M_ARM64)
# define ZSTD_ARCH_ARM_NEON
# endif
#
# if defined(ZSTD_ARCH_X86_SSE2)
# include <emmintrin.h>
# elif defined(ZSTD_ARCH_ARM_NEON)
# include <arm_neon.h>
# endif
#endif /* !defined(ZSTD_NO_INTRINSICS) */
/* compat. with non-clang compilers */
#ifndef __has_feature
# define __has_feature(x) 0
#endif
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define ZSTD_HAS_C_ATTRIBUTE(x) 0
#endif
/* detects whether we are being compiled under msan */
#ifndef ZSTD_MEMORY_SANITIZER
# if __has_feature(memory_sanitizer)
# define ZSTD_MEMORY_SANITIZER 1
# else
# define ZSTD_MEMORY_SANITIZER 0
# endif
#endif /* ZSTD_MEMORY_SANITIZER */
/* Only use C++ attributes in C++. Some compilers report support for C++
* attributes when compiling with C.
*/
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
#endif
/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute.
* - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
* - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
* - Else: __attribute__((__fallthrough__))
*/
#ifndef ZSTD_FALLTHROUGH
# if ZSTD_HAS_C_ATTRIBUTE(fallthrough)
# define ZSTD_FALLTHROUGH [[fallthrough]]
# elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough)
# define ZSTD_FALLTHROUGH [[fallthrough]]
# elif __has_attribute(__fallthrough__)
/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon
* gcc complains about: a label can only be part of a statement and a declaration is not a statement.
*/
# define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__))
# else
# define ZSTD_FALLTHROUGH
# endif
#endif /* ZSTD_FALLTHROUGH */
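#if 0   /* Usage sketch (illustrative only) : annotating an intentional fall-through
 * so -Wimplicit-fallthrough stays quiet. */
static int example_fallthrough(int v)
{
    switch (v) {
    case 0:
        v += 10;
        ZSTD_FALLTHROUGH;   /* intentional : also apply case 1 */
    case 1:
        v += 1;
        break;
    default:
        break;
    }
    return v;
}
#endif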
/*-**************************************************************
* Alignment check
*****************************************************************/
/* this test was initially positioned in mem.h,
* but this file is removed (or replaced) for linux kernel
* so it's now hosted in compiler.h,
* which remains valid for both user & kernel spaces.
*/
#ifndef ZSTD_ALIGNOF
# if defined(__GNUC__) || defined(_MSC_VER)
/* covers gcc, clang & MSVC */
/* note : this section must come first, before C11,
* due to a limitation in the kernel source generator */
# define ZSTD_ALIGNOF(T) __alignof(T)
# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
/* C11 support */
# include <stdalign.h>
# define ZSTD_ALIGNOF(T) alignof(T)
# else
/* No known support for alignof() - imperfect backup */
# define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
# endif
#endif /* ZSTD_ALIGNOF */
/*-**************************************************************
* Sanitizer
*****************************************************************/
/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
* abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__
#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
#endif
#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
#endif
#endif
#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* detects whether we are being compiled under asan */
#ifndef ZSTD_ADDRESS_SANITIZER
# if __has_feature(address_sanitizer)
# define ZSTD_ADDRESS_SANITIZER 1
# elif defined(__SANITIZE_ADDRESS__)
# define ZSTD_ADDRESS_SANITIZER 1
# else
# define ZSTD_ADDRESS_SANITIZER 0
# endif
#endif
#if ZSTD_ADDRESS_SANITIZER
#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
/*-**************************************************************
* Internal functions
****************************************************************/
MEM_STATIC unsigned BIT_highbit32 (U32 val)
{
assert(val != 0);
{
# if defined(_MSC_VER) /* Visual */
# if STATIC_BMI2 == 1
return _lzcnt_u32(val) ^ 31;
# else
unsigned long r = 0;
return _BitScanReverse(&r, val) ? (unsigned)r : 0;
# endif
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return __builtin_clz (val) ^ 31;
# elif defined(__ICCARM__) /* IAR Intrinsic */
return 31 - __CLZ(val);
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29,
11, 14, 16, 18, 22, 25, 3, 30,
8, 12, 20, 28, 15, 17, 24, 7,
19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
# endif
}
}
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
#if defined(STATIC_BMI2) && STATIC_BMI2 == 1 && !defined(ZSTD_NO_INTRINSICS)
return _bzhi_u64(bitContainer, nbBits);
#else
assert(nbBits < BIT_MASK_SIZE);
return bitContainer & BIT_mask[nbBits];
#endif
}
MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
    U32 const regMask = sizeof(bitContainer)*8 - 1;
    /* if start > regMask, bitstream is corrupted, and result is undefined */
    assert(nbBits < BIT_MASK_SIZE);
    /* x86 transforms & ((1 << nbBits) - 1) into a bzhi instruction, which is better
     * than accessing memory. When the bmi2 instruction is not present, we consider
     * such cpus old (pre-Haswell, 2013) and their performance is not of that
     * importance.
     */
#if defined(__x86_64__) || defined(_M_X86)
    return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1);
#else
    return (bitContainer >> (start & regMask)) & BIT_mask[nbBits];
#endif
}
/* ************************************************************
* Detect POSIX version
* PLATFORM_POSIX_VERSION = 0 for non-Unix e.g. Windows
* PLATFORM_POSIX_VERSION = 1 for Unix-like but non-POSIX
* PLATFORM_POSIX_VERSION > 1 is equal to found _POSIX_VERSION
* Value of PLATFORM_POSIX_VERSION can be forced on command line
***************************************************************/
#ifndef PLATFORM_POSIX_VERSION
# if (defined(__APPLE__) && defined(__MACH__)) || defined(__SVR4) || defined(_AIX) || defined(__hpux) /* POSIX.1-2001 (SUSv3) conformant */ \
|| defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) /* BSD distros */
/* exception rule : force posix version to 200112L,
* note: it's better to use unistd.h's _POSIX_VERSION whenever possible */
# define PLATFORM_POSIX_VERSION 200112L
/* try to determine posix version through official unistd.h's _POSIX_VERSION (https://pubs.opengroup.org/onlinepubs/7908799/xsh/unistd.h.html).
* note : there is no simple way to know in advance if <unistd.h> is present or not on target system,
* Posix specification mandates its presence and its content, but target system must respect this spec.
* It's necessary to _not_ #include <unistd.h> whenever target OS is not unix-like
* otherwise it will block preprocessing stage.
* The following list of build macros tries to "guess" if target OS is likely unix-like, and therefore can #include <unistd.h>
*/
# elif !defined(_WIN32) \
&& ( defined(__unix__) || defined(__unix) \
|| defined(__midipix__) || defined(__VMS) || defined(__HAIKU__) )
# if defined(__linux__) || defined(__linux) || defined(__CYGWIN__)
# ifndef _POSIX_C_SOURCE
# define _POSIX_C_SOURCE 200809L /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
# endif
# endif
# include <unistd.h> /* declares _POSIX_VERSION */
# if defined(_POSIX_VERSION) /* POSIX compliant */
# define PLATFORM_POSIX_VERSION _POSIX_VERSION
# else
# define PLATFORM_POSIX_VERSION 1
# endif
# ifdef __UCLIBC__
# ifndef __USE_MISC
# define __USE_MISC /* enable st_mtim on uclibc */
# endif
# endif
# else /* non-unix target platform (like Windows) */
# define PLATFORM_POSIX_VERSION 0
# endif
#endif /* PLATFORM_POSIX_VERSION */
return 0;
}
ZSTD_seekTable* ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable* zs)
{
ZSTD_seekTable* const st = (ZSTD_seekTable*)malloc(sizeof(ZSTD_seekTable));
if (st==NULL) return NULL;
st->checksumFlag = zs->seekTable.checksumFlag;
st->tableLen = zs->seekTable.tableLen;
/* Allocate an extra entry at the end to match logic of initial allocation */
size_t const entriesSize = sizeof(seekEntry_t) * (zs->seekTable.tableLen + 1);
seekEntry_t* const entries = (seekEntry_t*)malloc(entriesSize);
if (entries==NULL) {
free(st);
return NULL;
}
memcpy(entries, zs->seekTable.entries, entriesSize);
st->entries = entries;
return st;
}
unsigned ZSTD_seekable_offsetToFrameIndex(const ZSTD_seekable* zs, unsigned long long pos)
{
return ZSTD_seekTable_offsetToFrameIndex(&zs->seekTable, pos);
}
unsigned ZSTD_seekTable_offsetToFrameIndex(const ZSTD_seekTable* st, unsigned long long pos)
if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
return zs->seekTable.entries[frameIndex + 1].cOffset -
zs->seekTable.entries[frameIndex].cOffset;
if (frameIndex >= st->tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
return st->entries[frameIndex].cOffset;
if (frameIndex > zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
return zs->seekTable.entries[frameIndex + 1].dOffset -
zs->seekTable.entries[frameIndex].dOffset;
return ZSTD_seekTable_getFrameDecompressedOffset(&zs->seekTable, frameIndex);
}
unsigned long long ZSTD_seekTable_getFrameDecompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex)
{
if (frameIndex >= st->tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
return st->entries[frameIndex].dOffset;
}
size_t ZSTD_seekable_getFrameCompressedSize(const ZSTD_seekable* zs, unsigned frameIndex)
{
return ZSTD_seekTable_getFrameCompressedSize(&zs->seekTable, frameIndex);
}
size_t ZSTD_seekTable_getFrameCompressedSize(const ZSTD_seekTable* st, unsigned frameIndex)
{
if (frameIndex >= st->tableLen) return ERROR(frameIndex_tooLarge);
return st->entries[frameIndex + 1].cOffset -
st->entries[frameIndex].cOffset;
}
size_t ZSTD_seekable_getFrameDecompressedSize(const ZSTD_seekable* zs, unsigned frameIndex)
{
return ZSTD_seekTable_getFrameDecompressedSize(&zs->seekTable, frameIndex);
}
size_t ZSTD_seekTable_getFrameDecompressedSize(const ZSTD_seekTable* st, unsigned frameIndex)
{
if (frameIndex >= st->tableLen) return ERROR(frameIndex_tooLarge);
return st->entries[frameIndex + 1].dOffset -
st->entries[frameIndex].dOffset;
forwardProgress = outTmp.pos - prevOutPos;
if (forwardProgress == 0) {
if (noOutputProgressCount++ > ZSTD_SEEKABLE_NO_OUTPUT_PROGRESS_MAX) {
return ERROR(seekableIO);
}
} else {
noOutputProgressCount = 0;
}
zs->decompressedOffset += forwardProgress;
srcBytesRead += zs->in.pos - prevInPos;
/*===== Seekable seek table access functions =====*/
ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(const ZSTD_seekable* zs);
ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex);
ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameDecompressedOffset(const ZSTD_seekable* zs, unsigned frameIndex);
ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(const ZSTD_seekable* zs, unsigned frameIndex);
ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(const ZSTD_seekable* zs, unsigned frameIndex);
ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(const ZSTD_seekable* zs, unsigned long long offset);
/*-****************************************************************************
* Direct exploitation of the seekTable
*
* Memory constrained use cases that manage multiple archives
* benefit from retaining multiple archive seek tables
* without retaining a ZSTD_seekable instance for each.
*
* The API below allows the above-mentioned use cases
* to initialize a ZSTD_seekable, extract its (smaller) ZSTD_seekTable,
* then throw the ZSTD_seekable away to save memory.
*
* Standard ZSTD operations can then be used
* to decompress frames based on seek table offsets.
******************************************************************************/
/*===== Independent seek table management =====*/
ZSTDLIB_API ZSTD_seekTable* ZSTD_seekTable_create_fromSeekable(const ZSTD_seekable* zs);
ZSTDLIB_API size_t ZSTD_seekTable_free(ZSTD_seekTable* st);
/*===== Direct seek table access functions =====*/
ZSTDLIB_API unsigned ZSTD_seekTable_getNumFrames(const ZSTD_seekTable* st);
ZSTDLIB_API unsigned long long ZSTD_seekTable_getFrameCompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex);
ZSTDLIB_API unsigned long long ZSTD_seekTable_getFrameDecompressedOffset(const ZSTD_seekTable* st, unsigned frameIndex);
ZSTDLIB_API size_t ZSTD_seekTable_getFrameCompressedSize(const ZSTD_seekTable* st, unsigned frameIndex);
ZSTDLIB_API size_t ZSTD_seekTable_getFrameDecompressedSize(const ZSTD_seekTable* st, unsigned frameIndex);
ZSTDLIB_API unsigned ZSTD_seekTable_offsetToFrameIndex(const ZSTD_seekTable* st, unsigned long long offset);
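#if 0   /* Usage sketch (illustrative only) : retain only the (smaller) seek table.
 * Initialize a ZSTD_seekable, extract its table, then free the seekable.
 * Assumes ZSTD_seekable_create(), ZSTD_seekable_initBuff() and ZSTD_seekable_free()
 * from this header, and a complete seekable archive in memory at `src`/`srcSize`. */
static ZSTD_seekTable* example_extractSeekTable(const void* src, size_t srcSize)
{
    ZSTD_seekable* const zs = ZSTD_seekable_create();
    ZSTD_seekTable* st = NULL;
    if (zs == NULL) return NULL;
    if (!ZSTD_isError(ZSTD_seekable_initBuff(zs, src, srcSize)))
        st = ZSTD_seekTable_create_fromSeekable(zs);
    ZSTD_seekable_free(zs);   /* the extracted table remains valid */
    return st;
}
/* Frames can later be decompressed with regular ZSTD_decompress(), slicing `src`
 * with ZSTD_seekTable_getFrameCompressedOffset()/Size(). */
#endif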
use std::ffi::{c_void, CStr};
#[cfg(not(target_family = "wasm"))]
use std::ffi::CString;
#[cfg(target_family = "wasm")]
type c_longlong = i64;
pub fn version() -> &'static CStr {
unsafe {
CStr::from_ptr(ZSTD_versionString())
}
}
with import <nixpkgs> {
overlays = map (uri: import (fetchTarball uri)) [
https://github.com/mozilla/nixpkgs-mozilla/archive/master.tar.gz
];
};
let rust = ((rustChannelOf { channel = "stable"; })
.rust.override {
targets = [
"wasm32-unknown-unknown"
# "wasm32-unknown-emscripten"
# "wasm32-wasi"
];
});
in
if true || pkg_config::Config::new()
.statik(true)
.probe("libzstd")
.is_err()
{
let mut cc = cc::Build::new();
if !cfg!(target_arch = "x86_64") || !cfg!(target_family="unix") {
cc.define("ZSTD_DISABLE_ASM", "1");
}
for &dir in &[
"zstd/lib/common",
"zstd/lib/compress",
"zstd/lib/decompress",
] {
cc.include(dir);
for entry in std::fs::read_dir(dir).unwrap() {
let entry = entry.unwrap();
let path = entry.path();
let name = path.file_name();
if let Some(f) = name {
let ext = std::path::Path::new(f)
.extension()
.map(|v| v.to_str().unwrap());
if ext == Some("c") {
cc.file(path);
} else if ext == Some("S") && cfg!(target_arch = "x86_64") && cfg!(target_family="unix") {
cc.file(path);
}
}
}
}
cc.opt_level(3).compile("zstd");
}
let out_dir = std::env::var("OUT_DIR").unwrap();
let out_dirp = std::path::Path::new(&out_dir);
std::fs::create_dir_all(&out_dir).unwrap();
if cfg!(target_family = "windows") && cfg!(target_arch = "x86") {
std::fs::copy("libzstd-win32.lib", &out_dirp.join("zstd.lib")).unwrap();
// Link with static library in this directory.
println!("cargo:rustc-link-search={}", out_dir);
println!("cargo:rustc-link-lib=zstd");
} else if cfg!(target_family = "windows") && cfg!(target_arch = "x86") {
std::fs::copy("libzstd-win64.lib", &out_dirp.join("zstd.lib")).unwrap();
// Link with static library in this directory.
println!("cargo:rustc-link-search={}", out_dir);
println!("cargo:rustc-link-lib=zstd");
} else {
if pkg_config::Config::new()
.range_version("1.4.0".."1.5.0")
.statik(true)
.probe("libzstd")
.is_err()
{
// Not found via pkg-config: build the bundled zstd with its Makefile
let options = fs_extra::dir::CopyOptions::new()
.skip_exist(true);
let out = out_dirp.join("zstd");
std::fs::create_dir_all(&out).unwrap();
fs_extra::dir::copy("zstd/lib", &out, &options).unwrap();
let dir = out.join("lib");
let mut make = std::process::Command::new("make")
.current_dir(&dir)
.spawn()
.unwrap();
let status = make.wait().unwrap();
assert!(status.success());
println!("cargo:rustc-link-search={}", dir.display());
println!("cargo:rustc-link-lib=zstd");
} else {
// Already found by pkg-config
}
}
if pkg_config::probe_library("libxxhash").is_err() {
println!("cargo:rustc-link-lib=xxhash");
}
"zstd/lib/legacy",
"zstd/lib/legacy/zstd_v06.c",
"zstd/lib/legacy/zstd_v04.h",
"zstd/lib/legacy/zstd_v04.c",
"zstd/lib/legacy/zstd_v01.h",
"zstd/lib/legacy/zstd_v06.h",
"zstd/lib/legacy/zstd_v07.c",
"zstd/lib/legacy/zstd_v07.h",
"zstd/lib/legacy/zstd_v01.c",
"zstd/lib/legacy/zstd_v02.c",
"zstd/lib/legacy/zstd_v05.h",
"zstd/lib/legacy/zstd_v02.h",
"zstd/lib/legacy/zstd_v03.h",
"zstd/lib/legacy/zstd_legacy.h",
"zstd/lib/legacy/zstd_v03.c",
"zstd/lib/legacy/zstd_v05.c",
"zstd/lib/common",