Merge pull request #2896 from facebook/m68k

Zstandard compiles and runs on m68k cpus

commit 1bf3d8a475

.github/workflows/dev-short-tests.yml (vendored)
@@ -294,7 +294,6 @@ jobs:
         LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check
     - name: M68K
       if: ${{ matrix.name == 'M68K' }}
-      continue-on-error: true # disable reporting errors (alignment issues)
       run: |
         LDFLAGS="-static" CC=$XCC QEMU_SYS=$XEMU make clean check
lib/common/compiler.h

@@ -285,6 +285,39 @@
 # endif
 #endif
+
+/*-**************************************************************
+*  Alignment check
+*****************************************************************/
+
+/* this test was initially positioned in mem.h,
+ * but this file is removed (or replaced) for linux kernel
+ * so it's now hosted in compiler.h,
+ * which remains valid for both user & kernel spaces.
+ */
+
+#ifndef ZSTD_ALIGNOF
+# if defined(__GNUC__) || defined(_MSC_VER)
+   /* covers gcc, clang & MSVC */
+   /* note : this section must come first, before C11,
+    * due to a limitation in the kernel source generator */
+#  define ZSTD_ALIGNOF(T) __alignof(T)
+
+# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+   /* C11 support */
+#  include <stdalign.h>
+#  define ZSTD_ALIGNOF(T) alignof(T)
+
+# else
+   /* No known support for alignof() - imperfect backup */
+#  define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))
+
+# endif
+#endif /* ZSTD_ALIGNOF */
+
 /*-**************************************************************
 *  Sanitizer
 *****************************************************************/

 #if ZSTD_MEMORY_SANITIZER
 /* Not all platforms that support msan provide sanitizers/msan_interface.h.
  * We therefore declare the functions we need ourselves, rather than trying to
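For anyone checking what the new macro resolves to on their toolchain, the standalone sketch below repeats the three-way dispatch from the hunk above in a throwaway program. The Mixed struct and the comments about printed values are illustrative assumptions, not part of zstd; actual results depend on compiler and ABI (m68k-linux commonly reports 2 where x86-64 would report 4 or 8).

/* Standalone illustration of the ZSTD_ALIGNOF dispatch added above.
 * This mirrors the hunk for demonstration only; it is not the shipped header. */
#include <stdio.h>
#include <stddef.h>

#ifndef ZSTD_ALIGNOF
# if defined(__GNUC__) || defined(_MSC_VER)
#  define ZSTD_ALIGNOF(T) __alignof(T)          /* gcc, clang & MSVC */
# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
#  include <stdalign.h>
#  define ZSTD_ALIGNOF(T) alignof(T)            /* C11 */
# else
#  define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))  /* imperfect backup */
# endif
#endif

typedef struct { char c; unsigned u; } Mixed;    /* hypothetical example type */

int main(void)
{
    /* On x86-64 this typically prints 4 / 8 / 4; on m68k-linux, 2 / 2 / 2 is
     * common, which is exactly why sizeof() was the wrong proxy for alignment. */
    printf("alignof(unsigned) = %zu\n", (size_t)ZSTD_ALIGNOF(unsigned));
    printf("alignof(void*)    = %zu\n", (size_t)ZSTD_ALIGNOF(void*));
    printf("alignof(Mixed)    = %zu\n", (size_t)ZSTD_ALIGNOF(Mixed));
    return 0;
}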
lib/compress/huf_compress.c

@@ -97,7 +97,7 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT
     unsigned maxSymbolValue = HUF_TABLELOG_MAX;
     U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
-    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(U32));
+    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

     if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);

@@ -176,7 +176,7 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
     HUF_CElt const* const ct = CTable + 1;
     BYTE* op = (BYTE*)dst;
     U32 n;
-    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, sizeof(U32));
+    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

     /* check conditions */
     if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);

@@ -679,7 +679,7 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i
 size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
 {
-    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(U32));
+    HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
     nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
     nodeElt* const huffNode = huffNode0+1;
     int nonNullRank;

@@ -1183,7 +1183,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
                        HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
                        const int bmi2, unsigned suspectUncompressible)
 {
-    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, sizeof(size_t));
+    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
     BYTE* const ostart = (BYTE*)dst;
     BYTE* const oend = ostart + dstSize;
     BYTE* op = ostart;
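All four huf_compress.c changes go through the same workspace helper, whose last argument is an alignment, not a size. Below is a hedged sketch of what such an align-up helper typically does; the name alignUpWorkspace and the NULL-on-underflow handling are illustrative assumptions, not code copied from zstd. The point relevant to this PR is that sizeof(U32) is 4 everywhere, while ZSTD_ALIGNOF(U32) is typically 2 on m68k, so the sizeof() form demanded stricter alignment than the ABI actually requires.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative sketch: round `workspace` up to an `align`-byte boundary and
 * shrink the remaining size accordingly. `align` must be a power of two. */
void* alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
{
    uintptr_t const addr = (uintptr_t)workspace;
    size_t const mask = align - 1;
    size_t const add  = (size_t)((align - (addr & mask)) & mask);  /* bytes skipped to reach alignment */
    assert((align & (align - 1)) == 0);                            /* power-of-two alignment only */
    if (*workspaceSizePtr < add) { *workspaceSizePtr = 0; return NULL; }
    *workspaceSizePtr -= add;
    return (char*)workspace + add;
}

With an alignment argument of 2 such a helper skips at most 1 padding byte, whereas a sizeof(U32)-based argument of 4 could skip up to 3 and insists on alignment the type does not need on m68k.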
lib/compress/zstd_cwksp.h

@@ -422,8 +422,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     DEBUGLOG(5,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
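The zstd_cwksp.h hunk applies the same idea to the sanity checks: the old asserts built a mask from sizeof(void*), which on 32-bit m68k asks for 4-byte alignment the ABI does not promise for pointers, whereas the modulo test against ZSTD_ALIGNOF(void*) asks only for the real requirement. A tiny illustration with a made-up address and stand-in constants (both values below are assumptions for the example, not taken from zstd):

#include <assert.h>
#include <stddef.h>

int main(void)
{
    size_t const addr      = 0x1002;  /* hypothetical buffer address: 2-byte aligned, not 4-byte aligned */
    size_t const abi_align = 2;       /* stand-in for ZSTD_ALIGNOF(void*) on m68k */
    size_t const ptr_size  = 4;       /* sizeof(void*) on 32-bit m68k */

    assert(addr % abi_align == 0);           /* the new-style check accepts this address */
    assert((addr & (ptr_size - 1)) != 0);    /* the old mask-based check would have fired here */
    return 0;
}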