/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef MEM_H_MODULE
#define MEM_H_MODULE

#if defined (__cplusplus)
extern "C" {
#endif

/*-****************************************
*  Dependencies
******************************************/
#include <stddef.h>     /* size_t, ptrdiff_t */
#include <string.h>     /* memcpy */


/*-****************************************
*  Compiler specifics
******************************************/
#if defined(_MSC_VER)    /* Visual Studio */
#  include <stdlib.h>    /* _byteswap_ulong */
#  include <intrin.h>    /* _byteswap_* */
#endif
#if defined(__GNUC__)
#  define MEM_STATIC static __inline __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#  define MEM_STATIC static inline
#elif defined(_MSC_VER)
#  define MEM_STATIC static __inline
#else
#  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif

#ifndef __has_builtin
#  define __has_builtin(x) 0  /* compatibility with non-clang compilers */
#endif

/* code only tested on 32- and 64-bit systems */
/* MEM_STATIC_ASSERT(c) fails to compile (division by zero in a constant expression) when c is false */
#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }


/*-**************************************************************
*  Basic Types
*****************************************************************/
#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
  typedef  uint8_t BYTE;
  typedef uint16_t U16;
  typedef  int16_t S16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
  typedef  int64_t S64;
#else
#  include <limits.h>
#if CHAR_BIT != 8
#  error "this implementation requires char to be exactly 8-bit type"
#endif
  typedef unsigned char   BYTE;
#if USHRT_MAX != 65535
#  error "this implementation requires short to be exactly 16-bit type"
#endif
  typedef unsigned short  U16;
  typedef   signed short  S16;
#if UINT_MAX != 4294967295
#  error "this implementation requires int to be exactly 32-bit type"
#endif
  typedef unsigned int    U32;
  typedef   signed int    S32;
/* note : C90 defines no limits for the long long type;
 * limits exist in C99, but in that case <stdint.h> is preferred */
  typedef unsigned long long  U64;
  typedef   signed long long  S64;
#endif


/*-**************************************************************
*  Memory I/O
*****************************************************************/
/* MEM_FORCE_MEMORY_ACCESS :
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets that do not tolerate unaligned accesses.
 *            In some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2).
 */
#ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on the command line for example */
#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#    define MEM_FORCE_MEMORY_ACCESS 2
#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
#    define MEM_FORCE_MEMORY_ACCESS 1
#  endif
#endif
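
/* Illustrative override (an assumption about the surrounding build, not part of the
 * original header): the access method can be forced from the compiler command line, e.g.
 *     cc -DMEM_FORCE_MEMORY_ACCESS=0 ...   selects the portable memcpy() path (Method 0)
 *     cc -DMEM_FORCE_MEMORY_ACCESS=1 ...   selects the packed-struct path (Method 1)
 */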

MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }

MEM_STATIC unsigned MEM_isLittleEndian(void)
{
    const union { U32 u; BYTE c[4]; } one = { 1 };   /* don't use static : performance detrimental */
    return one.c[0];
}

#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)

/* violates the C standard by lying about structure alignment.
   Only use if there is no other choice to achieve best performance on the target platform */
MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

/* __pack instructions are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32))
    __pragma( pack(push, 1) )
    typedef struct { U16 v; } unalign16;
    typedef struct { U32 v; } unalign32;
    typedef struct { U64 v; } unalign64;
    typedef struct { size_t v; } unalignArch;
    __pragma( pack(pop) )
#else
    typedef struct { U16 v; } __attribute__((packed)) unalign16;
    typedef struct { U32 v; } __attribute__((packed)) unalign32;
    typedef struct { U64 v; } __attribute__((packed)) unalign64;
    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
#endif

MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; }
MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; }
MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; }
MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; }

MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; }
MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; }
MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v = value; }

#else

/* default method, safe and standard.
   It can sometimes prove slower */

MEM_STATIC U16 MEM_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U32 MEM_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC U64 MEM_read64(const void* memPtr)
{
    U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC size_t MEM_readST(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif /* MEM_FORCE_MEMORY_ACCESS */
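
/* Usage sketch (illustrative; the helper name is hypothetical and not part of this
 * header): MEM_read*() / MEM_write*() move raw bytes in native byte order, i.e. they
 * only solve the alignment problem, not endianness. */
MEM_STATIC void MEM_example_copy32(void* dst, const void* src)
{
    MEM_write32(dst, MEM_read32(src));   /* safe even when dst/src are unaligned */
}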

MEM_STATIC U32 MEM_swap32(U32 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_ulong(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap32))
    return __builtin_bswap32(in);
#else
    return  ((in << 24) & 0xff000000 ) |
            ((in <<  8) & 0x00ff0000 ) |
            ((in >>  8) & 0x0000ff00 ) |
            ((in >> 24) & 0x000000ff );
#endif
}

MEM_STATIC U64 MEM_swap64(U64 in)
{
#if defined(_MSC_VER)     /* Visual Studio */
    return _byteswap_uint64(in);
#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
  || (defined(__clang__) && __has_builtin(__builtin_bswap64))
    return __builtin_bswap64(in);
#else
    return  ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
            ((in >>  8) & 0x00000000ff000000ULL) |
            ((in >> 24) & 0x0000000000ff0000ULL) |
            ((in >> 40) & 0x000000000000ff00ULL) |
            ((in >> 56) & 0x00000000000000ffULL);
#endif
}

MEM_STATIC size_t MEM_swapST(size_t in)
{
    if (MEM_32bits())
        return (size_t)MEM_swap32((U32)in);
    else
        return (size_t)MEM_swap64((U64)in);
}
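
/* Self-check sketch (illustrative; the function name is hypothetical): byte swapping
 * fully reverses the byte order of a value, e.g. MEM_swap32(0x11223344) == 0x44332211. */
MEM_STATIC unsigned MEM_example_swapCheck(void)
{
    return (MEM_swap32(0x11223344U) == 0x44332211U)
        && (MEM_swap64(0x1122334455667788ULL) == 0x8877665544332211ULL);
}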

/*=== Little endian r/w ===*/

MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read16(memPtr);
    else {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)(p[0] + (p[1]<<8));
    }
}

MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
    if (MEM_isLittleEndian()) {
        MEM_write16(memPtr, val);
    } else {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE)val;
        p[1] = (BYTE)(val>>8);
    }
}

MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}

MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
    MEM_writeLE16(memPtr, (U16)val);
    ((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}

MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read32(memPtr);
    else
        return MEM_swap32(MEM_read32(memPtr));
}

MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, val32);
    else
        MEM_write32(memPtr, MEM_swap32(val32));
}

MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_read64(memPtr);
    else
        return MEM_swap64(MEM_read64(memPtr));
}

MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, val64);
    else
        MEM_write64(memPtr, MEM_swap64(val64));
}

MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readLE32(memPtr);
    else
        return (size_t)MEM_readLE64(memPtr);
}

MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeLE32(memPtr, (U32)val);
    else
        MEM_writeLE64(memPtr, (U64)val);
}

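/* Usage sketch (illustrative; the helper name is hypothetical): the LE helpers define
 * a fixed little-endian byte layout, so a value written through them round-trips
 * identically on little- and big-endian hosts. */
MEM_STATIC U32 MEM_example_roundTripLE32(U32 v)
{
    BYTE buf[4];
    MEM_writeLE32(buf, v);      /* buf[0] always receives the least significant byte */
    return MEM_readLE32(buf);   /* reads back v on any host */
}
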
/*=== Big endian r/w ===*/

MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap32(MEM_read32(memPtr));
    else
        return MEM_read32(memPtr);
}

MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
    if (MEM_isLittleEndian())
        MEM_write32(memPtr, MEM_swap32(val32));
    else
        MEM_write32(memPtr, val32);
}

MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
    if (MEM_isLittleEndian())
        return MEM_swap64(MEM_read64(memPtr));
    else
        return MEM_read64(memPtr);
}

MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
    if (MEM_isLittleEndian())
        MEM_write64(memPtr, MEM_swap64(val64));
    else
        MEM_write64(memPtr, val64);
}

MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
    if (MEM_32bits())
        return (size_t)MEM_readBE32(memPtr);
    else
        return (size_t)MEM_readBE64(memPtr);
}

MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
    if (MEM_32bits())
        MEM_writeBE32(memPtr, (U32)val);
    else
        MEM_writeBE64(memPtr, (U64)val);
}


#if defined (__cplusplus)
}
#endif

#endif /* MEM_H_MODULE */