@@ -25,10 +25,9 @@
 // We define a translation layer for both x86 and ARM for the ease of use and
 // most performance gains.
 
-// This implementation requires 64-bit CRC instructions (part of SSE 4.2) and
-// PCLMULQDQ instructions. 32-bit builds with SSE 4.2 do exist, so the
-// __x86_64__ condition is necessary.
-#if defined(__x86_64__) && defined(__SSE4_2__) && defined(__PCLMUL__)
+// This implementation requires CRC instructions (part of SSE 4.2) and
+// PCLMULQDQ instructions.
+#if defined(__SSE4_2__) && defined(__PCLMUL__)
 
 #include <x86intrin.h>
 #define ABSL_CRC_INTERNAL_HAVE_X86_SIMD
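With the relaxed guard, 32-bit x86 builds that enable SSE 4.2 and PCLMUL now take the SIMD path, while the 64-bit-only intrinsics are gated separately in the hunks below. As a standalone illustration (not part of the patch; the file name and compile command are only an example), this probe reports which branches a given build selects; with GCC or Clang, g++ -m32 -msse4.2 -mpclmul probe.cc exercises the 32-bit SIMD case.

// probe.cc -- illustration only; mirrors the preprocessor checks used above.
#include <cstdio>

int main() {
#if defined(__SSE4_2__) && defined(__PCLMUL__)
  std::puts("SSE4.2 + PCLMUL SIMD path enabled");
#else
  std::puts("portable fallback path");
#endif
#if defined(__x86_64__) || defined(_M_X64)
  std::puts("64-bit x86: _mm_crc32_u64 / _mm_cvtsi128_si64 available");
#else
  std::puts("32-bit build: 64-bit intrinsics emulated with 32-bit ones");
#endif
  return 0;
}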
@@ -143,7 +142,13 @@ inline uint32_t CRC32_u32(uint32_t crc, uint32_t v) { |
 }
 
 inline uint32_t CRC32_u64(uint32_t crc, uint64_t v) {
+#if defined(__x86_64__) || defined(_M_X64)
   return static_cast<uint32_t>(_mm_crc32_u64(crc, v));
+#else
+  uint32_t v_lo = static_cast<uint32_t>(v);
+  uint32_t v_hi = static_cast<uint32_t>(v >> 32);
+  return _mm_crc32_u32(_mm_crc32_u32(crc, v_lo), v_hi);
+#endif
 }
 
 inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
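The new #else branch folds the 64-bit value in two 32-bit steps. CRC32-C consumes bytes least-significant first, so folding the low word and then the high word visits the same eight bytes in the same order as a single 64-bit fold. The sketch below (not part of the patch; function names are illustrative) checks the word-wise fold against a byte-wise reference and runs on any SSE 4.2 build, 32-bit or 64-bit, e.g. with g++ -msse4.2.

// crc_fold_check.cc -- illustration only.
#include <cstdint>
#include <cstdio>
#include <nmmintrin.h>  // SSE 4.2 CRC32 intrinsics

// Reference: fold the eight bytes of v one at a time, least-significant first.
static uint32_t Crc32u64ByBytes(uint32_t crc, uint64_t v) {
  for (int i = 0; i < 8; ++i) {
    crc = _mm_crc32_u8(crc, static_cast<uint8_t>(v >> (8 * i)));
  }
  return crc;
}

// The 32-bit fallback strategy: fold the low word, then the high word.
static uint32_t Crc32u64ByWords(uint32_t crc, uint64_t v) {
  crc = _mm_crc32_u32(crc, static_cast<uint32_t>(v));
  return _mm_crc32_u32(crc, static_cast<uint32_t>(v >> 32));
}

int main() {
  const uint64_t v = 0x0123456789abcdefULL;
  const uint32_t by_bytes = Crc32u64ByBytes(0xffffffffu, v);
  const uint32_t by_words = Crc32u64ByWords(0xffffffffu, v);
  std::printf("%08x %08x %s\n", by_bytes, by_words,
              by_bytes == by_words ? "match" : "MISMATCH");
  return by_bytes == by_words ? 0 : 1;
}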
@@ -191,7 +196,15 @@ inline uint64_t V128_Extract64(const V128 l) { |
   return static_cast<uint64_t>(_mm_extract_epi64(l, imm));
 }
 
-inline int64_t V128_Low64(const V128 l) { return _mm_cvtsi128_si64(l); }
+inline int64_t V128_Low64(const V128 l) {
+#if defined(__x86_64__) || defined(_M_X64)
+  return _mm_cvtsi128_si64(l);
+#else
+  uint32_t r_lo = static_cast<uint32_t>(_mm_extract_epi32(l, 0));
+  uint32_t r_hi = static_cast<uint32_t>(_mm_extract_epi32(l, 1));
+  return static_cast<int64_t>((static_cast<uint64_t>(r_hi) << 32) | r_lo);
+#endif
+}
 
 inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
   return _mm_sll_epi64(l, r);
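On 32-bit targets _mm_cvtsi128_si64 is unavailable, so the new V128_Low64 branch rebuilds the low 64-bit lane from the two low 32-bit lanes. A standalone sketch (not part of the patch; names are illustrative) of that reassembly on a known vector, buildable with e.g. g++ -msse4.1:

// low64_check.cc -- illustration only.
#include <cstdint>
#include <cstdio>
#include <smmintrin.h>  // SSE 4.1 for _mm_extract_epi32

// Reassemble the low 64-bit lane from 32-bit lane 0 (low) and lane 1 (high).
static int64_t Low64From32BitLanes(__m128i l) {
  const uint32_t lo = static_cast<uint32_t>(_mm_extract_epi32(l, 0));
  const uint32_t hi = static_cast<uint32_t>(_mm_extract_epi32(l, 1));
  return static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
}

int main() {
  // 32-bit lanes from low to high: 0x89abcdef, 0x01234567, 0, 0.
  const __m128i v =
      _mm_set_epi32(0, 0, 0x01234567, static_cast<int>(0x89abcdefu));
  // Prints 0123456789abcdef: the value of the low 64-bit lane.
  std::printf("%016llx\n",
              static_cast<unsigned long long>(Low64From32BitLanes(v)));
  return 0;
}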