///////////////////////////////////////////////////////////////////////////////
//
/// \file       memcmplen.h
/// \brief      Optimized comparison of two buffers
//
//  Author:     Lasse Collin
//
//  This file has been put into the public domain.
//  You can do whatever you want with this file.
//
///////////////////////////////////////////////////////////////////////////////

#ifndef LZMA_MEMCMPLEN_H
#define LZMA_MEMCMPLEN_H

#include "common.h"

#ifdef HAVE_IMMINTRIN_H
#	include <immintrin.h>
#endif

/// Find out how many equal bytes the two buffers have.
///
/// \param      buf1    First buffer
/// \param      buf2    Second buffer
/// \param      len     How many bytes have already been compared and will
///                     be assumed to match
/// \param      limit   How many bytes to compare at most, including the
///                     already-compared bytes. This must be significantly
///                     smaller than UINT32_MAX to avoid integer overflows.
///                     Up to LZMA_MEMCMPLEN_EXTRA bytes may be read past
///                     the specified limit from both buf1 and buf2.
///
/// \return     Number of equal bytes in the buffers is returned.
///             This is always at least len and at most limit.
///
/// \note       LZMA_MEMCMPLEN_EXTRA defines how many extra bytes may be read.
///             It's rounded up to 2^n. This extra amount needs to be
///             allocated in the buffers being used. It needs to be
///             initialized too to keep Valgrind quiet.
static inline uint32_t lzma_attribute((__always_inline__))
lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
		uint32_t len, uint32_t limit)
{
	assert(len <= limit);
	assert(limit <= UINT32_MAX / 2);

#if defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& ((TUKLIB_GNUC_REQ(3, 4) && defined(__x86_64__)) \
			|| (defined(__INTEL_COMPILER) && defined(__x86_64__)) \
			|| (defined(__INTEL_COMPILER) && defined(_M_X64)) \
			|| (defined(_MSC_VER) && defined(_M_X64)))
	// NOTE: This will use 64-bit unaligned access which
	// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit, but
	// it's convenient here at least as long as it's x86-64 only.
	//
	// I keep this x86-64 only for now since that's where I know this
	// to be a good method. This may be fine on other 64-bit CPUs too.
	// On big endian one should use xor instead of subtraction and switch
	// to __builtin_clzll().
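	//
	// A minimal sketch of that hypothetical big endian variant (not
	// compiled here; it assumes the same read64ne() and my_min()
	// helpers and a compiler providing __builtin_clzll()):
	//
	//     while (len < limit) {
	//         const uint64_t x
	//                 = read64ne(buf1 + len) ^ read64ne(buf2 + len);
	//         if (x != 0) {
	//             // On big endian the first differing byte is the most
	//             // significant nonzero byte, so count leading zero
	//             // bits and convert bits to bytes.
	//             len += (uint32_t)__builtin_clzll(x) >> 3;
	//             return my_min(len, limit);
	//         }
	//         len += 8;
	//     }
	//     return limit;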
#define LZMA_MEMCMPLEN_EXTRA 8
	while (len < limit) {
		const uint64_t x = read64ne(buf1 + len) - read64ne(buf2 + len);
		if (x != 0) {
			// Borrows only propagate toward the high bytes, so the
			// bytes below the first mismatch are zero in x. The
			// trailing zero bit count divided by eight is thus the
			// number of additional matching bytes.
#	if defined(_M_X64) // MSVC or Intel C compiler on Windows
			unsigned long tmp;
			_BitScanForward64(&tmp, x);
			len += (uint32_t)tmp >> 3;
#	else // GCC, clang, or Intel C compiler
			len += (uint32_t)__builtin_ctzll(x) >> 3;
#	endif
			return my_min(len, limit);
		}

		len += 8;
	}

	return limit;

#elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) \
		&& defined(HAVE__MM_MOVEMASK_EPI8) \
		&& ((defined(__GNUC__) && defined(__SSE2_MATH__)) \
			|| (defined(__INTEL_COMPILER) && defined(__SSE2__)) \
			|| (defined(_MSC_VER) && defined(_M_IX86_FP) \
				&& _M_IX86_FP >= 2))
	// NOTE: Like above, this will use 128-bit unaligned access which
	// TUKLIB_FAST_UNALIGNED_ACCESS wasn't meant to permit.
	//
	// SSE2 version for 32-bit and 64-bit x86. On x86-64 the above
	// version is sometimes significantly faster and sometimes
	// slightly slower than this SSE2 version, so this SSE2
	// version isn't used on x86-64.
#	define LZMA_MEMCMPLEN_EXTRA 16
	while (len < limit) {
		// _mm_movemask_epi8() packs one bit per byte: after the xor
		// with 0xFFFF, a set bit in x marks a byte that differs.
		const uint32_t x = 0xFFFF ^ _mm_movemask_epi8(_mm_cmpeq_epi8(
				_mm_loadu_si128((const __m128i *)(buf1 + len)),
				_mm_loadu_si128((const __m128i *)(buf2 + len))));

		if (x != 0) {
			len += ctz32(x);
			return my_min(len, limit);
		}

		len += 16;
	}

	return limit;

#elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) && !defined(WORDS_BIGENDIAN)
	// Generic 32-bit little endian method
#	define LZMA_MEMCMPLEN_EXTRA 4
	while (len < limit) {
		uint32_t x = read32ne(buf1 + len) - read32ne(buf2 + len);
		if (x != 0) {
			// As above, the bytes below the first mismatch are zero
			// in x, so locating the lowest nonzero byte gives the
			// number of additional matching bytes.
			if ((x & 0xFFFF) == 0) {
				len += 2;
				x >>= 16;
			}

			if ((x & 0xFF) == 0)
				++len;

			return my_min(len, limit);
		}

		len += 4;
	}

	return limit;

#elif defined(TUKLIB_FAST_UNALIGNED_ACCESS) && defined(WORDS_BIGENDIAN)
	// Generic 32-bit big endian method
#	define LZMA_MEMCMPLEN_EXTRA 4
	while (len < limit) {
		uint32_t x = read32ne(buf1 + len) ^ read32ne(buf2 + len);
		if (x != 0) {
			if ((x & 0xFFFF0000) == 0) {
				len += 2;
				x <<= 16;
			}

			if ((x & 0xFF000000) == 0)
				++len;

			return my_min(len, limit);
		}

		len += 4;
	}

	return limit;

#else
	// Simple portable version that doesn't use unaligned access.
#	define LZMA_MEMCMPLEN_EXTRA 0
	while (len < limit && buf1[len] == buf2[len])
		++len;

	return len;

#endif
}
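
// A hypothetical usage sketch (not part of liblzma's API), illustrating the
// allocation rule from the \note above: both buffers are over-allocated by
// LZMA_MEMCMPLEN_EXTRA bytes and zero-initialized so that the unaligned
// variants may safely read a little past the limit.
//
//     uint8_t a[16 + LZMA_MEMCMPLEN_EXTRA] = "abcdefXY";
//     uint8_t b[16 + LZMA_MEMCMPLEN_EXTRA] = "abcdefZZ";
//
//     // Bytes 0..5 are already known to match; compare at most 8 bytes.
//     const uint32_t matched = lzma_memcmplen(a, b, 6, 8);
//     // matched == 6 because a[6] != b[6].
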
#endif