
libuv: Fix building with mingw toolchains for ARM/AArch64

This is a backport of f9ad802fa5dd5afe6730f8e00cfdbf98f1d7a969
from the v1.x branch of upstream libuv:

    mingw: fix building for ARM/AArch64

    Don't use x86 inline assembly in these cases, but fall back to
    __sync_fetch_and_or, similar to _InterlockedOr8 in the MSVC case.

    This corresponds to what is done in src/unix/atomic-ops.h, where
    ARM/AArch64 cases end up implementing cmpxchgi with
    __sync_val_compare_and_swap.

    PR-URL: https://github.com/libuv/libuv/pull/3236
    Reviewed-By: Jameson Nash <[email protected]>
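
For reference, the unix-side pattern the message cites looks roughly like
the sketch below (paraphrased from src/unix/atomic-ops.h, not copied
verbatim): on ARM/AArch64, libuv's compare-and-swap primitive cmpxchgi
ends up wrapping the legacy GCC __sync builtin.

    /* Sketch of the non-x86 branch of cmpxchgi in src/unix/atomic-ops.h.
     * Atomically: if *ptr equals oldval, store newval; in all cases,
     * return the value *ptr held before the operation. */
    static int cmpxchgi(int* ptr, int oldval, int newval) {
      return __sync_val_compare_and_swap(ptr, oldval, newval);
    }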
commit 1aba3a8367
Author: Martin Storsjö
1 changed file with 6 additions and 2 deletions:

Utilities/cmlibuv/src/win/atomicops-inl.h (+6 -2)

@@ -39,10 +39,11 @@ static char INLINE uv__atomic_exchange_set(char volatile* target) {
   return _InterlockedOr8(target, 1);
 }
 
-#else /* GCC */
+#else /* GCC, Clang in mingw mode */
 
-/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
 static inline char uv__atomic_exchange_set(char volatile* target) {
+#if defined(__i386__) || defined(__x86_64__)
+  /* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
   const char one = 1;
   char old_value;
   __asm__ __volatile__ ("lock xchgb %0, %1\n\t"
@@ -50,6 +51,9 @@ static inline char uv__atomic_exchange_set(char volatile* target) {
                         : "0"(one), "m"(*target)
                         : "memory");
   return old_value;
+#else
+  return __sync_fetch_and_or(target, 1);
+#endif
 }
 
 #endif
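
The fallback added above behaves as a one-byte atomic test-and-set:
__sync_fetch_and_or ORs 1 into *target and returns the byte's previous
value, so the caller can tell whether the flag was already set. A minimal,
self-contained sketch of those semantics (the file name and main() driver
are illustrative, not part of the patch):

    /* demo.c -- build with a mingw GCC or Clang, e.g.
     *   aarch64-w64-mingw32-gcc -O2 demo.c -o demo.exe */
    #include <stdio.h>

    static char volatile flag = 0;  /* shared one-byte flag */

    /* Same shape as the patched fallback: atomically OR 1 into *target
     * and return the old value. */
    static inline char atomic_exchange_set(char volatile* target) {
      return __sync_fetch_and_or(target, 1);
    }

    int main(void) {
      printf("first call:  %d\n", atomic_exchange_set(&flag));  /* 0: was unset */
      printf("second call: %d\n", atomic_exchange_set(&flag));  /* 1: already set */
      return 0;
    }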