refcount.h
/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H
# pragma once

# include <openssl/e_os2.h>
# include <openssl/trace.h>
# include <openssl/err.h>
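
/*
 * Each implementation below provides the same interface: CRYPTO_UP_REF,
 * CRYPTO_DOWN_REF and CRYPTO_GET_REF write the resulting (or current)
 * count to *ret and return 1 on success, 0 on failure, while
 * CRYPTO_NEW_REF and CRYPTO_FREE_REF initialise and tear down a counter.
 */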

# if defined(OPENSSL_THREADS) && !defined(OPENSSL_DEV_NO_ATOMICS)
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

#   if defined(__has_feature)
#    if __has_feature(thread_sanitizer)
#     define OSSL_TSAN_BUILD
#    endif
#   endif

typedef struct {
    _Atomic int val;
} CRYPTO_REF_COUNT;
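
/*
 * The increment only needs to be atomic: a thread can only take a new
 * reference through an existing, still valid one, so no ordering with
 * other memory operations is required and memory_order_relaxed suffices.
 */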
static inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_fetch_add_explicit(&refcnt->val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to a shared structure other than the reference counter have to
 * be serialized, and any kind of serialization implies a release fence.
 * This means that by the time the reference counter is decremented all
 * other changes are visible on all processors, so the decrement itself
 * can be relaxed.  When it hits zero, the object is destructed.  Since
 * that is the last use of the object, the destructor's programmer might
 * reason that access to mutable members no longer has to be serialized,
 * which would otherwise imply an acquire fence.  Hence the conditional
 * acquire fence...
 */
static inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
#   ifdef OSSL_TSAN_BUILD
    /*
     * TSAN requires acq_rel, as it otherwise reports a false positive
     * when the object containing the refcount is freed.
     */
    *ret = atomic_fetch_sub_explicit(&refcnt->val, 1, memory_order_acq_rel) - 1;
#   else
    *ret = atomic_fetch_sub_explicit(&refcnt->val, 1, memory_order_release) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
#   endif
    return 1;
}

static inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = atomic_load_explicit(&refcnt->val, memory_order_acquire);
    return 1;
}

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1
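
/*
 * This branch mirrors the C11 implementation above using the GCC/Clang
 * __atomic builtins: relaxed increment, release decrement, and an acquire
 * fence once the count reaches zero.
 */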

typedef struct {
    int val;
} CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_fetch_add(&refcnt->val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

static __inline__ int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_fetch_sub(&refcnt->val, 1, __ATOMIC_RELEASE) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}

static __inline__ int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = __atomic_load_n(&refcnt->val, __ATOMIC_RELAXED);
    return 1;
}

#  elif defined(__ICL) && defined(_WIN32)

#   define HAVE_ATOMICS 1
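
/*
 * The Win32 Interlocked* functions act as full memory barriers, so these
 * operations are stronger than strictly necessary and no explicit fences
 * are needed.
 */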

typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd((void *)&refcnt->val, 0);
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER>=1200

#   define HAVE_ATOMICS 1

typedef struct {
    volatile int val;
} CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif
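
/*
 * On ARM the _nf ("no fence"), _acq and _rel suffixed intrinsics map to
 * relaxed, acquire and release semantics respectively, matching the
 * orderings used by the C11 branch above.
 */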
static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_nf(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_rel(&refcnt->val, -1) - 1;
    if (*ret == 0)
        /* match the conditional acquire fence used by the other branches */
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd_acq((void *)&refcnt->val, 0);
    return 1;
}

#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
extern long __cdecl _InterlockedExchangeAdd(long volatile *, long);
#     else
/* under Windows CE we still have old-style Interlocked* functions */
extern long __cdecl InterlockedExchangeAdd(long volatile *, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif
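
/*
 * Plain _InterlockedExchangeAdd already implies a full memory barrier
 * (LOCK XADD on x86), so the zero-check fence used by the other branches
 * is unnecessary here.
 */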
static __inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, -1) - 1;
    return 1;
}

static __inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt, int *ret)
{
    *ret = _InterlockedExchangeAdd(&refcnt->val, 0);
    return 1;
}

#   endif

#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback.  This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef struct {
    int val;
#  ifdef OPENSSL_THREADS
    CRYPTO_RWLOCK *lock;
#  endif
} CRYPTO_REF_COUNT;

#  ifdef OPENSSL_THREADS
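
/*
 * With threads but no usable atomics, every operation is serialized by
 * the per-refcount lock via CRYPTO_atomic_add()/CRYPTO_atomic_load_int().
 */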
static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, 1, ret, refcnt->lock);
}

static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    return CRYPTO_atomic_add(&refcnt->val, -1, ret, refcnt->lock);
}

static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    return CRYPTO_atomic_load_int(&refcnt->val, ret, refcnt->lock);
}

#  define CRYPTO_NEW_FREE_DEFINED 1

static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    refcnt->lock = CRYPTO_THREAD_lock_new();
    if (refcnt->lock == NULL) {
        ERR_raise(ERR_LIB_CRYPTO, ERR_R_CRYPTO_LIB);
        return 0;
    }
    return 1;
}

static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt)
{
    if (refcnt != NULL)
        CRYPTO_THREAD_lock_free(refcnt->lock);
}

#  else /* OPENSSL_THREADS */
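
/*
 * Without threads, plain non-atomic operations on the counter are safe:
 * there is no concurrent access to order against.
 */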
static ossl_unused ossl_inline int CRYPTO_UP_REF(CRYPTO_REF_COUNT *refcnt,
                                                 int *ret)
{
    refcnt->val++;
    *ret = refcnt->val;
    return 1;
}

static ossl_unused ossl_inline int CRYPTO_DOWN_REF(CRYPTO_REF_COUNT *refcnt,
                                                   int *ret)
{
    refcnt->val--;
    *ret = refcnt->val;
    return 1;
}

static ossl_unused ossl_inline int CRYPTO_GET_REF(CRYPTO_REF_COUNT *refcnt,
                                                  int *ret)
{
    *ret = refcnt->val;
    return 1;
}

#  endif /* OPENSSL_THREADS */
# endif

# ifndef CRYPTO_NEW_FREE_DEFINED
static ossl_unused ossl_inline int CRYPTO_NEW_REF(CRYPTO_REF_COUNT *refcnt, int n)
{
    refcnt->val = n;
    return 1;
}

static ossl_unused ossl_inline void CRYPTO_FREE_REF(CRYPTO_REF_COUNT *refcnt)
{
}
# endif /* CRYPTO_NEW_FREE_DEFINED */

# undef CRYPTO_NEW_FREE_DEFINED

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# define REF_PRINT_EX(text, count, object) \
    OSSL_TRACE3(REF_COUNT, "%p:%4d:%s\n", (object), (count), (text));

# define REF_PRINT_COUNT(text, val, object) \
    REF_PRINT_EX(text, val, (void *)object)
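
/*-
 * Typical destructor pattern built on these helpers (an illustrative
 * sketch only; TYPE and its fields are hypothetical, not part of this
 * header):
 *
 *     int TYPE_free(TYPE *t)
 *     {
 *         int i;
 *
 *         if (t == NULL)
 *             return 0;
 *         CRYPTO_DOWN_REF(&t->references, &i);
 *         REF_PRINT_COUNT("TYPE", i, t);
 *         if (i > 0)
 *             return 1;
 *         REF_ASSERT_ISNT(i < 0);
 *         CRYPTO_FREE_REF(&t->references);
 *         OPENSSL_free(t);
 *         return 1;
 *     }
 */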

#endif