threads_pthread.c

/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE
 * and __ATOMIC_ACQ_REL but which expects only one parameter for
 * __atomic_is_lock_free() rather than two, i.e. its signature is
 * __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

/*
 * The Non-Stop KLT thread model currently seems broken in its rwlock
 * implementation.  Likewise, there is a problem with the glibc
 * implementation on riscv.
 */
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_) \
     && !defined(__riscv)
#  define USE_RWLOCK
# endif

/*
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
 * other compilers.
 * Unfortunately, we can't do that with some "generic type", because there's no
 * guarantee that the chosen generic type is large enough to cover all cases.
 * Therefore, we implement fallbacks for each applicable type, with composed
 * names that include the type they handle.
 *
 * (an anecdote: we previously tried to use |void *| as the generic type, with
 * the thought that the pointer itself is the largest type.  However, this is
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
 *
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
 * they can map to the correct fallback function.  In the GNU/clang case, that
 * parameter is simply ignored.
 */

/*
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
 * fallback function names.
 */
typedef void *pvoid;

# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
    && !defined(USE_ATOMIC_FALLBACKS)
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
# else
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

#  define IMPL_fallback_atomic_load_n(t)                        \
    static ossl_inline t fallback_atomic_load_n_##t(t *p)       \
    {                                                           \
        t ret;                                                  \
                                                                \
        pthread_mutex_lock(&atomic_sim_lock);                   \
        ret = *p;                                               \
        pthread_mutex_unlock(&atomic_sim_lock);                 \
        return ret;                                             \
    }
IMPL_fallback_atomic_load_n(uint32_t)
IMPL_fallback_atomic_load_n(uint64_t)
IMPL_fallback_atomic_load_n(pvoid)

#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)

#  define IMPL_fallback_atomic_store_n(t)                       \
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
    {                                                           \
        t ret;                                                  \
                                                                \
        pthread_mutex_lock(&atomic_sim_lock);                   \
        ret = *p;                                               \
        *p = v;                                                 \
        pthread_mutex_unlock(&atomic_sim_lock);                 \
        return ret;                                             \
    }
IMPL_fallback_atomic_store_n(uint32_t)

#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)

#  define IMPL_fallback_atomic_store(t)                           \
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
    {                                                             \
        pthread_mutex_lock(&atomic_sim_lock);                     \
        *p = *v;                                                  \
        pthread_mutex_unlock(&atomic_sim_lock);                   \
    }
IMPL_fallback_atomic_store(pvoid)

#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)

/*
 * The fallbacks that follow don't need any per type implementation, as
 * they are designed for uint64_t only.  If there comes a time when multiple
 * types need to be covered, it's relatively easy to refactor them the same
 * way as the fallbacks above.
 */

static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
# endif
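
/*
 * Illustrative sketch (editor's addition, excluded from the build): how the
 * ATOMIC_ macros above are intended to be used.  The type passed as the
 * first parameter selects the matching fallback (here
 * fallback_atomic_load_n_uint32_t) when the compiler builtins are not
 * available; with the GNU/clang builtins it is simply ignored.  The
 * example_counter name is hypothetical.
 */
# if 0
static uint32_t example_counter;

static ossl_inline uint32_t example_read_counter(void)
{
    /*
     * Expands to __atomic_load_n(&example_counter, __ATOMIC_ACQUIRE) or to
     * fallback_atomic_load_n_uint32_t(&example_counter)
     */
    return ATOMIC_LOAD_N(uint32_t, &example_counter, __ATOMIC_ACQUIRE);
}
# endif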

/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};

/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);

        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         * this operation.  Specifically, we don't want the below re-load of
         * qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         * of the lock is flushed from a local cpu cache so that we see any
         * updates prior to the load.  This is a non-issue on cache coherent
         * systems like x86, but is relevant on other arches
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_ACQUIRE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
                                    __ATOMIC_ACQUIRE))
            break;

        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_RELAXED);
    }

    return &lock->qp_group[qp_idx];
}

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(lkey, NULL);
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * we have to use __ATOMIC_RELEASE here
             * to ensure that all preceding read instructions complete
             * before the decrement is visible to ossl_synchronize_rcu
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
                                       (uint64_t)1, __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal.
     */
    assert(0);
}
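
/*
 * Illustrative sketch (editor's addition, excluded from the build): a typical
 * read-side critical section built from the functions above together with
 * ossl_rcu_uptr_deref() (defined later in this file).  The shared_ptr and
 * use_object() names are hypothetical.
 */
# if 0
static void example_reader(CRYPTO_RCU_LOCK *lock, void **shared_ptr)
{
    void *obj;

    ossl_rcu_read_lock(lock);
    /* dereference the RCU-protected pointer with acquire semantics */
    obj = ossl_rcu_uptr_deref(shared_ptr);
    use_object(obj);            /* hypothetical consumer of the object */
    ossl_rcu_read_unlock(lock);
}
# endif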

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    /*
     * make the current state of everything visible by this release
     * when get_hold_current_qp acquires the next qp
     */
    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELEASE);

    /*
     * this should make sure that the new value of reader_idx is visible in
     * get_hold_current_qp, directly after incrementing the users count
     */
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
                     __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}

static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            uint32_t count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_unlock(&lock->write_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    pthread_mutex_lock(&lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    pthread_mutex_unlock(&lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
     * is visible prior to our read
     * however this is likely just necessary to silence a tsan warning
     * because the read side should not do any write operation
     * outside the atomic itself
     */
    do {
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

/*
 * Note: This call assumes it's made under the protection of
 * ossl_rcu_write_lock
 */
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;
    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}

void *ossl_rcu_uptr_deref(void **p)
{
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
}
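
/*
 * Illustrative sketch (editor's addition, excluded from the build): a typical
 * write-side update.  The writer publishes a new object with
 * ossl_rcu_assign_uptr(), then either waits for readers with
 * ossl_synchronize_rcu() before freeing the old object, or registers a
 * deferred free with ossl_rcu_call() while still holding the write lock.
 * The shared_ptr, make_object() and free_cb names are hypothetical.
 */
# if 0
static void example_writer(CRYPTO_RCU_LOCK *lock, void **shared_ptr)
{
    void *newobj = make_object();       /* hypothetical constructor */
    void *oldobj;

    ossl_rcu_write_lock(lock);
    oldobj = *shared_ptr;               /* writers are serialized, a plain read is fine */
    ossl_rcu_assign_uptr(shared_ptr, &newobj);
    /*
     * Alternative: defer the free instead of synchronizing below; this must
     * be registered while the write lock is still held:
     *     ossl_rcu_call(lock, free_cb, oldobj);
     */
    ossl_rcu_write_unlock(lock);

    /* wait until no reader can still hold a reference to oldobj */
    ossl_synchronize_rcu(lock);
    OPENSSL_free(oldobj);
}
# endif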

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qp's
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);

    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}
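
/*
 * Illustrative sketch (editor's addition, excluded from the build): creating
 * and releasing an rcu lock.  The libctx name is hypothetical.
 */
# if 0
static int example_lock_lifecycle(OSSL_LIB_CTX *libctx)
{
    /* num_writers sizes the qp group; values below 2 are raised to 2 */
    CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(4, libctx);

    if (lock == NULL)
        return 0;

    /* ... readers and writers use the lock as in the sketches above ... */

    /* ossl_rcu_lock_free() synchronizes once more before freeing */
    ossl_rcu_lock_free(lock);
    return 1;
}
# endif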

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
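
/*
 * Illustrative sketch (editor's addition, excluded from the build):
 * incrementing a shared reference count with CRYPTO_atomic_add().  The lock
 * argument is only used when a lock-free native atomic is unavailable.  The
 * refcount and refcount_lock names are hypothetical.
 */
# if 0
static int example_up_ref(int *refcount, CRYPTO_RWLOCK *refcount_lock)
{
    int new_count;

    if (!CRYPTO_atomic_add(refcount, 1, &new_count, refcount_lock))
        return 0;
    return new_count > 1;
}
# endif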

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;

    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;

    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif