threads_pthread.c

/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free()
 * rather than two, i.e. its signature is __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

# ifdef PTHREAD_RWLOCK_INITIALIZER
#  define USE_RWLOCK
# endif

/*
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
 * other compilers.
 *
 * Unfortunately, we can't do that with some "generic type", because there's no
 * guarantee that the chosen generic type is large enough to cover all cases.
 * Therefore, we implement fallbacks for each applicable type, with composed
 * names that include the type they handle.
 *
 * (an anecdote: we previously tried to use |void *| as the generic type, with
 * the thought that the pointer itself is the largest type. However, this is
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
 *
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
 * they can map to the correct fallback function. In the GNU/clang case, that
 * parameter is simply ignored.
 */
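/*
 * For example, ATOMIC_LOAD_N(uint64_t, &x, __ATOMIC_ACQUIRE) expands either
 * to __atomic_load_n(&x, __ATOMIC_ACQUIRE) when the GNU/clang builtins are
 * usable, or to the mutex-based fallback_atomic_load_n_uint64_t(&x) defined
 * below.
 */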
/*
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
 * fallback function names.
 */
typedef void *pvoid;

# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
    && !defined(USE_ATOMIC_FALLBACKS)

#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)

# else

static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

#  define IMPL_fallback_atomic_load_n(t)                          \
    static ossl_inline t fallback_atomic_load_n_##t(t *p)         \
    {                                                             \
        t ret;                                                    \
                                                                  \
        pthread_mutex_lock(&atomic_sim_lock);                     \
        ret = *p;                                                 \
        pthread_mutex_unlock(&atomic_sim_lock);                   \
        return ret;                                               \
    }
IMPL_fallback_atomic_load_n(uint32_t)
IMPL_fallback_atomic_load_n(uint64_t)
IMPL_fallback_atomic_load_n(pvoid)

#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)

#  define IMPL_fallback_atomic_store_n(t)                         \
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)   \
    {                                                             \
        t ret;                                                    \
                                                                  \
        pthread_mutex_lock(&atomic_sim_lock);                     \
        ret = *p;                                                 \
        *p = v;                                                   \
        pthread_mutex_unlock(&atomic_sim_lock);                   \
        return ret;                                               \
    }
IMPL_fallback_atomic_store_n(uint32_t)

#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)

#  define IMPL_fallback_atomic_store(t)                           \
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
    {                                                             \
        pthread_mutex_lock(&atomic_sim_lock);                     \
        *p = *v;                                                  \
        pthread_mutex_unlock(&atomic_sim_lock);                   \
    }
IMPL_fallback_atomic_store(pvoid)

#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)

/*
 * The fallbacks that follow don't need any per type implementation, as
 * they are designed for uint64_t only. If there comes a time when multiple
 * types need to be covered, it's relatively easy to refactor them the same
 * way as the fallbacks above.
 */
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)

# endif

/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. users is a 64-bit count of the
 * readers currently holding a reference to this quiescence point.
 */
struct rcu_qp {
    uint64_t users;
};
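/*
 * Per-thread record of a hold on one rcu lock: the qp that was acquired,
 * the lock it belongs to, and the nesting depth of ossl_rcu_read_lock()
 * calls on that lock.
 */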
struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10

/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};
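/*
 * Overview of how the fields above cooperate: qp_group is used as a
 * circular array of quiescent points. The write side hands them out in
 * order (current_alloc_idx/id_ctr), publishes the index readers should
 * join via reader_idx, and ossl_synchronize_rcu() waits for the previous
 * qp's users count to drain to zero before retiring it in id order.
 */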
/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);

        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         *    this operation. Specifically, we don't want the below re-load of
         *    qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         *    of the lock is flushed from a local cpu cache so that we see any
         *    updates prior to the load. This is a non-issue on cache coherent
         *    systems like x86, but is relevant on other arches
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_ACQUIRE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
                                    __ATOMIC_RELAXED))
            break;

        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_RELAXED);
    }

    return &lock->qp_group[qp_idx];
}

static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(lkey, NULL);
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * we have to use __ATOMIC_RELEASE here
             * to ensure that all preceding read instructions complete
             * before the decrement is visible to ossl_synchronize_rcu
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
                                       (uint64_t)1, __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal.
     */
    assert(0);
}
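/*
 * Illustrative reader-side usage (a sketch, not code from this file;
 * "global_cfg" and "cfg" are hypothetical names):
 *
 *     ossl_rcu_read_lock(lock);
 *     cfg = ossl_rcu_deref(&global_cfg);
 *     ... read-only use of cfg ...
 *     ossl_rcu_read_unlock(lock);
 *
 * where ossl_rcu_deref() (internal/rcu.h) resolves to ossl_rcu_uptr_deref()
 * defined later in this file.
 */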
/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELAXED);

    /*
     * this should make sure that the new value of reader_idx is visible in
     * get_hold_current_qp, directly after incrementing the users count
     */
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
                     __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}

static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            uint32_t count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_unlock(&lock->write_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    pthread_mutex_lock(&lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    pthread_mutex_unlock(&lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
     * is visible prior to our read
     * however this is likely just necessary to silence a tsan warning
     * because the read side should not do any write operation
     * outside the atomic itself
     */
    do {
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}
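/*
 * Illustrative writer-side usage (a sketch, not code from this file;
 * "global_cfg", "new_cfg" and "old_cfg" are hypothetical names):
 *
 *     ossl_rcu_write_lock(lock);
 *     old_cfg = global_cfg;
 *     ossl_rcu_assign_ptr(&global_cfg, &new_cfg);
 *     ossl_rcu_write_unlock(lock);
 *     ossl_synchronize_rcu(lock);   <- wait out readers of old_cfg
 *     OPENSSL_free(old_cfg);        <- now safe to reclaim
 *
 * Alternatively, reclamation can be deferred with ossl_rcu_call() while
 * still holding the write lock; the callback then runs at the end of the
 * next ossl_synchronize_rcu().
 */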
/*
 * Note: This call assumes it's made under the protection of
 * ossl_rcu_write_lock
 */
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;
    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}

void *ossl_rcu_uptr_deref(void **p)
{
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
}
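/*
 * Create a new rcu lock. num_writers sizes the qp_group array (clamped to
 * a minimum of 2 below); a larger value allows more synchronize operations
 * to be in flight before writers have to wait for a qp to be retired.
 */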
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qp's
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);

    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
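/*
 * The remaining CRYPTO_atomic_* helpers follow the same pattern as
 * CRYPTO_atomic_add above: prefer the GCC/clang __atomic builtins when the
 * operation is lock-free, then the Solaris atomic_*_nv() routines, and
 * finally fall back to taking the supplied CRYPTO_RWLOCK around a plain
 * memory access.
 */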
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;

    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;

    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif