hashtable.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749
  1. /*
  2. * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. *
  9. *
  10. *
  11. * Notes On hash table design and layout
  12. * This hashtable uses a hopscotch algorithm to do indexing. The data structure
  13. * looks as follows:
  14. *
  15. * hash +--------------+
  16. * value+------->+ HT_VALUE |
  17. * + +--------------+
  18. * +-------+
  19. * | |
  20. * +---------------------------------------------------------+
  21. * | | | | | |
  22. * | entry | entry | entry | entry | |
  23. * | | | | | |
  24. * +---------------------------------------------------------+
  25. * | | |
  26. * | | |
  27. * +---------------------------------------------------------+
  28. * | + + +
  29. * | neighborhood[0] neighborhood[1] |
  30. * | |
  31. * | |
  32. * +---------------------------------------------------------+
  33. * |
  34. * +
  35. * neighborhoods
  36. *
  37. * On lookup/insert/delete, the items key is hashed to a 64 bit value
  38. * and the result is masked to provide an index into the neighborhoods
  39. * table. Once a neighborhood is determined, an in-order search is done
  40. * of the elements in the neighborhood indexes entries for a matching hash
  41. * value, if found, the corresponding HT_VALUE is used for the respective
  42. * operation. The number of entries in a neighborhood is determined at build
  43. * time based on the cacheline size of the target CPU. The intent is for a
  44. * neighborhood to have all entries in the neighborhood fit into a single cache
  45. * line to speed up lookups. If all entries in a neighborhood are in use at the
  46. * time of an insert, the table is expanded and rehashed.
  47. *
  48. * Lockless reads hash table is based on the same design but does not
  49. * allow growing and deletion. Thus subsequent neighborhoods are always
  50. * searched for a match until an empty entry is found.
  51. */
#include <string.h>
#include <internal/rcu.h>
#include <internal/hashtable.h>
#include <internal/hashfunc.h>
#include <openssl/rand.h>

/*
 * gcc defines __SANITIZE_THREAD__
 * but clang uses the feature attributes api
 * map the latter to the former
 * NOTE(review): the macro defined and tested below is the plural
 * __SANITIZE_THREADS__, while gcc's predefined macro is the singular
 * __SANITIZE_THREAD__ -- confirm the gcc/tsan build still takes the
 * intended path.
 */
#if defined(__clang__) && defined(__has_feature)
# if __has_feature(thread_sanitizer)
#  define __SANITIZE_THREADS__
# endif
#endif

#ifdef __SANITIZE_THREADS__
# include <sanitizer/tsan_interface.h>
#endif

#include "internal/numbers.h"

/*
 * When we do a lookup/insert/delete, there is a high likelihood
 * that we will iterate over at least part of the neighborhood list
 * As such, because we design a neighborhood entry to fit into a single
 * cache line it is advantageous, when supported to fetch the entire
 * structure for faster lookups
 * NOTE(review): clang predefines __clang__ (lowercase), not __CLANG__;
 * the prefetch path still applies under clang because it also defines
 * __GNUC__.
 */
#if defined(__GNUC__) || defined(__CLANG__)
# define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
# define PREFETCH(x) __builtin_prefetch(x)
#else
# define PREFETCH_NEIGHBORHOOD(x)
# define PREFETCH(x)
#endif

/*
 * Define our neighborhood list length
 * Note: It should always be a power of 2
 */
#define DEFAULT_NEIGH_LEN_LOG 4
#define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)

/*
 * For now assume cache line size is 64 bytes
 */
#define CACHE_LINE_BYTES 64
#define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES

/* Number of entries that fit in one cache-line-sized neighborhood */
#define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
/*
 * Defines our chains of values
 */
struct ht_internal_value_st {
    HT_VALUE value; /* public portion; must be first so casts to HT_VALUE * work */
    HT *ht;         /* owning table, used by the rcu free callbacks */
};

struct ht_neighborhood_entry_st {
    uint64_t hash;                      /* full 64 bit hash of the entry's key */
    struct ht_internal_value_st *value; /* NULL when the slot is empty */
};

struct ht_neighborhood_st {
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};

/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    struct ht_neighborhood_st *neighborhoods; /* cache line aligned table */
    void *neighborhood_ptr_to_free;           /* raw allocation backing the table */
    uint64_t neighborhood_mask;               /* table length - 1 (length is a power of 2) */
};

/*
 * Private data may be updated on the write
 * side only, and so do not require rcu sync
 */
struct ht_write_private_data_st {
    size_t neighborhood_len; /* current number of neighborhoods */
    size_t value_count;      /* number of values stored in the table */
    int need_sync;           /* set when rcu callbacks were scheduled under the lock */
};

struct ht_internal_st {
    HT_CONFIG config;                    /* caller supplied configuration */
    CRYPTO_RCU_LOCK *lock;               /* rcu lock protecting md */
    CRYPTO_RWLOCK *atomic_lock;          /* lock backing the CRYPTO_atomic_* calls */
    struct ht_mutable_data_st *md;       /* rcu-protected mutable data */
    struct ht_write_private_data_st wpd; /* write-side-only data */
};

static void free_value(struct ht_internal_value_st *v);
  138. static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
  139. void **freeptr)
  140. {
  141. struct ht_neighborhood_st *ret;
  142. ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
  143. CACHE_LINE_BYTES, freeptr);
  144. /* fall back to regular malloc */
  145. if (ret == NULL) {
  146. ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
  147. if (ret == NULL)
  148. return NULL;
  149. }
  150. memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
  151. return ret;
  152. }
  153. static void internal_free_nop(HT_VALUE *v)
  154. {
  155. return;
  156. }
/*
 * Create a new hash table configured according to *conf (which is
 * copied).  Missing callbacks get defaults (no-op free, fnv1a hash).
 * Returns the new table, or NULL on allocation failure.
 */
HT *ossl_ht_new(const HT_CONFIG *conf)
{
    HT *new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->atomic_lock = CRYPTO_THREAD_lock_new();
    if (new->atomic_lock == NULL)
        goto err;

    memcpy(&new->config, conf, sizeof(*conf));

    if (new->config.init_neighborhoods != 0) {
        new->wpd.neighborhood_len = new->config.init_neighborhoods;
        /* round up to the next power of 2 (bit-smearing trick) */
        new->wpd.neighborhood_len--;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
        new->wpd.neighborhood_len++;
    } else {
        new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
    }

    /* tolerate a NULL free callback by substituting a no-op */
    if (new->config.ht_free_fn == NULL)
        new->config.ht_free_fn = internal_free_nop;

    new->md = OPENSSL_zalloc(sizeof(*new->md));
    if (new->md == NULL)
        goto err;

    new->md->neighborhoods =
        alloc_new_neighborhood_list(new->wpd.neighborhood_len,
                                    &new->md->neighborhood_ptr_to_free);
    if (new->md->neighborhoods == NULL)
        goto err;
    /* length is a power of 2, so len - 1 is a valid index mask */
    new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;

    new->lock = ossl_rcu_lock_new(1, conf->ctx);
    if (new->lock == NULL)
        goto err;

    if (new->config.ht_hash_fn == NULL)
        new->config.ht_hash_fn = ossl_fnv1a_hash;

    return new;

err:
    /* all of these free routines tolerate NULL arguments */
    CRYPTO_THREAD_lock_free(new->atomic_lock);
    ossl_rcu_lock_free(new->lock);
    if (new->md != NULL)
        OPENSSL_free(new->md->neighborhood_ptr_to_free);
    OPENSSL_free(new->md);
    OPENSSL_free(new);
    return NULL;
}
/* Enter an rcu read-side critical section over the table */
void ossl_ht_read_lock(HT *htable)
{
    ossl_rcu_read_lock(htable->lock);
}

/* Leave the rcu read-side critical section */
void ossl_ht_read_unlock(HT *htable)
{
    ossl_rcu_read_unlock(htable->lock);
}

/* Take the table's write lock; clears any stale pending-sync flag */
void ossl_ht_write_lock(HT *htable)
{
    ossl_rcu_write_lock(htable->lock);
    htable->wpd.need_sync = 0;
}

/*
 * Release the table's write lock.  If any operation performed under the
 * lock scheduled rcu callbacks (need_sync set), synchronize the rcu
 * lock after dropping it so deferred frees can proceed.
 */
void ossl_ht_write_unlock(HT *htable)
{
    /* sample the flag before clearing it: sync happens after unlock */
    int need_sync = htable->wpd.need_sync;

    htable->wpd.need_sync = 0;
    ossl_rcu_write_unlock(htable->lock);
    if (need_sync)
        ossl_synchronize_rcu(htable->lock);
}
/*
 * RCU callback: free an entire retired mutable-data set, invoking the
 * table's free callback on every stored value before releasing the
 * neighborhood array and the struct itself.
 */
static void free_oldmd(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;
    size_t i, j;
    /* mask + 1 recovers the neighborhood count (length is a power of 2) */
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
    struct ht_internal_value_st *v;

    for (i = 0; i < neighborhood_len; i++) {
        /*
         * Prefetch the next neighborhood while scanning this one.
         * NOTE(review): on the final iteration this names index
         * neighborhood_len, one past the end of the array; prefetching
         * a bad address is harmless on common targets, but confirm
         * this is intentional.
         */
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
                v = oldmd->neighborhoods[i].entries[j].value;
                /* user callback first, then the wrapper itself */
                v->ht->config.ht_free_fn((HT_VALUE *)v);
                free_value(v);
            }
        }
    }
    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}
/*
 * Replace the current mutable-data set with a fresh, empty,
 * default-sized one.  The old set (and every value in it) is freed via
 * rcu callback once readers have drained.  Must be called with the
 * write lock held.  Returns 1 on success, 0 on allocation failure (the
 * table is left unchanged).
 */
static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }
    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

    /* defer freeing of the old data until readers have drained */
    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}
/*
 * Empty the table, freeing all values.  Caller must hold the write
 * lock.  Returns 1 on success, 0 on failure.
 */
int ossl_ht_flush(HT *h)
{
    return ossl_ht_flush_internal(h);
}
/*
 * Free the table and every value it contains.  Takes and releases the
 * write lock internally; the caller must guarantee no other users
 * remain.  A NULL table is tolerated.
 */
void ossl_ht_free(HT *h)
{
    if (h == NULL)
        return;

    /* flush under the write lock so values are released via rcu */
    ossl_ht_write_lock(h);
    ossl_ht_flush_internal(h);
    ossl_ht_write_unlock(h);
    /* Freeing the lock does a final sync for us */
    CRYPTO_THREAD_lock_free(h->atomic_lock);
    ossl_rcu_lock_free(h->lock);
    OPENSSL_free(h->md->neighborhood_ptr_to_free);
    OPENSSL_free(h->md);
    OPENSSL_free(h);
    return;
}
  288. size_t ossl_ht_count(HT *h)
  289. {
  290. size_t count;
  291. count = h->wpd.value_count;
  292. return count;
  293. }
/*
 * Invoke cb(value, arg) on each stored value, stopping early the first
 * time cb returns zero.  Uses rcu dereference on the table data, so
 * callers are expected to hold the read (or write) lock.
 */
void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
                           void *arg)
{
    size_t i, j;
    struct ht_mutable_data_st *md;

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        /* prefetch the next neighborhood while scanning this one */
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (md->neighborhoods[i].entries[j].value != NULL) {
                if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
                    goto out;
            }
        }
    }
out:
    return;
}
/*
 * Collect up to max_len values for which filter(value, arg) returns
 * nonzero.  Returns NULL on allocation failure.  The returned list
 * borrows the values (they are not copied) and must be released with
 * ossl_ht_value_list_free().
 */
HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
                              int (*filter)(HT_VALUE *obj, void *arg),
                              void *arg)
{
    struct ht_mutable_data_st *md;
    /* the struct and its trailing pointer array share one allocation */
    HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
                                         + (sizeof(HT_VALUE *) * max_len));
    size_t i, j;
    struct ht_internal_value_st *v;

    if (list == NULL)
        return NULL;

    /*
     * The list array lives just beyond the end of
     * the struct
     */
    list->list = (HT_VALUE **)(list + 1);

    md = ossl_rcu_deref(&h->md);
    for (i = 0; i < md->neighborhood_mask + 1; i++) {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            v = md->neighborhoods[i].entries[j].value;
            if (v != NULL && filter((HT_VALUE *)v, arg)) {
                list->list[list->list_len++] = (HT_VALUE *)v;
                if (list->list_len == max_len)
                    goto out;
            }
        }
    }
out:
    return list;
}
/*
 * Free a list returned by ossl_ht_filter().  Only the list itself is
 * released; the values it points at remain owned by the table.
 */
void ossl_ht_value_list_free(HT_VALUE_LIST *list)
{
    OPENSSL_free(list);
}
  347. static int compare_hash(uint64_t hash1, uint64_t hash2)
  348. {
  349. return (hash1 == hash2);
  350. }
  351. static void free_old_neigh_table(void *arg)
  352. {
  353. struct ht_mutable_data_st *oldmd = arg;
  354. OPENSSL_free(oldmd->neighborhood_ptr_to_free);
  355. OPENSSL_free(oldmd);
  356. }
  357. /*
  358. * Increase hash table bucket list
  359. * must be called with write_lock held
  360. */
  361. static int grow_hashtable(HT *h, size_t oldsize)
  362. {
  363. struct ht_mutable_data_st *newmd;
  364. struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
  365. int rc = 0;
  366. uint64_t oldi, oldj, newi, newj;
  367. uint64_t oldhash;
  368. struct ht_internal_value_st *oldv;
  369. int rehashed;
  370. size_t newsize = oldsize * 2;
  371. if (h->config.lockless_reads)
  372. goto out;
  373. if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL)
  374. goto out;
  375. /* bucket list is always a power of 2 */
  376. newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
  377. &newmd->neighborhood_ptr_to_free);
  378. if (newmd->neighborhoods == NULL)
  379. goto out_free;
  380. /* being a power of 2 makes for easy mask computation */
  381. newmd->neighborhood_mask = (newsize - 1);
  382. /*
  383. * Now we need to start rehashing entries
  384. * Note we don't need to use atomics here as the new
  385. * mutable data hasn't been published
  386. */
  387. for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
  388. PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
  389. for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
  390. oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
  391. if (oldv == NULL)
  392. continue;
  393. oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
  394. newi = oldhash & newmd->neighborhood_mask;
  395. rehashed = 0;
  396. for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
  397. if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
  398. newmd->neighborhoods[newi].entries[newj].value = oldv;
  399. newmd->neighborhoods[newi].entries[newj].hash = oldhash;
  400. rehashed = 1;
  401. break;
  402. }
  403. }
  404. if (rehashed == 0) {
  405. /* we ran out of space in a neighborhood, grow again */
  406. OPENSSL_free(newmd->neighborhoods);
  407. OPENSSL_free(newmd);
  408. return grow_hashtable(h, newsize);
  409. }
  410. }
  411. }
  412. /*
  413. * Now that our entries are all hashed into the new bucket list
  414. * update our bucket_len and target_max_load
  415. */
  416. h->wpd.neighborhood_len = newsize;
  417. /*
  418. * Now we replace the old mutable data with the new
  419. */
  420. ossl_rcu_assign_ptr(&h->md, &newmd);
  421. ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
  422. h->wpd.need_sync = 1;
  423. /*
  424. * And we're done
  425. */
  426. rc = 1;
  427. out:
  428. return rc;
  429. out_free:
  430. OPENSSL_free(newmd->neighborhoods);
  431. OPENSSL_free(newmd);
  432. goto out;
  433. }
  434. static void free_old_ht_value(void *arg)
  435. {
  436. HT_VALUE *h = (HT_VALUE *)arg;
  437. /*
  438. * Note, this is only called on replacement,
  439. * the caller is responsible for freeing the
  440. * held data, we just need to free the wrapping
  441. * struct here
  442. */
  443. OPENSSL_free(h);
  444. }
/*
 * Return nonzero when the two keys are considered a match.
 * Keys match when both key buffers are present, the same size, and
 * equal in memory.  If either keybuf is NULL (key copies are only
 * stored when collision_check is enabled, see alloc_new_value) the
 * keys are deemed to match on the basis of their already-compared
 * hashes.
 * NOTE(review): two non-NULL keys of differing size also fall through
 * to the final "match" return -- confirm that is intentional.
 */
static ossl_inline int match_key(HT_KEY *a, HT_KEY *b)
{
    /*
     * keys match if they are both present, the same size
     * and compare equal in memory
     */
    PREFETCH(a->keybuf);
    PREFETCH(b->keybuf);
    if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize)
        return !memcmp(a->keybuf, b->keybuf, a->keysize);

    return 1;
}
/*
 * Insert newval (with precomputed hash) into the table; caller holds
 * the write lock.  When an entry with matching hash and key already
 * exists:
 *   - olddata == NULL: the insert would create a duplicate -> fail (0)
 *   - otherwise: the old value is replaced, returned via *olddata, and
 *     its wrapper scheduled for freeing via rcu callback
 * Returns 1 on success, 0 on error or duplicate, and -1 when no empty
 * slot is available (caller should grow the table and retry).
 */
static int ossl_ht_insert_locked(HT *h, uint64_t hash,
                                 struct ht_internal_value_st *newval,
                                 HT_VALUE **olddata)
{
    struct ht_mutable_data_st *md = h->md;
    uint64_t neigh_idx_start = hash & md->neighborhood_mask;
    uint64_t neigh_idx = neigh_idx_start;
    size_t j;
    uint64_t ihash;
    HT_VALUE *ival;
    size_t empty_idx = SIZE_MAX; /* SIZE_MAX == no empty slot seen yet */
    int lockless_reads = h->config.lockless_reads;

    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                empty_idx = j;
                /* lockless_reads implies no deletion, we can break out */
                if (lockless_reads)
                    goto not_found;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ihash, h->atomic_lock))
                return 0;
            if (compare_hash(hash, ihash) && match_key(&newval->value.key,
                                                       &ival->key)) {
                if (olddata == NULL) {
                    /* This would insert a duplicate -> fail */
                    return 0;
                }
                /* Do a replacement: store the hash, then publish the value */
                if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
                                         hash, h->atomic_lock))
                    return 0;
                *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
                ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
                                    &newval);
                /* free the displaced wrapper once readers have drained */
                ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
                h->wpd.need_sync = 1;
                return 1;
            }
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

not_found:
    /* If we get to here, its just an insert */
    if (empty_idx == SIZE_MAX)
        return -1; /* out of space */
    /* store the hash first, then publish the value pointer */
    if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
                             hash, h->atomic_lock))
        return 0;
    h->wpd.value_count++;
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
                        &newval);
    return 1;
}
  518. static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
  519. void *data,
  520. uintptr_t *type)
  521. {
  522. struct ht_internal_value_st *tmp;
  523. size_t nvsize = sizeof(*tmp);
  524. if (h->config.collision_check == 1)
  525. nvsize += key->keysize;
  526. tmp = OPENSSL_malloc(nvsize);
  527. if (tmp == NULL)
  528. return NULL;
  529. tmp->ht = h;
  530. tmp->value.value = data;
  531. tmp->value.type_id = type;
  532. tmp->value.key.keybuf = NULL;
  533. if (h->config.collision_check) {
  534. tmp->value.key.keybuf = (uint8_t *)(tmp + 1);
  535. tmp->value.key.keysize = key->keysize;
  536. memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize);
  537. }
  538. return tmp;
  539. }
/* Release a value wrapper (does not invoke the user free callback). */
static void free_value(struct ht_internal_value_st *v)
{
    OPENSSL_free(v);
}
/*
 * Insert data under key.  With olddata non-NULL an existing entry is
 * replaced and the previous value returned there; with olddata NULL a
 * duplicate insert fails.  Grows the table (up to 4 attempts) when the
 * target neighborhood is full.  Caller must hold the write lock.
 * Returns 1 on success, 0 on error/duplicate, -1 when the table could
 * not be grown to make room.
 */
int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
{
    struct ht_internal_value_st *newval = NULL;
    uint64_t hash;
    int rc = 0;
    int i;

    /* NULL values cannot be stored */
    if (data->value == NULL)
        goto out;

    newval = alloc_new_value(h, key, data->value, data->type_id);
    if (newval == NULL)
        goto out;

    /*
     * we have to take our lock here to prevent other changes
     * to the bucket list
     */
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    /* retry the insert, growing the table on each -1 (no space) result */
    for (i = 0;
         (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1
         && i < 4;
         ++i)
        if (!grow_hashtable(h, h->wpd.neighborhood_len)) {
            rc = -1;
            break;
        }

    /* on failure the wrapper was never published; free it ourselves */
    if (rc <= 0)
        free_value(newval);

out:
    return rc;
}
/*
 * Look up the value stored under key.  Returns the value, or NULL when
 * not found (or on atomic-load failure).  Uses rcu dereference, so
 * callers are expected to hold the read lock (see ossl_ht_read_lock).
 */
HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
{
    struct ht_mutable_data_st *md;
    uint64_t hash;
    uint64_t neigh_idx_start;
    uint64_t neigh_idx;
    struct ht_internal_value_st *ival = NULL;
    size_t j;
    uint64_t ehash;
    int lockless_reads = h->config.lockless_reads;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    md = ossl_rcu_deref(&h->md);
    neigh_idx = neigh_idx_start = hash & md->neighborhood_mask;
    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                if (lockless_reads)
                    /* lockless_reads implies no deletion, we can break out */
                    return NULL;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ehash, h->atomic_lock))
                return NULL;
            if (compare_hash(hash, ehash) && match_key(&ival->value.key, key))
                return (HT_VALUE *)ival;
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);
    return NULL;
}
  609. static void free_old_entry(void *arg)
  610. {
  611. struct ht_internal_value_st *v = arg;
  612. v->ht->config.ht_free_fn((HT_VALUE *)v);
  613. free_value(v);
  614. }
/*
 * Delete the entry matching key.  Not supported (returns 0) for tables
 * configured for lockless reads.  Caller must hold the write lock.
 * The removed value is freed via rcu callback once readers have
 * drained.  Returns 1 when an entry was deleted, 0 otherwise.
 */
int ossl_ht_delete(HT *h, HT_KEY *key)
{
    uint64_t hash;
    uint64_t neigh_idx;
    size_t j;
    struct ht_internal_value_st *v = NULL;
    HT_VALUE *nv = NULL; /* NULL pointer to publish into the slot */
    int rc = 0;

    if (h->config.lockless_reads)
        return 0;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    neigh_idx = hash & h->md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
        if (v == NULL)
            continue;
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)
            && match_key(key, &v->value.key)) {
            /* clear the hash first, then publish the NULL value */
            if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
                                     0, h->atomic_lock))
                break;
            h->wpd.value_count--;
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
                                &nv);
            rc = 1;
            break;
        }
    }
    if (rc == 1) {
        /* defer freeing until concurrent readers have drained */
        ossl_rcu_call(h->lock, free_old_entry, v);
        h->wpd.need_sync = 1;
    }
    return rc;
}