  1. /*
  2. * Copyright 2024-2026 The OpenSSL Project Authors. All Rights Reserved.
  3. *
  4. * Licensed under the Apache License 2.0 (the "License"). You may not use
  5. * this file except in compliance with the License. You can obtain a copy
  6. * in the file LICENSE in the source distribution or at
  7. * https://www.openssl.org/source/license.html
  8. *
  9. *
  10. *
  11. * Notes On hash table design and layout
  12. * This hashtable uses a hopscotch algorithm to do indexing. The data structure
  13. * looks as follows:
  14. *
  15. * hash +--------------+
  16. * value+------->+ HT_VALUE |
  17. * + +--------------+
  18. * +-------+
  19. * | |
  20. * +---------------------------------------------------------+
  21. * | | | | | |
  22. * | entry | entry | entry | entry | |
  23. * | | | | | |
  24. * +---------------------------------------------------------+
  25. * | | |
  26. * | | |
  27. * +---------------------------------------------------------+
  28. * | + + +
  29. * | neighborhood[0] neighborhood[1] |
  30. * | |
  31. * | |
  32. * +---------------------------------------------------------+
  33. * |
  34. * +
  35. * neighborhoods
  36. *
  37. * On lookup/insert/delete, the items key is hashed to a 64 bit value
  38. * and the result is masked to provide an index into the neighborhoods
  39. * table. Once a neighborhood is determined, an in-order search is done
  40. * of the elements in the neighborhood indexes entries for a matching hash
  41. * value, if found, the corresponding HT_VALUE is used for the respective
  42. * operation. The number of entries in a neighborhood is determined at build
  43. * time based on the cacheline size of the target CPU. The intent is for a
  44. * neighborhood to have all entries in the neighborhood fit into a single cache
  45. * line to speed up lookups. If all entries in a neighborhood are in use at the
  46. * time of an insert, the table is expanded and rehashed.
  47. *
  48. * Lockless reads hash table is based on the same design but does not
  49. * allow growing and deletion. Thus subsequent neighborhoods are always
  50. * searched for a match until an empty entry is found.
  51. */
  52. #include <string.h>
  53. #include <internal/rcu.h>
  54. #include <internal/hashtable.h>
  55. #include <internal/hashfunc.h>
  56. #include <openssl/rand.h>
  57. /*
  58. * gcc defines __SANITIZE_THREAD__
  59. * but clang uses the feature attributes api
  60. * map the latter to the former
  61. */
  62. #if defined(__clang__) && defined(__has_feature)
  63. #if __has_feature(thread_sanitizer)
  64. #define __SANITIZE_THREADS__
  65. #endif
  66. #endif
  67. #ifdef __SANITIZE_THREADS__
  68. #include <sanitizer/tsan_interface.h>
  69. #endif
  70. #include "internal/numbers.h"
  71. /*
  72. * When we do a lookup/insert/delete, there is a high likelihood
  73. * that we will iterate over at least part of the neighborhood list
  74. * As such, because we design a neighborhood entry to fit into a single
  75. * cache line it is advantageous, when supported to fetch the entire
  76. * structure for faster lookups
  77. */
  78. #if defined(__GNUC__) || defined(__CLANG__)
  79. #define PREFETCH_NEIGHBORHOOD(x) __builtin_prefetch(x.entries)
  80. #define PREFETCH(x) __builtin_prefetch(x)
  81. #define ALIGN __attribute__((aligned(8)))
  82. #else
  83. #define PREFETCH_NEIGHBORHOOD(x)
  84. #define PREFETCH(x)
  85. #define ALIGN
  86. #endif
  87. /*
  88. * Define our neighborhood list length
  89. * Note: It should always be a power of 2
  90. */
  91. #define DEFAULT_NEIGH_LEN_LOG 4
  92. #define DEFAULT_NEIGH_LEN (1 << DEFAULT_NEIGH_LEN_LOG)
  93. /*
  94. * For now assume cache line size is 64 bytes
  95. */
  96. #define CACHE_LINE_BYTES 64
  97. #define CACHE_LINE_ALIGNMENT CACHE_LINE_BYTES
  98. #define NEIGHBORHOOD_LEN (CACHE_LINE_BYTES / sizeof(struct ht_neighborhood_entry_st))
/*
 * Defines our chains of values
 */
struct ht_internal_value_st {
    HT_VALUE value;             /* public view; must be first so (HT_VALUE *) casts work */
    HT *ht;                     /* owning table, used by the free callbacks */
};

struct ht_neighborhood_entry_st {
    uint64_t hash;                      /* cached 64 bit hash of the stored key */
    struct ht_internal_value_st *value; /* NULL marks an empty slot */
} ALIGN;

struct ht_neighborhood_st {
    /* sized so one neighborhood fits in a single cache line */
    struct ht_neighborhood_entry_st entries[NEIGHBORHOOD_LEN];
};

/*
 * Updates to data in this struct
 * require an rcu sync after modification
 * prior to free
 */
struct ht_mutable_data_st {
    struct ht_neighborhood_st *neighborhoods; /* bucket array (reader visible) */
    void *neighborhood_ptr_to_free; /* raw allocation backing neighborhoods;
                                     * this, not neighborhoods, goes to OPENSSL_free */
    uint64_t neighborhood_mask;     /* len - 1; len is always a power of 2 */
};

/*
 * Private data may be updated on the write
 * side only, and so do not require rcu sync
 */
struct ht_write_private_data_st {
    size_t neighborhood_len;    /* current bucket count (power of 2) */
    size_t value_count;         /* number of values currently stored */
    int need_sync;              /* set when an rcu sync is needed on write unlock */
};

struct ht_internal_st {
    HT_CONFIG config;           /* caller-supplied configuration (copied) */
    CRYPTO_RCU_LOCK *lock;      /* rcu lock guarding md */
    CRYPTO_RWLOCK *atomic_lock; /* fallback lock for CRYPTO_atomic_* operations */
    struct ht_mutable_data_st *md; /* rcu-protected mutable data */
    struct ht_write_private_data_st wpd; /* writer-only state */
};
  139. static void free_value(struct ht_internal_value_st *v);
  140. static struct ht_neighborhood_st *alloc_new_neighborhood_list(size_t len,
  141. void **freeptr)
  142. {
  143. struct ht_neighborhood_st *ret;
  144. ret = OPENSSL_aligned_alloc(sizeof(struct ht_neighborhood_st) * len,
  145. CACHE_LINE_BYTES, freeptr);
  146. /* fall back to regular malloc */
  147. if (ret == NULL) {
  148. ret = *freeptr = OPENSSL_malloc(sizeof(struct ht_neighborhood_st) * len);
  149. if (ret == NULL)
  150. return NULL;
  151. }
  152. memset(ret, 0, sizeof(struct ht_neighborhood_st) * len);
  153. return ret;
  154. }
  155. static void internal_free_nop(HT_VALUE *v)
  156. {
  157. return;
  158. }
/*
 * Create a new hash table configured per *conf (which is copied).
 * A missing hash function defaults to ossl_fnv1a_hash and a missing
 * free function to a no-op. Returns NULL on allocation/lock failure.
 */
HT *ossl_ht_new(const HT_CONFIG *conf)
{
    HT *new = OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return NULL;

    new->atomic_lock = CRYPTO_THREAD_lock_new();
    if (new->atomic_lock == NULL)
        goto err;

    memcpy(&new->config, conf, sizeof(*conf));

    if (new->config.init_neighborhoods != 0) {
        new->wpd.neighborhood_len = new->config.init_neighborhoods;
        /*
         * round up to the next power of 2 via bit smearing.
         * NOTE(review): the smear stops at >> 16, so only requested
         * lengths below 2^32 round correctly - confirm that is an
         * accepted limit for init_neighborhoods.
         */
        new->wpd.neighborhood_len--;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 1;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 2;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 4;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 8;
        new->wpd.neighborhood_len |= new->wpd.neighborhood_len >> 16;
        new->wpd.neighborhood_len++;
    } else {
        new->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;
    }

    if (new->config.ht_free_fn == NULL)
        new->config.ht_free_fn = internal_free_nop;

    new->md = OPENSSL_zalloc(sizeof(*new->md));
    if (new->md == NULL)
        goto err;

    new->md->neighborhoods =
        alloc_new_neighborhood_list(new->wpd.neighborhood_len,
                                    &new->md->neighborhood_ptr_to_free);
    if (new->md->neighborhoods == NULL)
        goto err;
    /* power-of-2 length makes masking equivalent to modulo */
    new->md->neighborhood_mask = new->wpd.neighborhood_len - 1;

    new->lock = ossl_rcu_lock_new(1, conf->ctx);
    if (new->lock == NULL)
        goto err;

    if (new->config.ht_hash_fn == NULL)
        new->config.ht_hash_fn = ossl_fnv1a_hash;

    return new;

err:
    /* each of these tolerates a NULL argument */
    CRYPTO_THREAD_lock_free(new->atomic_lock);
    ossl_rcu_lock_free(new->lock);
    if (new->md != NULL)
        OPENSSL_free(new->md->neighborhood_ptr_to_free);
    OPENSSL_free(new->md);
    OPENSSL_free(new);
    return NULL;
}
  206. void ossl_ht_read_lock(HT *htable)
  207. {
  208. ossl_rcu_read_lock(htable->lock);
  209. }
  210. void ossl_ht_read_unlock(HT *htable)
  211. {
  212. ossl_rcu_read_unlock(htable->lock);
  213. }
  214. void ossl_ht_write_lock(HT *htable)
  215. {
  216. ossl_rcu_write_lock(htable->lock);
  217. htable->wpd.need_sync = 0;
  218. }
  219. void ossl_ht_write_unlock(HT *htable)
  220. {
  221. int need_sync = htable->wpd.need_sync;
  222. htable->wpd.need_sync = 0;
  223. ossl_rcu_write_unlock(htable->lock);
  224. if (need_sync)
  225. ossl_synchronize_rcu(htable->lock);
  226. }
/*
 * RCU callback: destroy a retired mutable-data set after a flush.
 * Runs the user free callback on every value still held, then frees
 * each wrapper, the bucket array and the md struct itself.
 */
static void free_oldmd(void *arg)
{
    struct ht_mutable_data_st *oldmd = arg;
    size_t i, j;
    size_t neighborhood_len = (size_t)oldmd->neighborhood_mask + 1;
    struct ht_internal_value_st *v;

    for (i = 0; i < neighborhood_len; i++) {
        /*
         * Prefetch the next neighborhood while scanning this one.
         * NOTE(review): on the final iteration this computes the address
         * one past the end of the array; harmless as a prefetch hint but
         * worth confirming it is intended.
         */
        PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[i + 1]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            if (oldmd->neighborhoods[i].entries[j].value != NULL) {
                v = oldmd->neighborhoods[i].entries[j].value;
                /* user callback first, then the wrapper itself */
                v->ht->config.ht_free_fn((HT_VALUE *)v);
                free_value(v);
            }
        }
    }
    OPENSSL_free(oldmd->neighborhood_ptr_to_free);
    OPENSSL_free(oldmd);
}
/*
 * Swap in a fresh, empty, default-sized mutable data set and schedule
 * the old set (with all values it holds) for deferred freeing once
 * readers drain. Must be called with the write lock held.
 * Returns 1 on success, 0 on allocation failure (table unchanged).
 */
static int ossl_ht_flush_internal(HT *h)
{
    struct ht_mutable_data_st *newmd = NULL;
    struct ht_mutable_data_st *oldmd = NULL;

    newmd = OPENSSL_zalloc(sizeof(*newmd));
    if (newmd == NULL)
        return 0;

    newmd->neighborhoods = alloc_new_neighborhood_list(DEFAULT_NEIGH_LEN,
                                                       &newmd->neighborhood_ptr_to_free);
    if (newmd->neighborhoods == NULL) {
        OPENSSL_free(newmd);
        return 0;
    }
    newmd->neighborhood_mask = DEFAULT_NEIGH_LEN - 1;

    /* Swap the old and new mutable data sets */
    oldmd = ossl_rcu_deref(&h->md);
    ossl_rcu_assign_ptr(&h->md, &newmd);

    /* Set the number of entries to 0 */
    h->wpd.value_count = 0;
    h->wpd.neighborhood_len = DEFAULT_NEIGH_LEN;

    /* defer destruction of oldmd until current readers are done */
    ossl_rcu_call(h->lock, free_oldmd, oldmd);
    h->wpd.need_sync = 1;
    return 1;
}
  270. int ossl_ht_flush(HT *h)
  271. {
  272. return ossl_ht_flush_internal(h);
  273. }
  274. void ossl_ht_free(HT *h)
  275. {
  276. if (h == NULL)
  277. return;
  278. ossl_ht_write_lock(h);
  279. ossl_ht_flush_internal(h);
  280. ossl_ht_write_unlock(h);
  281. /* Freeing the lock does a final sync for us */
  282. CRYPTO_THREAD_lock_free(h->atomic_lock);
  283. ossl_rcu_lock_free(h->lock);
  284. OPENSSL_free(h->md->neighborhood_ptr_to_free);
  285. OPENSSL_free(h->md);
  286. OPENSSL_free(h);
  287. return;
  288. }
  289. size_t ossl_ht_count(HT *h)
  290. {
  291. size_t count;
  292. count = h->wpd.value_count;
  293. return count;
  294. }
  295. void ossl_ht_foreach_until(HT *h, int (*cb)(HT_VALUE *obj, void *arg),
  296. void *arg)
  297. {
  298. size_t i, j;
  299. struct ht_mutable_data_st *md;
  300. md = ossl_rcu_deref(&h->md);
  301. for (i = 0; i < md->neighborhood_mask + 1; i++) {
  302. PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
  303. for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
  304. if (md->neighborhoods[i].entries[j].value != NULL) {
  305. if (!cb((HT_VALUE *)md->neighborhoods[i].entries[j].value, arg))
  306. goto out;
  307. }
  308. }
  309. }
  310. out:
  311. return;
  312. }
  313. HT_VALUE_LIST *ossl_ht_filter(HT *h, size_t max_len,
  314. int (*filter)(HT_VALUE *obj, void *arg),
  315. void *arg)
  316. {
  317. struct ht_mutable_data_st *md;
  318. HT_VALUE_LIST *list = OPENSSL_zalloc(sizeof(HT_VALUE_LIST)
  319. + (sizeof(HT_VALUE *) * max_len));
  320. size_t i, j;
  321. struct ht_internal_value_st *v;
  322. if (list == NULL)
  323. return NULL;
  324. /*
  325. * The list array lives just beyond the end of
  326. * the struct
  327. */
  328. list->list = (HT_VALUE **)(list + 1);
  329. md = ossl_rcu_deref(&h->md);
  330. for (i = 0; i < md->neighborhood_mask + 1; i++) {
  331. PREFETCH_NEIGHBORHOOD(md->neighborhoods[i + 1]);
  332. for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
  333. v = md->neighborhoods[i].entries[j].value;
  334. if (v != NULL && filter((HT_VALUE *)v, arg)) {
  335. list->list[list->list_len++] = (HT_VALUE *)v;
  336. if (list->list_len == max_len)
  337. goto out;
  338. }
  339. }
  340. }
  341. out:
  342. return list;
  343. }
  344. void ossl_ht_value_list_free(HT_VALUE_LIST *list)
  345. {
  346. OPENSSL_free(list);
  347. }
  348. static int compare_hash(uint64_t hash1, uint64_t hash2)
  349. {
  350. return (hash1 == hash2);
  351. }
  352. static void free_old_neigh_table(void *arg)
  353. {
  354. struct ht_mutable_data_st *oldmd = arg;
  355. OPENSSL_free(oldmd->neighborhood_ptr_to_free);
  356. OPENSSL_free(oldmd);
  357. }
  358. /*
  359. * Increase hash table bucket list
  360. * must be called with write_lock held
  361. */
  362. static int grow_hashtable(HT *h, size_t oldsize)
  363. {
  364. struct ht_mutable_data_st *newmd;
  365. struct ht_mutable_data_st *oldmd = ossl_rcu_deref(&h->md);
  366. int rc = 0;
  367. uint64_t oldi, oldj, newi, newj;
  368. uint64_t oldhash;
  369. struct ht_internal_value_st *oldv;
  370. int rehashed;
  371. size_t newsize = oldsize * 2;
  372. if (h->config.lockless_reads)
  373. goto out;
  374. if ((newmd = OPENSSL_zalloc(sizeof(*newmd))) == NULL)
  375. goto out;
  376. /* bucket list is always a power of 2 */
  377. newmd->neighborhoods = alloc_new_neighborhood_list(oldsize * 2,
  378. &newmd->neighborhood_ptr_to_free);
  379. if (newmd->neighborhoods == NULL)
  380. goto out_free;
  381. /* being a power of 2 makes for easy mask computation */
  382. newmd->neighborhood_mask = (newsize - 1);
  383. /*
  384. * Now we need to start rehashing entries
  385. * Note we don't need to use atomics here as the new
  386. * mutable data hasn't been published
  387. */
  388. for (oldi = 0; oldi < h->wpd.neighborhood_len; oldi++) {
  389. PREFETCH_NEIGHBORHOOD(oldmd->neighborhoods[oldi + 1]);
  390. for (oldj = 0; oldj < NEIGHBORHOOD_LEN; oldj++) {
  391. oldv = oldmd->neighborhoods[oldi].entries[oldj].value;
  392. if (oldv == NULL)
  393. continue;
  394. oldhash = oldmd->neighborhoods[oldi].entries[oldj].hash;
  395. newi = oldhash & newmd->neighborhood_mask;
  396. rehashed = 0;
  397. for (newj = 0; newj < NEIGHBORHOOD_LEN; newj++) {
  398. if (newmd->neighborhoods[newi].entries[newj].value == NULL) {
  399. newmd->neighborhoods[newi].entries[newj].value = oldv;
  400. newmd->neighborhoods[newi].entries[newj].hash = oldhash;
  401. rehashed = 1;
  402. break;
  403. }
  404. }
  405. if (rehashed == 0) {
  406. /* we ran out of space in a neighborhood, grow again */
  407. OPENSSL_free(newmd->neighborhoods);
  408. OPENSSL_free(newmd);
  409. return grow_hashtable(h, newsize);
  410. }
  411. }
  412. }
  413. /*
  414. * Now that our entries are all hashed into the new bucket list
  415. * update our bucket_len and target_max_load
  416. */
  417. h->wpd.neighborhood_len = newsize;
  418. /*
  419. * Now we replace the old mutable data with the new
  420. */
  421. ossl_rcu_assign_ptr(&h->md, &newmd);
  422. ossl_rcu_call(h->lock, free_old_neigh_table, oldmd);
  423. h->wpd.need_sync = 1;
  424. /*
  425. * And we're done
  426. */
  427. rc = 1;
  428. out:
  429. return rc;
  430. out_free:
  431. OPENSSL_free(newmd->neighborhoods);
  432. OPENSSL_free(newmd);
  433. goto out;
  434. }
  435. static void free_old_ht_value(void *arg)
  436. {
  437. HT_VALUE *h = (HT_VALUE *)arg;
  438. /*
  439. * Note, this is only called on replacement,
  440. * the caller is responsible for freeing the
  441. * held data, we just need to free the wrapping
  442. * struct here
  443. */
  444. OPENSSL_free(h);
  445. }
  446. static ossl_inline int match_key(HT_KEY *a, HT_KEY *b)
  447. {
  448. /*
  449. * keys match if they are both present, the same size
  450. * and compare equal in memory
  451. */
  452. PREFETCH(a->keybuf);
  453. PREFETCH(b->keybuf);
  454. if (a->keybuf != NULL && b->keybuf != NULL && a->keysize == b->keysize)
  455. return !memcmp(a->keybuf, b->keybuf, a->keysize);
  456. return 1;
  457. }
/*
 * Core insertion routine; requires the write lock.
 * Probes the neighborhood selected by hash:
 *   - matching entry found, olddata != NULL: replace the value, return
 *     the old one in *olddata, defer freeing its wrapper; returns 1.
 *   - matching entry found, olddata == NULL: duplicate key; returns 0.
 *   - no match but an empty slot was seen: claim it; returns 1.
 *   - no empty slot available: returns -1 so the caller can grow.
 * For lockless-read tables the probe continues into subsequent
 * neighborhoods until an empty entry is found (entries are never
 * deleted in that mode, so an empty slot ends the search).
 */
static int ossl_ht_insert_locked(HT *h, uint64_t hash,
                                 struct ht_internal_value_st *newval,
                                 HT_VALUE **olddata)
{
    struct ht_mutable_data_st *md = h->md;
    uint64_t neigh_idx_start = hash & md->neighborhood_mask;
    uint64_t neigh_idx = neigh_idx_start;
    size_t j;
    uint64_t ihash;
    HT_VALUE *ival;
    size_t empty_idx = SIZE_MAX; /* SIZE_MAX == no free slot found yet */
    int lockless_reads = h->config.lockless_reads;

    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);

        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                /* remember the slot in case this turns out to be an insert */
                empty_idx = j;
                /* lockless_reads implies no deletion, we can break out */
                if (lockless_reads)
                    goto not_found;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ihash, h->atomic_lock))
                return 0;
            if (compare_hash(hash, ihash) && match_key(&newval->value.key, &ival->key)) {
                if (olddata == NULL) {
                    /* This would insert a duplicate -> fail */
                    return 0;
                }
                /* Do a replacement */
                if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[j].hash,
                                         hash, h->atomic_lock))
                    return 0;
                *olddata = (HT_VALUE *)md->neighborhoods[neigh_idx].entries[j].value;
                /* publish the new value, then defer freeing the old wrapper */
                ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[j].value,
                                    &newval);
                ossl_rcu_call(h->lock, free_old_ht_value, *olddata);
                h->wpd.need_sync = 1;
                return 1;
            }
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);

not_found:
    /* If we get to here, its just an insert */
    if (empty_idx == SIZE_MAX)
        return -1; /* out of space */
    /* store the hash before publishing the value pointer */
    if (!CRYPTO_atomic_store(&md->neighborhoods[neigh_idx].entries[empty_idx].hash,
                             hash, h->atomic_lock))
        return 0;
    h->wpd.value_count++;
    ossl_rcu_assign_ptr(&md->neighborhoods[neigh_idx].entries[empty_idx].value,
                        &newval);
    return 1;
}
  518. static struct ht_internal_value_st *alloc_new_value(HT *h, HT_KEY *key,
  519. void *data,
  520. uintptr_t *type)
  521. {
  522. struct ht_internal_value_st *tmp;
  523. size_t nvsize = sizeof(*tmp);
  524. if (h->config.collision_check == 1)
  525. nvsize += key->keysize;
  526. tmp = OPENSSL_malloc(nvsize);
  527. if (tmp == NULL)
  528. return NULL;
  529. tmp->ht = h;
  530. tmp->value.value = data;
  531. tmp->value.type_id = type;
  532. tmp->value.key.keybuf = NULL;
  533. if (h->config.collision_check) {
  534. tmp->value.key.keybuf = (uint8_t *)(tmp + 1);
  535. tmp->value.key.keysize = key->keysize;
  536. memcpy(tmp->value.key.keybuf, key->keybuf, key->keysize);
  537. }
  538. return tmp;
  539. }
  540. static void free_value(struct ht_internal_value_st *v)
  541. {
  542. OPENSSL_free(v);
  543. }
/*
 * Insert data under key; requires the write lock.
 * If olddata is non-NULL an existing entry with the same key is replaced
 * and returned through it; with olddata NULL a duplicate key fails the
 * insert. When the target neighborhood is full the table is grown and
 * the insert retried, at most 4 times.
 * Returns 1 on success, 0 on failure, -1 when the table could not grow.
 */
int ossl_ht_insert(HT *h, HT_KEY *key, HT_VALUE *data, HT_VALUE **olddata)
{
    struct ht_internal_value_st *newval = NULL;
    uint64_t hash;
    int rc = 0;
    int i;

    /* NULL payloads cannot be stored: NULL marks an empty slot */
    if (data->value == NULL)
        goto out;

    newval = alloc_new_value(h, key, data->value, data->type_id);
    if (newval == NULL)
        goto out;

    /*
     * we have to take our lock here to prevent other changes
     * to the bucket list
     */
    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    /* retry after growing whenever the insert reports no space (-1) */
    for (i = 0;
         (rc = ossl_ht_insert_locked(h, hash, newval, olddata)) == -1
         && i < 4;
         ++i)
        if (!grow_hashtable(h, h->wpd.neighborhood_len)) {
            rc = -1;
            break;
        }

    /* on any failure the wrapper was never stored: release it */
    if (rc <= 0)
        free_value(newval);

out:
    return rc;
}
/*
 * Look up key and return its value, or NULL if not present.
 * Callers hold the read lock (or rely on lockless_reads). For
 * lockless-read tables the probe continues into subsequent
 * neighborhoods - entries are never deleted in that mode, so hitting
 * an empty slot terminates the search.
 */
HT_VALUE *ossl_ht_get(HT *h, HT_KEY *key)
{
    struct ht_mutable_data_st *md;
    uint64_t hash;
    uint64_t neigh_idx_start;
    uint64_t neigh_idx;
    struct ht_internal_value_st *ival = NULL;
    size_t j;
    uint64_t ehash;
    int lockless_reads = h->config.lockless_reads;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    md = ossl_rcu_deref(&h->md);
    neigh_idx = neigh_idx_start = hash & md->neighborhood_mask;
    do {
        PREFETCH_NEIGHBORHOOD(md->neighborhoods[neigh_idx]);
        for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
            ival = ossl_rcu_deref(&md->neighborhoods[neigh_idx].entries[j].value);
            if (ival == NULL) {
                if (lockless_reads)
                    /* lockless_reads implies no deletion, we can break out */
                    return NULL;
                continue;
            }
            if (!CRYPTO_atomic_load(&md->neighborhoods[neigh_idx].entries[j].hash,
                                    &ehash, h->atomic_lock))
                return NULL;
            /* full hash first, then (when stored) the key bytes */
            if (compare_hash(hash, ehash) && match_key(&ival->value.key, key))
                return (HT_VALUE *)ival;
        }
        if (!lockless_reads)
            break;
        /* Continue search in subsequent neighborhoods */
        neigh_idx = (neigh_idx + 1) & md->neighborhood_mask;
    } while (neigh_idx != neigh_idx_start);
    return NULL;
}
  609. static void free_old_entry(void *arg)
  610. {
  611. struct ht_internal_value_st *v = arg;
  612. v->ht->config.ht_free_fn((HT_VALUE *)v);
  613. free_value(v);
  614. }
/*
 * Remove the entry matching key; requires the write lock.
 * Not supported (returns 0) on lockless-read tables, where deletion
 * would break the probe-until-empty invariant. Freeing of the removed
 * value is deferred via rcu until readers have drained.
 * Returns 1 if an entry was removed, 0 otherwise.
 */
int ossl_ht_delete(HT *h, HT_KEY *key)
{
    uint64_t hash;
    uint64_t neigh_idx;
    size_t j;
    struct ht_internal_value_st *v = NULL;
    HT_VALUE *nv = NULL; /* typed NULL for ossl_rcu_assign_ptr below */
    int rc = 0;

    if (h->config.lockless_reads)
        return 0;

    hash = h->config.ht_hash_fn(key->keybuf, key->keysize);

    neigh_idx = hash & h->md->neighborhood_mask;
    PREFETCH_NEIGHBORHOOD(h->md->neighborhoods[neigh_idx]);
    for (j = 0; j < NEIGHBORHOOD_LEN; j++) {
        v = (struct ht_internal_value_st *)h->md->neighborhoods[neigh_idx].entries[j].value;
        if (v == NULL)
            continue;
        if (compare_hash(hash, h->md->neighborhoods[neigh_idx].entries[j].hash)
            && match_key(key, &v->value.key)) {
            /* clear the hash first so readers stop matching this slot */
            if (!CRYPTO_atomic_store(&h->md->neighborhoods[neigh_idx].entries[j].hash,
                                     0, h->atomic_lock))
                break;
            h->wpd.value_count--;
            ossl_rcu_assign_ptr(&h->md->neighborhoods[neigh_idx].entries[j].value,
                                &nv);
            rc = 1;
            break;
        }
    }
    if (rc == 1) {
        /* defer the user free until current readers are done with v */
        ossl_rcu_call(h->lock, free_old_entry, v);
        h->wpd.need_sync = 1;
    }
    return rc;
}