
/** BEGIN COPYRIGHT BLOCK
 * This Program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 *
 * This Program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * In addition, as a special exception, Red Hat, Inc. gives You the additional
 * right to link the code of this Program with code not covered under the GNU
 * General Public License ("Non-GPL Code") and to distribute linked combinations
 * including the two, subject to the limitations in this paragraph. Non-GPL Code
 * permitted under this exception must only link to the code of this Program
 * through those well defined interfaces identified in the file named EXCEPTION
 * found in the source code files (the "Approved Interfaces"). The files of
 * Non-GPL Code may instantiate templates or use macros or inline functions from
 * the Approved Interfaces without causing the resulting work to be covered by
 * the GNU General Public License. Only Red Hat, Inc. may make changes or
 * additions to the list of Approved Interfaces. You must obey the GNU General
 * Public License in all respects for all of the Program code and other code used
 * in conjunction with the Program except the Non-GPL Code covered by this
 * exception. If you modify this file, you may extend this exception to your
 * version of the file, but you are not obligated to do so. If you do not wish to
 * provide this exception without modification, you must delete this exception
 * statement from your version and license this file solely under the GPL without
 * exception.
 *
 *
 * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
 * Copyright (C) 2005 Red Hat, Inc.
 * All rights reserved.
 * END COPYRIGHT BLOCK **/
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

/* cache.c - routines to maintain an in-core cache of entries */

#include "back-ldbm.h"

#ifdef DEBUG
#define LDAP_CACHE_DEBUG
/* #define LDAP_CACHE_DEBUG_LRU */ /* causes slowdown */
#endif

/* cache can't get any smaller than this (in bytes) */
#define MINCACHESIZE (size_t)200000

/* don't let hash be smaller than this # of slots */
#define MINHASHSIZE 1024
/*
 * the cache has four entry points (ways to find things):
 *
 *      by entry        e.g., if you already have an entry from the cache
 *                      and want to delete it. (really by entry ptr)
 *      by dn           e.g., when looking for the base object of a search
 *      by id           e.g., for search candidates
 *      by uniqueid
 *
 * the last three correspond to the three hashtables maintained below
 * (they used to be avl trees; those avl trees are being destroyed as
 * we speak).
 */
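/* Illustrative caller-side sketch (not part of the original file) of the
 * fetch/return discipline these entry points imply; the function name is
 * hypothetical, everything it calls is defined below:
 */
#if 0 /* example only -- not compiled */
static void example_fetch(struct cache *cache, ID id)
{
    struct backentry *e = cache_find_id(cache, id); /* takes a refcount */

    if (e != NULL) {
        /* ... use e->ep_entry ... */
        cache_return(cache, &e); /* drops the refcount; may go back on LRU */
    }
}
#endif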
#ifdef LDAP_CACHE_DEBUG
#define ASSERT(_x) do { \
    if (!(_x)) { \
        LDAPDebug(LDAP_DEBUG_ANY, "BAD CACHE ASSERTION at %s/%d: %s\n", \
                  __FILE__, __LINE__, #_x); \
        *(char *)0L = 23; \
    } \
} while (0)
#define LOG(_a, _x1, _x2, _x3) LDAPDebug(LDAP_DEBUG_CACHE, _a, _x1, _x2, _x3)
#else
#define ASSERT(_x) ;
#define LOG(_a, _x1, _x2, _x3) ;
#endif
/***** tiny hashtable implementation *****/

#define HASH_VALUE(_key, _keylen) \
    ((ht->hashfn == NULL) ? (*(unsigned int *)(_key)) : \
     ((*ht->hashfn)(_key, _keylen)))
#define HASH_NEXT(ht, entry) (*(void **)((char *)(entry) + (ht)->offset))
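/* These tables are intrusive: each entry embeds one link field per table
 * it can appear in, and HASH_NEXT reinterprets the bytes at 'offset'
 * inside the entry as a void* chain pointer. As a sketch, HASHLOC
 * (assumed here to come from back-ldbm.h) behaves like offsetof():
 */
#if 0 /* example only -- not compiled */
#include <stddef.h>
#define HASHLOC(struct_type, field) ((u_long)offsetof(struct_type, field))
/* so for a table built with HASHLOC(struct backentry, ep_id_link),
 * HASH_NEXT(ht, e) is (*(void **)((char *)e + offsetof(struct backentry,
 * ep_id_link))), i.e. e's ep_id_link field used as the chain pointer. */
#endif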
static int entry_same_id(const void *e, const void *k)
{
    return (((struct backentry *)e)->ep_id == *(ID *)k);
}

static unsigned long dn_hash(const void *key, size_t keylen)
{
    unsigned char *x = (unsigned char *)key;
    ssize_t i;
    unsigned long val = 0;

    for (i = keylen - 1; i >= 0; i--)
        val += ((val << 5) + (*x++)) & 0xffffffff;
    return val;
}

#ifdef UUIDCACHE_ON
static unsigned long uuid_hash(const void *key, size_t keylen)
{
    unsigned char *x = (unsigned char *)key;
    size_t i;
    unsigned long val = 0;

    for (i = 0; i < keylen; i++, x++) {
        char c = (*x <= '9' ? (*x - '0') : (*x - 'A' + 10));
        val = ((val << 4) ^ (val >> 28) ^ c) & 0xffffffff;
    }
    return val;
}

static int entry_same_uuid(const void *e, const void *k)
{
    struct backentry *be = (struct backentry *)e;
    const char *uuid = slapi_entry_get_uniqueid(be->ep_entry);

    return (strcmp(uuid, (char *)k) == 0);
}
#endif

static int entry_same_dn(const void *e, const void *k)
{
    struct backentry *be = (struct backentry *)e;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(be));

    return (strcmp(ndn, (char *)k) == 0);
}
Hashtable *new_hash(u_long size, u_long offset, HashFn hfn, HashTestFn tfn)
{
    static u_long prime[] = { 3, 5, 7, 11, 13, 17, 19 };
    Hashtable *ht;
    int ok = 0, i;

    if (size < MINHASHSIZE)
        size = MINHASHSIZE;
    /* move up to nearest relative prime (it's a statistical thing) */
    size |= 1;
    do {
        ok = 1;
        for (i = 0; i < (sizeof(prime) / sizeof(prime[0])); i++)
            if (!(size % prime[i]))
                ok = 0;
        if (!ok)
            size += 2;
    } while (!ok);

    ht = (Hashtable *)slapi_ch_calloc(1, sizeof(Hashtable) + size * sizeof(void *));
    if (!ht)
        return NULL;
    ht->size = size;
    ht->offset = offset;
    ht->hashfn = hfn;
    ht->testfn = tfn;
    /* calloc zeroes out the slots automagically */
    return ht;
}
/* adds an entry to the hash -- returns 1 on success, 0 if the key was
 * already there (the existing entry is filled into 'alt' if 'alt' is
 * not NULL)
 */
int add_hash(Hashtable *ht, void *key, size_t keylen, void *entry, void **alt)
{
    u_long val, slot;
    void *e;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    /* first, check if this key is already in the table */
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            /* ack! already in! */
            if (alt)
                *alt = e;
            return 0;
        }
        e = HASH_NEXT(ht, e);
    }
    /* ok, it's not already there, so add it */
    HASH_NEXT(ht, entry) = ht->slot[slot];
    ht->slot[slot] = entry;
    return 1;
}
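/* Sketch of the add/find discipline (illustrative only; the function
 * name is hypothetical, 'ht' is an id-keyed table like the one
 * cache_make_hashes builds below):
 */
#if 0 /* example only -- not compiled */
static void example_add_find(Hashtable *ht, struct backentry *e)
{
    struct backentry *dup = NULL, *found = NULL;

    if (!add_hash(ht, &e->ep_id, sizeof(ID), e, (void **)&dup)) {
        /* key was already present; 'dup' points at the existing entry */
    }
    if (find_hash(ht, &e->ep_id, sizeof(ID), (void **)&found)) {
        /* 'found' is the chained entry whose testfn matched the key */
    }
}
#endif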
/* returns 1 if the item was found, and puts a ptr to it in 'entry' */
int find_hash(Hashtable *ht, const void *key, size_t keylen, void **entry)
{
    u_long val, slot;
    void *e;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            *entry = e;
            return 1;
        }
        e = HASH_NEXT(ht, e);
    }
    /* no go */
    *entry = NULL;
    return 0;
}
/* returns 1 if the item was found and removed */
int remove_hash(Hashtable *ht, const void *key, size_t keylen)
{
    u_long val, slot;
    void *e, *laste = NULL;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            /* remove this one */
            if (laste)
                HASH_NEXT(ht, laste) = HASH_NEXT(ht, e);
            else
                ht->slot[slot] = HASH_NEXT(ht, e);
            HASH_NEXT(ht, e) = NULL;
            return 1;
        }
        laste = e;
        e = HASH_NEXT(ht, e);
    }
    /* nope */
    return 0;
}
/* hashtable distribution stats --
 * slots: # of slots in the hashtable
 * total_entries: # of entries in the hashtable
 * max_entries_per_slot: highest number of chained entries in a single slot
 * slot_stats: if X is the number of entries in a given slot, then
 *     slot_stats[X] will hold the number of slots that held X entries
 */
static void hash_stats(Hashtable *ht, u_long *slots, int *total_entries,
                       int *max_entries_per_slot, int **slot_stats)
{
#define MAX_SLOT_STATS 50
    u_long i;
    int x;
    void *e;

    *slot_stats = (int *)slapi_ch_malloc(MAX_SLOT_STATS * sizeof(int));
    for (i = 0; i < MAX_SLOT_STATS; i++)
        (*slot_stats)[i] = 0;

    *slots = ht->size;
    *max_entries_per_slot = 0;
    *total_entries = 0;
    for (i = 0; i < ht->size; i++) {
        e = ht->slot[i];
        x = 0;
        while (e) {
            x++;
            (*total_entries)++;
            e = HASH_NEXT(ht, e);
        }
        if (x < MAX_SLOT_STATS)
            (*slot_stats)[x]++;
        if (x > *max_entries_per_slot)
            *max_entries_per_slot = x;
    }
}
/***** add/remove entries to/from the LRU list *****/

#ifdef LDAP_CACHE_DEBUG_LRU
/* for debugging -- painstakingly verify the lru list is ok -- if 'in' is
 * true, then entry 'e' should be in the list right now; otherwise, it
 * should NOT be in the list.
 */
static void lru_verify(struct cache *cache, struct backentry *e, int in)
{
    int is_in = 0;
    int count = 0;
    struct backentry *ep;

    ep = cache->c_lruhead;
    while (ep) {
        count++;
        if (ep == e) {
            is_in = 1;
        }
        if (ep->ep_lruprev) {
            ASSERT(ep->ep_lruprev->ep_lrunext == ep);
        } else {
            ASSERT(ep == cache->c_lruhead);
        }
        if (ep->ep_lrunext) {
            ASSERT(ep->ep_lrunext->ep_lruprev == ep);
        } else {
            ASSERT(ep == cache->c_lrutail);
        }
        ep = ep->ep_lrunext;
    }
    ASSERT(is_in == in);
}
#endif
/* assume lock is held.
 * note: this does not unlink 'e' alone -- it truncates the list at e's
 * predecessor, detaching the whole sublist from 'e' through c_lrutail.
 * cache_flush relies on this to hand back the entire evicted chain.
 */
static void lru_detach(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev) {
        e->ep_lruprev->ep_lrunext = NULL;
        cache->c_lrutail = e->ep_lruprev;
    } else {
        cache->c_lruhead = NULL;
        cache->c_lrutail = NULL;
    }
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
}
/* assume lock is held */
static void lru_delete(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev)
        e->ep_lruprev->ep_lrunext = e->ep_lrunext;
    else
        cache->c_lruhead = e->ep_lrunext;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e->ep_lruprev;
    else
        cache->c_lrutail = e->ep_lruprev;
#ifdef LDAP_CACHE_DEBUG_LRU
    e->ep_lrunext = e->ep_lruprev = NULL;
    lru_verify(cache, e, 0);
#endif
}

/* assume lock is held */
static void lru_add(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
    e->ep_lruprev = NULL;
    e->ep_lrunext = cache->c_lruhead;
    cache->c_lruhead = e;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e;
    if (!cache->c_lrutail)
        cache->c_lrutail = e;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
}
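/* note on the list discipline: c_lruhead is the most-recently-returned
 * end and c_lrutail the least-recently-used end; only entries whose
 * refcount has dropped to zero are ever on this list (cache_return adds
 * them, cache_find_* removes them), which is why cache_flush below can
 * evict from the tail without checking for active readers.
 */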
/***** cache overhead *****/

static int cache_remove_int(struct cache *cache, struct backentry *e);

static void cache_make_hashes(struct cache *cache)
{
    u_long hashsize = (cache->c_maxentries > 0) ? cache->c_maxentries :
        (cache->c_maxsize / 512);

    cache->c_dntable = new_hash(hashsize,
                                HASHLOC(struct backentry, ep_dn_link),
                                dn_hash, entry_same_dn);
    cache->c_idtable = new_hash(hashsize,
                                HASHLOC(struct backentry, ep_id_link),
                                NULL, entry_same_id);
#ifdef UUIDCACHE_ON
    cache->c_uuidtable = new_hash(hashsize,
                                  HASHLOC(struct backentry, ep_uuid_link),
                                  uuid_hash, entry_same_uuid);
#endif
}
/* initialize the cache */
int cache_init(struct cache *cache, size_t maxsize, long maxentries)
{
    LDAPDebug(LDAP_DEBUG_TRACE, "=> cache_init\n", 0, 0, 0);
    cache->c_maxsize = maxsize;
    cache->c_maxentries = maxentries;
    cache->c_cursize = slapi_counter_new();
    cache->c_curentries = 0;
    if (config_get_slapi_counters()) {
        cache->c_hits = slapi_counter_new();
        cache->c_tries = slapi_counter_new();
    } else {
        cache->c_hits = NULL;
        cache->c_tries = NULL;
    }
    cache->c_lruhead = cache->c_lrutail = NULL;
    cache_make_hashes(cache);

    if (((cache->c_mutex = PR_NewLock()) == NULL) ||
        ((cache->c_emutexalloc_mutex = PR_NewLock()) == NULL)) {
        LDAPDebug(LDAP_DEBUG_ANY, "ldbm: cache_init: PR_NewLock failed\n",
                  0, 0, 0);
        return 0;
    }
    LDAPDebug(LDAP_DEBUG_TRACE, "<= cache_init\n", 0, 0, 0);
    return 1;
}

#define CACHE_FULL(cache) \
    ((slapi_counter_get_value((cache)->c_cursize) > (cache)->c_maxsize) || \
     (((cache)->c_maxentries > 0) && \
      ((cache)->c_curentries > (cache)->c_maxentries)))
/* clear out the cache to make room for new entries.
 * you must be holding cache->c_mutex !!
 * returns a pointer to the list of entries that got kicked out of the
 * cache; these entries should be freed outside of cache->c_mutex.
 */
static struct backentry *cache_flush(struct cache *cache)
{
    struct backentry *e = NULL;

    LOG("=> cache_flush\n", 0, 0, 0);

    /* all entries on the LRU list are guaranteed to have a refcnt = 0
     * (iow, nobody's using them), so just delete from the tail down
     * until the cache is a manageable size again.
     * (cache->c_mutex is locked when we enter this)
     */
    while ((cache->c_lrutail != NULL) && CACHE_FULL(cache)) {
        if (e == NULL) {
            e = cache->c_lrutail;
        } else {
            e = e->ep_lruprev;
        }
        ASSERT(e->ep_refcnt == 0);
        e->ep_refcnt++;
        if (cache_remove_int(cache, e) < 0) {
            LDAPDebug(LDAP_DEBUG_ANY, "cache flush: unable to delete entry\n",
                      0, 0, 0);
            break;
        }
        if (e == cache->c_lruhead) {
            break;
        }
    }
    if (e)
        lru_detach(cache, e);
    LOG("<= cache_flush (down to %lu entries, %lu bytes)\n", cache->c_curentries,
        slapi_counter_get_value(cache->c_cursize), 0);
    return e;
}
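/* The caller-side pattern around cache_flush, sketched for reference
 * (the function name is hypothetical; cache_return, cache_set_max_size,
 * etc. below all follow this shape): flush while holding c_mutex, then
 * free the detached chain outside the lock:
 */
#if 0 /* example only -- not compiled */
static void example_evict(struct cache *cache)
{
    struct backentry *eflush, *enext;

    PR_Lock(cache->c_mutex);
    eflush = cache_flush(cache);
    PR_Unlock(cache->c_mutex);
    /* the evicted entries remain chained through ep_lrunext */
    while (eflush) {
        enext = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = enext;
    }
}
#endif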
/* remove everything from the cache */
static void cache_clear_int(struct cache *cache)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    size_t size = cache->c_maxsize;

    cache->c_maxsize = 0;
    eflush = cache_flush(cache);
    while (eflush) {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    cache->c_maxsize = size;
    if (cache->c_curentries > 0) {
        LDAPDebug(LDAP_DEBUG_ANY, "somehow, there are still %ld entries "
                  "in the entry cache. :/\n", cache->c_curentries, 0, 0);
    }
}

void cache_clear(struct cache *cache)
{
    PR_Lock(cache->c_mutex);
    cache_clear_int(cache);
    PR_Unlock(cache->c_mutex);
}
static void erase_cache(struct cache *cache)
{
    cache_clear_int(cache);
    slapi_ch_free((void **)&cache->c_dntable);
    slapi_ch_free((void **)&cache->c_idtable);
#ifdef UUIDCACHE_ON
    slapi_ch_free((void **)&cache->c_uuidtable);
#endif
}

/* to be used on shutdown or when destroying a backend instance */
void cache_destroy_please(struct cache *cache)
{
    erase_cache(cache);
    PR_DestroyLock(cache->c_mutex);
    PR_DestroyLock(cache->c_emutexalloc_mutex);
}
void cache_set_max_size(struct cache *cache, size_t bytes)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    if (bytes < MINCACHESIZE) {
        bytes = MINCACHESIZE;
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Minimum cache size is %lu -- rounding up\n",
                  MINCACHESIZE, 0, 0);
    }
    PR_Lock(cache->c_mutex);
    cache->c_maxsize = bytes;
    LOG("entry cache size set to %lu\n", bytes, 0, 0);
    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = cache_flush(cache);
    while (eflush) {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    if (cache->c_curentries < 50) {
        /* there's hardly anything left in the cache -- clear it out and
         * resize the hashtables for efficiency.
         */
        erase_cache(cache);
        cache_make_hashes(cache);
    }
    PR_Unlock(cache->c_mutex);
    if (!dblayer_is_cachesize_sane(&bytes)) {
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Possible CONFIGURATION ERROR -- cachesize "
                  "(%lu) may be configured to use more than the available "
                  "physical memory.\n", bytes, 0, 0);
    }
}
void cache_set_max_entries(struct cache *cache, long entries)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    /* this is a dumb remnant of pre-5.0 servers, where the cache size
     * was given in # entries instead of memory footprint. hopefully,
     * we can eventually drop this.
     */
    PR_Lock(cache->c_mutex);
    cache->c_maxentries = entries;
    if (entries >= 0) {
        LOG("entry cache entry-limit set to %ld\n", entries, 0, 0);
    } else {
        LOG("entry cache entry-limit turned off\n", 0, 0, 0);
    }

    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = cache_flush(cache);
    PR_Unlock(cache->c_mutex);
    while (eflush) {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}
size_t cache_get_max_size(struct cache *cache)
{
    size_t n;

    PR_Lock(cache->c_mutex);
    n = cache->c_maxsize;
    PR_Unlock(cache->c_mutex);
    return n;
}

long cache_get_max_entries(struct cache *cache)
{
    long n;

    PR_Lock(cache->c_mutex);
    n = cache->c_maxentries;
    PR_Unlock(cache->c_mutex);
    return n;
}
/* determine the general size of a cache entry */
static size_t cache_entry_size(struct backentry *e)
{
    size_t size = 0;

    if (e->ep_entry)
        size += slapi_entry_size(e->ep_entry);
    if (e->ep_vlventry)
        size += slapi_entry_size(e->ep_vlventry);
    /* cannot size ep_mutexp (PRLock) */
    size += sizeof(struct backentry);
    return size;
}
/* the monitor code wants to be able to safely fetch the cache stats --
 * if it ever wants to pull out more info, we might want to change all
 * these u_long *'s to a struct
 */
void cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries,
                     long *nentries, long *maxentries,
                     size_t *size, size_t *maxsize)
{
    PR_Lock(cache->c_mutex);
    if (hits) *hits = slapi_counter_get_value(cache->c_hits);
    if (tries) *tries = slapi_counter_get_value(cache->c_tries);
    if (nentries) *nentries = cache->c_curentries;
    if (maxentries) *maxentries = cache->c_maxentries;
    if (size) *size = slapi_counter_get_value(cache->c_cursize);
    if (maxsize) *maxsize = cache->c_maxsize;
    PR_Unlock(cache->c_mutex);
}
void cache_debug_hash(struct cache *cache, char **out)
{
    u_long slots;
    int total_entries, max_entries_per_slot, *slot_stats;
    int i, j;
    Hashtable *ht;
    char *name;
#ifdef UUIDCACHE_ON
    int ntables = 3;
#else
    int ntables = 2; /* without the uuid table, only dn and id exist */
#endif

    PR_Lock(cache->c_mutex);
    *out = (char *)slapi_ch_malloc(1024);
    **out = 0;

    for (i = 0; i < ntables; i++) {
        if (i > 0)
            sprintf(*out + strlen(*out), "; ");
        switch (i) {
        case 0:
            ht = cache->c_dntable;
            name = "dn";
            break;
        case 1:
        default:
            ht = cache->c_idtable;
            name = "id";
            break;
#ifdef UUIDCACHE_ON
        case 2:
            ht = cache->c_uuidtable;
            name = "uuid";
            break;
#endif
        }
        hash_stats(ht, &slots, &total_entries, &max_entries_per_slot,
                   &slot_stats);
        sprintf(*out + strlen(*out), "%s hash: %lu slots, %d entries (%d max "
                "entries per slot) -- ", name, slots, total_entries,
                max_entries_per_slot);
        /* slot_stats only has MAX_SLOT_STATS buckets; don't read past it */
        for (j = 0; j <= max_entries_per_slot && j < MAX_SLOT_STATS; j++)
            sprintf(*out + strlen(*out), "%d[%d] ", j, slot_stats[j]);
        slapi_ch_free((void **)&slot_stats);
    }
    PR_Unlock(cache->c_mutex);
}
/***** general-purpose cache stuff *****/

/* remove an entry from the cache */
/* you must be holding c_mutex !! */
static int cache_remove_int(struct cache *cache, struct backentry *e)
{
    int ret = 1; /* assume not in cache */
    const char *ndn;
#ifdef UUIDCACHE_ON
    const char *uuid;
#endif

    LOG("=> cache_remove (%s)\n", backentry_get_ndn(e), 0, 0);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE) {
        return ret;
    }

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
    if (remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn))) {
        ret = 0;
    } else {
        LOG("remove %s from dn hash failed\n", ndn, 0, 0);
    }
    if (remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID))) {
        ret = 0;
    } else {
        LOG("remove %d from id hash failed\n", e->ep_id, 0, 0);
    }
#ifdef UUIDCACHE_ON
    uuid = slapi_entry_get_uniqueid(e->ep_entry);
    if (remove_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid))) {
        ret = 0;
    } else {
        LOG("remove %s from uuid hash failed\n", uuid, 0, 0);
    }
#endif
    if (ret == 0) {
        /* won't be on the LRU list since it has a refcount on it */
        /* adjust cache size */
        slapi_counter_subtract(cache->c_cursize, e->size);
        cache->c_curentries--;
        LOG("<= cache_remove (size %lu): cache now %lu entries, %lu bytes\n",
            e->size, cache->c_curentries,
            slapi_counter_get_value(cache->c_cursize));
    }

    /* mark for deletion (will be erased when refcount drops to zero) */
    e->ep_state |= ENTRY_STATE_DELETED;
    LOG("<= cache_remove: %d\n", ret, 0, 0);
    return ret;
}
/* remove an entry from the cache.
 * you must have a refcount on e (iow, fetched via cache_find_*). the
 * entry is removed from the cache, but NOT freed! you are responsible
 * for freeing the entry yourself when done with it, preferably via
 * cache_return (called AFTER cache_remove). some code still does this
 * via backentry_free, which is okay, as long as you know you're the only
 * thread holding a reference to the deleted entry.
 * returns: 0 on success
 *          1 if the entry wasn't in the cache at all (not even partially)
 */
int cache_remove(struct cache *cache, struct backentry *e)
{
    int ret;

    PR_Lock(cache->c_mutex);
    ASSERT(e->ep_refcnt > 0);
    ret = cache_remove_int(cache, e);
    PR_Unlock(cache->c_mutex);
    return ret;
}
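/* Sketch of the delete sequence the comment above prescribes (example
 * only; the function name is hypothetical):
 */
#if 0 /* example only -- not compiled */
static void example_delete(struct cache *cache, ID id)
{
    struct backentry *e = cache_find_id(cache, id); /* takes a refcount */

    if (e != NULL) {
        if (cache_remove(cache, e) == 0) {
            /* unhooked from the hashtables; we still hold our reference */
        }
        cache_return(cache, &e); /* refcount hits 0 -> DELETED entry freed */
    }
}
#endif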
/* replace an entry in the cache.
 * returns: 0 on success
 *          1 if the entry wasn't in the cache
 */
int cache_replace(struct cache *cache, struct backentry *olde,
                  struct backentry *newe)
{
    int found;
    const char *oldndn;
    const char *newndn;
#ifdef UUIDCACHE_ON
    const char *olduuid;
    const char *newuuid;
#endif

    LOG("=> cache_replace (%s) -> (%s)\n", backentry_get_ndn(olde),
        backentry_get_ndn(newe), 0);

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    oldndn = slapi_sdn_get_ndn(backentry_get_sdn(olde));
#ifdef UUIDCACHE_ON
    olduuid = slapi_entry_get_uniqueid(olde->ep_entry);
    newuuid = slapi_entry_get_uniqueid(newe->ep_entry);
#endif
    newndn = slapi_sdn_get_ndn(backentry_get_sdn(newe));

    PR_Lock(cache->c_mutex);
    /*
     * First, remove the old entry from all the hashtables.
     * If the old entry is in the cache but missing from at least one of
     * the cache tables, the tables are out of sync and we report an
     * operational error.
     */
    if ((olde->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        found = remove_hash(cache->c_dntable, (void *)oldndn, strlen(oldndn));
        found &= remove_hash(cache->c_idtable, &(olde->ep_id), sizeof(ID));
#ifdef UUIDCACHE_ON
        found &= remove_hash(cache->c_uuidtable, (void *)olduuid, strlen(olduuid));
#endif
        if (!found) {
            LOG("cache replace: cache index tables out of sync\n", 0, 0, 0);
            PR_Unlock(cache->c_mutex);
            return 1;
        }
    }
    if (!entry_same_dn(newe, (void *)oldndn) &&
        (newe->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        /* if we're doing a modrdn, the new entry can be in the dn table
         * already, so we need to remove that too.
         */
        if (remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn))) {
            slapi_counter_subtract(cache->c_cursize, newe->size);
            cache->c_curentries--;
            LOG("cache replace remove entry size %lu\n", newe->size, 0, 0);
        }
    }
    /* now, add the new entry to the hashtables */
    /* (probably don't need such extensive error handling, once this has been
     * tested enough that we believe it works.)
     */
    if (!add_hash(cache->c_dntable, (void *)newndn, strlen(newndn), newe, NULL)) {
        LOG("cache replace: can't add dn\n", 0, 0, 0);
        PR_Unlock(cache->c_mutex);
        return 1;
    }
    if (!add_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID), newe, NULL)) {
        LOG("cache replace: can't add id\n", 0, 0, 0);
        remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn));
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#ifdef UUIDCACHE_ON
    if (newuuid && !add_hash(cache->c_uuidtable, (void *)newuuid, strlen(newuuid),
                             newe, NULL)) {
        LOG("cache replace: can't add uuid\n", 0, 0, 0);
        remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn));
        remove_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID));
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#endif
    /* adjust cache meta info */
    newe->ep_refcnt = 1;
    newe->size = cache_entry_size(newe);
    slapi_counter_add(cache->c_cursize, newe->size - olde->size);
    olde->ep_state = ENTRY_STATE_DELETED;
    newe->ep_state = 0;
    PR_Unlock(cache->c_mutex);
    LOG("<= cache_replace OK, cache size now %lu cache count now %ld\n",
        slapi_counter_get_value(cache->c_cursize), cache->c_curentries, 0);
    return 0;
}
/* call this when you're done with an entry that was fetched via one of
 * the cache_find_* calls.
 */
void cache_return(struct cache *cache, struct backentry **bep)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    struct backentry *e;

    if (NULL == bep || NULL == *bep) {
        LOG("=> cache_return (null entry)\n", 0, 0, 0);
        return;
    }
    e = *bep;
    LOG("=> cache_return (%s) refcnt: %d, entries in cache: %ld\n",
        backentry_get_ndn(e), e->ep_refcnt, cache->c_curentries);
    PR_Lock(cache->c_mutex);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE) {
        backentry_free(bep);
    } else {
        ASSERT(e->ep_refcnt > 0);
        if (!--e->ep_refcnt) {
            if (e->ep_state & ENTRY_STATE_DELETED) {
                backentry_free(bep);
            } else {
                lru_add(cache, e);
                /* the cache might be overfull... */
                if (CACHE_FULL(cache))
                    eflush = cache_flush(cache);
            }
        }
    }
    PR_Unlock(cache->c_mutex);
    while (eflush) {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}
/* lookup entry by DN -- the cache lock is taken internally; the DN must
 * already be normalized by the caller (dn2entry.c does this) */
struct backentry *cache_find_dn(struct cache *cache, const char *dn, unsigned long ndnlen)
{
    struct backentry *e;

    LOG("=> cache_find_dn (%s)\n", dn, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_dntable, (void *)dn, ndnlen, (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_dn (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    LOG("<= cache_find_dn (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
/* lookup an entry in the cache by its id# (you must return it later) */
struct backentry *cache_find_id(struct cache *cache, ID id)
{
    struct backentry *e;

    LOG("=> cache_find_id (%lu)\n", (u_long)id, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_idtable, &id, sizeof(ID), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_id (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    LOG("<= cache_find_id (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
#ifdef UUIDCACHE_ON
/* lookup an entry in the cache by its uuid (you must return it later) */
struct backentry *cache_find_uuid(struct cache *cache, const char *uuid)
{
    struct backentry *e;

    LOG("=> cache_find_uuid (%s)\n", uuid, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_uuidtable, uuid, strlen(uuid), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_uuid (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    LOG("<= cache_find_uuid (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
#endif
/* add an entry to the cache */
static int cache_add_int(struct cache *cache, struct backentry *e, int state,
                         struct backentry **alt)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
#ifdef UUIDCACHE_ON
    const char *uuid = slapi_entry_get_uniqueid(e->ep_entry);
#endif
    struct backentry *my_alt;
    int already_in = 0;

    LOG("=> cache_add_int( \"%s\", %ld )\n", backentry_get_ndn(e),
        e->ep_id, 0);
    PR_Lock(cache->c_mutex);
    if (!add_hash(cache->c_dntable, (void *)ndn, strlen(ndn), e,
                  (void **)&my_alt)) {
        LOG("entry \"%s\" already in dn cache\n", backentry_get_ndn(e), 0, 0);
        /* add_hash filled in 'my_alt' if necessary */
        if (my_alt == e) {
            if ((e->ep_state & ENTRY_STATE_CREATING) && (state == 0)) {
                /* attempting to "add" an entry that's already in the cache,
                 * and the old entry was a placeholder and the new one isn't?
                 * sounds like a confirmation of a previous add!
                 */
                LOG("confirming a previous add\n", 0, 0, 0);
                already_in = 1;
            } else {
                /* the entry is already in the cache, and we're in one of
                 * these cases:
                 * 1) ep_state: CREATING && state: CREATING
                 *    ==> keep protecting the entry; increase the refcnt
                 * 2) ep_state: 0 && state: CREATING
                 *    ==> change the state to CREATING (protect it);
                 *        increase the refcnt
                 * 3) ep_state: 0 && state: 0
                 *    ==> increase the refcnt
                 */
                if (e->ep_refcnt == 0)
                    lru_delete(cache, e);
                e->ep_refcnt++;
                e->ep_state = state; /* might be CREATING */
                /* returning 1 (entry already existed), but don't set 'alt',
                 * to prevent the caller from accidentally thinking the
                 * existing entry is not the same one it already has, and
                 * releasing it.
                 */
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        } else {
            if (my_alt->ep_state & ENTRY_STATE_CREATING) {
                LOG("the entry is reserved\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            } else if (state != 0) {
                LOG("the entry already exists. cannot reserve it.\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            } else {
                if (alt) {
                    *alt = my_alt;
                    if ((*alt)->ep_refcnt == 0)
                        lru_delete(cache, *alt);
                    (*alt)->ep_refcnt++;
                }
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
    }
    /* creating an entry with ENTRY_STATE_CREATING just creates a stub
     * which is only stored in the dn table (basically, reserving the dn) --
     * doing an add later with state==0 will "confirm" the add
     */
    if (state == 0) {
        /* neither of these should fail, or something is very wrong. */
        if (!add_hash(cache->c_idtable, &(e->ep_id), sizeof(ID), e, NULL)) {
            LOG("entry %s already in id cache!\n", backentry_get_ndn(e), 0, 0);
            if (already_in) {
                /* there's a bug in the implementation of 'modify' and
                 * 'modrdn' that i'm working around here. basically they do a
                 * tentative add of the new (modified) entry, which places
                 * the new entry in the cache, indexed only by dn.
                 *
                 * later they call id2entry_add() on the new entry, which
                 * "adds" the new entry to the cache. unfortunately, that
                 * add will fail, since the old entry is still in the cache,
                 * and both the old and new entries have the same ID and UUID.
                 *
                 * i catch that here, and just return 0 for success, without
                 * messing with either entry. a later cache_replace() will
                 * remove the old entry and add the new one, and all will be
                 * fine (i think).
                 */
                LOG("<= cache_add_int (ignoring)\n", 0, 0, 0);
                PR_Unlock(cache->c_mutex);
                return 0;
            }
            remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn));
            e->ep_state |= ENTRY_STATE_NOTINCACHE;
            PR_Unlock(cache->c_mutex);
            return -1;
        }
#ifdef UUIDCACHE_ON
        if (uuid) {
            /* (only insert entries with a uuid) */
            if (!add_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid), e,
                          NULL)) {
                LOG("entry %s already in uuid cache!\n", backentry_get_ndn(e),
                    0, 0);
                remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn));
                remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID));
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
        }
#endif
    }

    e->ep_state = state;

    if (!already_in) {
        e->ep_refcnt = 1;
        e->size = cache_entry_size(e);
        slapi_counter_add(cache->c_cursize, e->size);
        cache->c_curentries++;
        /* don't add to lru since refcnt = 1 */
        LOG("added entry of size %lu -> total now %lu out of max %lu\n",
            e->size, slapi_counter_get_value(cache->c_cursize), cache->c_maxsize);
        if (cache->c_maxentries >= 0) {
            LOG(" total entries %ld out of %ld\n",
                cache->c_curentries, cache->c_maxentries, 0);
        }
        /* check for full cache, and clear out if necessary */
        if (CACHE_FULL(cache))
            eflush = cache_flush(cache);
    }
    PR_Unlock(cache->c_mutex);

    while (eflush) {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    LOG("<= cache_add_int OK\n", 0, 0, 0);
    return 0;
}
/* create an entry in the cache, and increase its refcount (you must
 * return it when you're done).
 * returns: 0 entry has been created & locked
 *          1 entry already existed
 *         -1 something bad happened
 *
 * if 'alt' is not NULL, and the entry is found to already exist in the
 * cache, a refcounted pointer to that entry will be placed in 'alt'.
 * (this means code which suffered from race conditions between multiple
 * entry modifiers can now work.)
 */
int cache_add(struct cache *cache, struct backentry *e,
              struct backentry **alt)
{
    return cache_add_int(cache, e, 0, alt);
}

/* same as above, but add it tentatively: nobody else can use this entry
 * from the cache until you later call cache_add.
 */
int cache_add_tentative(struct cache *cache, struct backentry *e,
                        struct backentry **alt)
{
    return cache_add_int(cache, e, ENTRY_STATE_CREATING, alt);
}
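/* Sketch of the reserve-then-confirm protocol the two calls above form
 * (example only; the function name is hypothetical and error paths
 * beyond the race are omitted):
 */
#if 0 /* example only -- not compiled */
static int example_reserve_then_confirm(struct cache *cache,
                                        struct backentry *e)
{
    struct backentry *alt = NULL;

    if (cache_add_tentative(cache, e, &alt) == 0) {
        /* dn reserved via an ENTRY_STATE_CREATING stub in the dn table */
        /* ... write the entry to the database ... */
        return cache_add(cache, e, NULL); /* state==0 confirms the add */
    }
    if (alt != NULL) {
        cache_return(cache, &alt); /* lost the race; release the winner */
    }
    return -1;
}
#endif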
/* locks an entry so that it can be modified (you should have gotten the
 * entry via cache_find_*).
 * returns 0 on success, 1 if the entry is scheduled for deletion.
 */
int cache_lock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_lock_entry (%s)\n", backentry_get_ndn(e), 0, 0);
    if (!e->ep_mutexp) {
        /* make sure only one thread does this */
        PR_Lock(cache->c_emutexalloc_mutex);
        if (!e->ep_mutexp)
            e->ep_mutexp = PR_NewLock();
        PR_Unlock(cache->c_emutexalloc_mutex);
    }
    /* wait on entry lock (done w/o holding the cache lock) */
    PR_Lock(e->ep_mutexp);
    /* make sure entry hasn't been deleted now */
    PR_Lock(cache->c_mutex);
    if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)) {
        PR_Unlock(cache->c_mutex);
        PR_Unlock(e->ep_mutexp);
        LOG("<= cache_lock_entry (DELETED)\n", 0, 0, 0);
        return 1;
    }
    PR_Unlock(cache->c_mutex);
    LOG("<= cache_lock_entry (FOUND)\n", 0, 0, 0);
    return 0;
}
/* the opposite of above */
void cache_unlock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_unlock_entry\n", 0, 0, 0);
    PR_Unlock(e->ep_mutexp);
}
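/* Sketch of the entry-level locking discipline around the two calls
 * above (example only; the function name is hypothetical):
 */
#if 0 /* example only -- not compiled */
static void example_modify(struct cache *cache, const char *ndn)
{
    struct backentry *e = cache_find_dn(cache, ndn, strlen(ndn));

    if (e != NULL) {
        if (cache_lock_entry(cache, e) == 0) {
            /* ... modify e->ep_entry under the per-entry lock ... */
            cache_unlock_entry(cache, e);
        } /* else: the entry was deleted out from under us */
        cache_return(cache, &e);
    }
}
#endif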