/** BEGIN COPYRIGHT BLOCK
 * This Program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 *
 * This Program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * In addition, as a special exception, Red Hat, Inc. gives You the additional
 * right to link the code of this Program with code not covered under the GNU
 * General Public License ("Non-GPL Code") and to distribute linked combinations
 * including the two, subject to the limitations in this paragraph. Non-GPL Code
 * permitted under this exception must only link to the code of this Program
 * through those well defined interfaces identified in the file named EXCEPTION
 * found in the source code files (the "Approved Interfaces"). The files of
 * Non-GPL Code may instantiate templates or use macros or inline functions from
 * the Approved Interfaces without causing the resulting work to be covered by
 * the GNU General Public License. Only Red Hat, Inc. may make changes or
 * additions to the list of Approved Interfaces. You must obey the GNU General
 * Public License in all respects for all of the Program code and other code used
 * in conjunction with the Program except the Non-GPL Code covered by this
 * exception. If you modify this file, you may extend this exception to your
 * version of the file, but you are not obligated to do so. If you do not wish to
 * provide this exception without modification, you must delete this exception
 * statement from your version and license this file solely under the GPL without
 * exception.
 *
 *
 * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
 * Copyright (C) 2005 Red Hat, Inc.
 * All rights reserved.
 * END COPYRIGHT BLOCK **/

#ifdef HAVE_CONFIG_H
#  include <config.h>
#endif

/* cache.c - routines to maintain an in-core cache of entries */

#include "back-ldbm.h"

#ifdef DEBUG
#define LDAP_CACHE_DEBUG
/* #define LDAP_CACHE_DEBUG_LRU */ /* causes slowdown */
#endif

/* cache can't get any smaller than this (in bytes) */
#define MINCACHESIZE (size_t)512000

/* don't let hash be smaller than this # of slots */
#define MINHASHSIZE 1024

/*
 * the cache has four entry points (ways to find things):
 *
 *    by entry     e.g., if you already have an entry from the cache
 *                 and want to delete it. (really by entry ptr)
 *    by dn        e.g., when looking for the base object of a search
 *    by id        e.g., for search candidates
 *    by uniqueid
 *
 * these correspond to three different hash tables that are maintained
 * (dn, id, and uniqueid); the avl trees that used to serve this purpose
 * are gone.
 */

#ifdef LDAP_CACHE_DEBUG
#define ASSERT(_x) do { \
    if (!(_x)) { \
        LDAPDebug(LDAP_DEBUG_ANY, "BAD CACHE ASSERTION at %s/%d: %s\n", \
                  __FILE__, __LINE__, #_x); \
        *(char *)0L = 23; /* deliberate crash, so the bug is caught in a debugger */ \
    } \
} while (0)
#define LOG(_a, _x1, _x2, _x3) LDAPDebug(LDAP_DEBUG_CACHE, _a, _x1, _x2, _x3)
#else
#define ASSERT(_x) ;
#define LOG(_a, _x1, _x2, _x3) ;
#endif

/***** tiny hashtable implementation *****/

#define HASH_VALUE(_key, _keylen) \
    ((ht->hashfn == NULL) ? (*(unsigned int *)(_key)) : \
     ((*ht->hashfn)(_key, _keylen)))
#define HASH_NEXT(ht, entry) (*(void **)((char *)(entry) + (ht)->offset))

static int entry_same_id(const void *e, const void *k)
{
    return (((struct backentry *)e)->ep_id == *(ID *)k);
}

static unsigned long dn_hash(const void *key, size_t keylen)
{
    unsigned char *x = (unsigned char *)key;
    ssize_t i;
    unsigned long val = 0;

    for (i = keylen - 1; i >= 0; i--)
        val += ((val << 5) + (*x++)) & 0xffffffff;
    return val;
}

#ifdef UUIDCACHE_ON
static unsigned long uuid_hash(const void *key, size_t keylen)
{
    unsigned char *x = (unsigned char *)key;
    size_t i;
    unsigned long val = 0;

    for (i = 0; i < keylen; i++, x++) {
        char c = (*x <= '9' ? (*x - '0') : (*x - 'A' + 10));
        val = ((val << 4) ^ (val >> 28) ^ c) & 0xffffffff;
    }
    return val;
}

static int entry_same_uuid(const void *e, const void *k)
{
    struct backentry *be = (struct backentry *)e;
    const char *uuid = slapi_entry_get_uniqueid(be->ep_entry);

    return (strcmp(uuid, (char *)k) == 0);
}
#endif

static int entry_same_dn(const void *e, const void *k)
{
    struct backentry *be = (struct backentry *)e;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(be));

    return (strcmp(ndn, (char *)k) == 0);
}

Hashtable *new_hash(u_long size, u_long offset, HashFn hfn, HashTestFn tfn)
{
    static u_long prime[] = { 3, 5, 7, 11, 13, 17, 19 };
    Hashtable *ht;
    int ok = 0, i;

    if (size < MINHASHSIZE)
        size = MINHASHSIZE;
    /* move up to nearest relative prime (it's a statistical thing) */
    size |= 1;
    do {
        ok = 1;
        for (i = 0; i < (sizeof(prime) / sizeof(prime[0])); i++)
            if (!(size % prime[i]))
                ok = 0;
        if (!ok)
            size += 2;
    } while (!ok);
    ht = (Hashtable *)slapi_ch_calloc(1, sizeof(Hashtable) + size * sizeof(void *));
    if (!ht)
        return NULL;
    ht->size = size;
    ht->offset = offset;
    ht->hashfn = hfn;
    ht->testfn = tfn;
    /* calloc zeroes out the slots automagically */
    return ht;
}

/* adds an entry to the hash -- returns 1 on success, 0 if the key was
 * already there (filled into 'alt' if 'alt' is not NULL)
 */
int add_hash(Hashtable *ht, void *key, size_t keylen, void *entry, void **alt)
{
    u_long val, slot;
    void *e;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    /* first, check if this key is already in the table */
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            /* ack! already in! */
            if (alt)
                *alt = e;
            return 0;
        }
        e = HASH_NEXT(ht, e);
    }
    /* ok, it's not already there, so add it */
    HASH_NEXT(ht, entry) = ht->slot[slot];
    ht->slot[slot] = entry;
    return 1;
}

/* returns 1 if the item was found, and puts a ptr to it in 'entry' */
int find_hash(Hashtable *ht, const void *key, size_t keylen, void **entry)
{
    u_long val, slot;
    void *e;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            *entry = e;
            return 1;
        }
        e = HASH_NEXT(ht, e);
    }
    /* no go */
    *entry = NULL;
    return 0;
}

/* returns 1 if the item was found and removed */
int remove_hash(Hashtable *ht, const void *key, size_t keylen)
{
    u_long val, slot;
    void *e, *laste = NULL;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            /* remove this one */
            if (laste)
                HASH_NEXT(ht, laste) = HASH_NEXT(ht, e);
            else
                ht->slot[slot] = HASH_NEXT(ht, e);
            HASH_NEXT(ht, e) = NULL;
            return 1;
        }
        laste = e;
        e = HASH_NEXT(ht, e);
    }
    /* nope */
    return 0;
}

/* hashtable distribution stats --
 * slots: # of slots in the hashtable
 * total_entries: # of entries in the hashtable
 * max_entries_per_slot: highest number of chained entries in a single slot
 * slot_stats: if X is the number of entries in a given slot, then
 *     slot_stats[X] will hold the number of slots that held X entries
 */
static void hash_stats(Hashtable *ht, u_long *slots, int *total_entries,
                       int *max_entries_per_slot, int **slot_stats)
{
#define MAX_SLOT_STATS 50
    u_long i;
    int x;
    void *e;

    *slot_stats = (int *)slapi_ch_malloc(MAX_SLOT_STATS * sizeof(int));
    for (i = 0; i < MAX_SLOT_STATS; i++)
        (*slot_stats)[i] = 0;

    *slots = ht->size;
    *max_entries_per_slot = 0;
    *total_entries = 0;
    for (i = 0; i < ht->size; i++) {
        e = ht->slot[i];
        x = 0;
        while (e) {
            x++;
            (*total_entries)++;
            e = HASH_NEXT(ht, e);
        }
        if (x < MAX_SLOT_STATS)
            (*slot_stats)[x]++;
        if (x > *max_entries_per_slot)
            *max_entries_per_slot = x;
    }
}

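/* Illustrative sketch (not part of the original file): how the tiny
 * hashtable above is typically used. The "struct thing" type, its id_link
 * chain pointer, and my_id_test() are hypothetical; real callers use
 * struct backentry with HASHLOC, as in cache_make_hashes() below. Passing
 * a NULL hash fn makes HASH_VALUE treat the key as an unsigned int, which
 * suits integer IDs.
 */
#if 0
struct thing {
    ID id;          /* key; also works with the default *(unsigned int *) hash */
    void *id_link;  /* chain pointer, threaded through the table via 'offset' */
};

static int my_id_test(const void *e, const void *k)
{
    return (((struct thing *)e)->id == *(ID *)k);
}

static void hash_example(void)
{
    struct thing t = { 42, NULL }, *found = NULL;
    Hashtable *ht = new_hash(MINHASHSIZE, HASHLOC(struct thing, id_link),
                             NULL, my_id_test);

    add_hash(ht, &t.id, sizeof(ID), &t, NULL);          /* 1: newly added */
    find_hash(ht, &t.id, sizeof(ID), (void **)&found);  /* found == &t */
    remove_hash(ht, &t.id, sizeof(ID));                 /* 1: removed */
    slapi_ch_free((void **)&ht);
}
#endif
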
/***** add/remove entries to/from the LRU list *****/

#ifdef LDAP_CACHE_DEBUG_LRU
/* for debugging -- painstakingly verify the lru list is ok -- if 'in' is
 * true, then entry 'e' should be in the list right now; otherwise, it
 * should NOT be in the list.
 */
static void lru_verify(struct cache *cache, struct backentry *e, int in)
{
    int is_in = 0;
    int count = 0;
    struct backentry *ep;

    ep = cache->c_lruhead;
    while (ep) {
        count++;
        if (ep == e) {
            is_in = 1;
        }
        if (ep->ep_lruprev) {
            ASSERT(ep->ep_lruprev->ep_lrunext == ep);
        } else {
            ASSERT(ep == cache->c_lruhead);
        }
        if (ep->ep_lrunext) {
            ASSERT(ep->ep_lrunext->ep_lruprev == ep);
        } else {
            ASSERT(ep == cache->c_lrutail);
        }
        ep = ep->ep_lrunext;
    }
    ASSERT(is_in == in);
}
#endif

/* assume lock is held.
 * detaches 'e' AND everything between 'e' and the tail from the LRU list
 * in one cut; the detached entries stay chained together through
 * ep_lrunext so the caller can walk and free them (see cache_flush).
 */
static void lru_detach(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev)
    {
        e->ep_lruprev->ep_lrunext = NULL;
        cache->c_lrutail = e->ep_lruprev;
    }
    else
    {
        cache->c_lruhead = NULL;
        cache->c_lrutail = NULL;
    }
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
}

/* assume lock is held */
static void lru_delete(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev)
        e->ep_lruprev->ep_lrunext = e->ep_lrunext;
    else
        cache->c_lruhead = e->ep_lrunext;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e->ep_lruprev;
    else
        cache->c_lrutail = e->ep_lruprev;
#ifdef LDAP_CACHE_DEBUG_LRU
    e->ep_lrunext = e->ep_lruprev = NULL;
    lru_verify(cache, e, 0);
#endif
}

/* assume lock is held */
static void lru_add(struct cache *cache, struct backentry *e)
{
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
    e->ep_lruprev = NULL;
    e->ep_lrunext = cache->c_lruhead;
    cache->c_lruhead = e;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e;
    if (! cache->c_lrutail)
        cache->c_lrutail = e;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
}

/***** cache overhead *****/

static int cache_remove_int(struct cache *cache, struct backentry *e);

static void cache_make_hashes(struct cache *cache)
{
    u_long hashsize = (cache->c_maxentries > 0) ? cache->c_maxentries :
        (cache->c_maxsize / 512);

    cache->c_dntable = new_hash(hashsize,
                                HASHLOC(struct backentry, ep_dn_link),
                                dn_hash, entry_same_dn);
    cache->c_idtable = new_hash(hashsize,
                                HASHLOC(struct backentry, ep_id_link),
                                NULL, entry_same_id);
#ifdef UUIDCACHE_ON
    cache->c_uuidtable = new_hash(hashsize,
                                  HASHLOC(struct backentry, ep_uuid_link),
                                  uuid_hash, entry_same_uuid);
#endif
}

/* initialize the cache */
int cache_init(struct cache *cache, size_t maxsize, long maxentries)
{
    LDAPDebug(LDAP_DEBUG_TRACE, "=> cache_init\n", 0, 0, 0);
    cache->c_maxsize = maxsize;
    cache->c_maxentries = maxentries;
    cache->c_cursize = slapi_counter_new();
    cache->c_curentries = 0;
    if (config_get_slapi_counters()) {
        cache->c_hits = slapi_counter_new();
        cache->c_tries = slapi_counter_new();
    } else {
        cache->c_hits = NULL;
        cache->c_tries = NULL;
    }
    cache->c_lruhead = cache->c_lrutail = NULL;
    cache_make_hashes(cache);

    if (((cache->c_mutex = PR_NewLock()) == NULL) ||
        ((cache->c_emutexalloc_mutex = PR_NewLock()) == NULL)) {
        LDAPDebug(LDAP_DEBUG_ANY, "ldbm: cache_init: PR_NewLock failed\n",
                  0, 0, 0);
        return 0;
    }
    LDAPDebug(LDAP_DEBUG_TRACE, "<= cache_init\n", 0, 0, 0);
    return 1;
}

#define CACHE_FULL(cache) \
    ((slapi_counter_get_value((cache)->c_cursize) > (cache)->c_maxsize) || \
     (((cache)->c_maxentries > 0) && \
      ((cache)->c_curentries > (cache)->c_maxentries)))

/* clear out the cache to make room for new entries.
 * you must be holding cache->c_mutex !!
 * returns a pointer to the list of entries that got kicked out
 * of the cache; these entries should be freed outside of
 * cache->c_mutex.
 */
static struct backentry *cache_flush(struct cache *cache)
{
    struct backentry *e = NULL;

    LOG("=> cache_flush\n", 0, 0, 0);

    /* all entries on the LRU list are guaranteed to have a refcnt = 0
     * (iow, nobody's using them), so just delete from the tail down
     * until the cache is a manageable size again.
     * (cache->c_mutex is locked when we enter this)
     */
    while ((cache->c_lrutail != NULL) && CACHE_FULL(cache)) {
        if (e == NULL)
        {
            e = cache->c_lrutail;
        }
        else
        {
            e = e->ep_lruprev;
        }
        ASSERT(e->ep_refcnt == 0);
        e->ep_refcnt++;
        if (cache_remove_int(cache, e) < 0) {
            LDAPDebug(LDAP_DEBUG_ANY, "cache flush: unable to delete entry\n",
                      0, 0, 0);
            break;
        }
        if (e == cache->c_lruhead) {
            break;
        }
    }
    if (e)
        lru_detach(cache, e);
    LOG("<= cache_flush (down to %lu entries, %lu bytes)\n", cache->c_curentries,
        slapi_counter_get_value(cache->c_cursize), 0);
    return e;
}

/* remove everything from the cache */
static void cache_clear_int(struct cache *cache)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    size_t size = cache->c_maxsize;

    /* temporarily drop the max size to 0 so cache_flush drains the
     * entire LRU list
     */
    cache->c_maxsize = 0;
    eflush = cache_flush(cache);
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    cache->c_maxsize = size;
    if (cache->c_curentries > 0) {
        LDAPDebug(LDAP_DEBUG_ANY, "somehow, there are still %ld entries "
                  "in the entry cache. :/\n", cache->c_curentries, 0, 0);
    }
}

void cache_clear(struct cache *cache)
{
    PR_Lock(cache->c_mutex);
    cache_clear_int(cache);
    PR_Unlock(cache->c_mutex);
}

static void erase_cache(struct cache *cache)
{
    cache_clear_int(cache);
    slapi_ch_free((void **)&cache->c_dntable);
    slapi_ch_free((void **)&cache->c_idtable);
#ifdef UUIDCACHE_ON
    slapi_ch_free((void **)&cache->c_uuidtable);
#endif
}

/* to be used on shutdown or when destroying a backend instance */
void cache_destroy_please(struct cache *cache)
{
    erase_cache(cache);
    PR_DestroyLock(cache->c_mutex);
    PR_DestroyLock(cache->c_emutexalloc_mutex);
}

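/* Illustrative sketch (not part of the original file): the expected
 * lifecycle of a cache instance, assuming the caller owns one struct cache
 * per backend instance. The sizes here are arbitrary examples.
 */
#if 0
static void cache_lifecycle_example(void)
{
    struct cache c;

    if (!cache_init(&c, 10 * MINCACHESIZE, 0))  /* 0 = no entry-count limit */
        return;
    cache_set_max_size(&c, 20 * MINCACHESIZE);  /* may flush when shrinking */
    /* ... cache_add / cache_find_* / cache_return traffic ... */
    cache_destroy_please(&c);                   /* on shutdown */
}
#endif
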
void cache_set_max_size(struct cache *cache, size_t bytes)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    if (bytes < MINCACHESIZE) {
        bytes = MINCACHESIZE;
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Minimum cache size is %lu -- rounding up\n",
                  MINCACHESIZE, 0, 0);
    }
    PR_Lock(cache->c_mutex);
    cache->c_maxsize = bytes;
    LOG("entry cache size set to %lu\n", bytes, 0, 0);
    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = cache_flush(cache);
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    if (cache->c_curentries < 50) {
        /* there's hardly anything left in the cache -- clear it out and
         * resize the hashtables for efficiency.
         */
        erase_cache(cache);
        cache_make_hashes(cache);
    }
    PR_Unlock(cache->c_mutex);
    if (! dblayer_is_cachesize_sane(&bytes)) {
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Possible CONFIGURATION ERROR -- cachesize "
                  "(%lu) may be configured to use more than the available "
                  "physical memory.\n", bytes, 0, 0);
    }
}

void cache_set_max_entries(struct cache *cache, long entries)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    /* this is a dumb remnant of pre-5.0 servers, where the cache size
     * was given in # entries instead of memory footprint. hopefully,
     * we can eventually drop this.
     */
    PR_Lock(cache->c_mutex);
    cache->c_maxentries = entries;
    if (entries >= 0) {
        LOG("entry cache entry-limit set to %ld\n", entries, 0, 0);
    } else {
        LOG("entry cache entry-limit turned off\n", 0, 0, 0);
    }

    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = cache_flush(cache);
    PR_Unlock(cache->c_mutex);
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}

size_t cache_get_max_size(struct cache *cache)
{
    size_t n;

    PR_Lock(cache->c_mutex);
    n = cache->c_maxsize;
    PR_Unlock(cache->c_mutex);
    return n;
}

long cache_get_max_entries(struct cache *cache)
{
    long n;

    PR_Lock(cache->c_mutex);
    n = cache->c_maxentries;
    PR_Unlock(cache->c_mutex);
    return n;
}

/* determine the general size of a cache entry */
static size_t cache_entry_size(struct backentry *e)
{
    size_t size = 0;

    if (e->ep_entry)
        size += slapi_entry_size(e->ep_entry);
    if (e->ep_vlventry)
        size += slapi_entry_size(e->ep_vlventry);
    /* cannot size ep_mutexp (PRLock) */
    size += sizeof(struct backentry);
    return size;
}

/* the monitor code wants to be able to safely fetch the cache stats --
 * if it ever wants to pull out more info, we might want to change all
 * these pointer arguments to a struct
 */
void cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries,
                     long *nentries, long *maxentries,
                     size_t *size, size_t *maxsize)
{
    PR_Lock(cache->c_mutex);
    if (hits) *hits = slapi_counter_get_value(cache->c_hits);
    if (tries) *tries = slapi_counter_get_value(cache->c_tries);
    if (nentries) *nentries = cache->c_curentries;
    if (maxentries) *maxentries = cache->c_maxentries;
    if (size) *size = slapi_counter_get_value(cache->c_cursize);
    if (maxsize) *maxsize = cache->c_maxsize;
    PR_Unlock(cache->c_mutex);
}

void cache_debug_hash(struct cache *cache, char **out)
{
    u_long slots;
    int total_entries, max_entries_per_slot, *slot_stats;
    int i, j;
    Hashtable *ht = NULL;
    char *name = "unknown";

    PR_Lock(cache->c_mutex);
    *out = (char *)slapi_ch_malloc(1024);
    **out = 0;

#ifdef UUIDCACHE_ON
    for (i = 0; i < 3; i++) {
#else
    /* without the uuid cache there are only two tables to report on;
     * looping to 3 here used to print the id table's stats twice.
     */
    for (i = 0; i < 2; i++) {
#endif
        if (i > 0)
            sprintf(*out + strlen(*out), "; ");
        switch (i) {
        case 0:
            ht = cache->c_dntable;
            name = "dn";
            break;
        case 1:
        default:
            ht = cache->c_idtable;
            name = "id";
            break;
#ifdef UUIDCACHE_ON
        case 2:
            ht = cache->c_uuidtable;
            name = "uuid";
            break;
#endif
        }
        hash_stats(ht, &slots, &total_entries, &max_entries_per_slot,
                   &slot_stats);
        sprintf(*out + strlen(*out), "%s hash: %lu slots, %d entries (%d max "
                "entries per slot) -- ", name, slots, total_entries,
                max_entries_per_slot);
        /* slot_stats only has MAX_SLOT_STATS buckets; don't read past it */
        for (j = 0; j <= max_entries_per_slot && j < MAX_SLOT_STATS; j++)
            sprintf(*out + strlen(*out), "%d[%d] ", j, slot_stats[j]);
        slapi_ch_free((void **)&slot_stats);
    }
    PR_Unlock(cache->c_mutex);
}

/***** general-purpose cache stuff *****/

/* remove an entry from the cache */
/* you must be holding c_mutex !! */
static int cache_remove_int(struct cache *cache, struct backentry *e)
{
    int ret = 1;    /* assume not in cache */
    const char *ndn;
#ifdef UUIDCACHE_ON
    const char *uuid;
#endif

    LOG("=> cache_remove (%s)\n", backentry_get_ndn(e), 0, 0);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE)
    {
        return ret;
    }

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
    if (remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn)))
    {
        ret = 0;
    }
    else
    {
        LOG("remove %s from dn hash failed\n", ndn, 0, 0);
    }
    if (remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID)))
    {
        ret = 0;
    }
    else
    {
        LOG("remove %d from id hash failed\n", e->ep_id, 0, 0);
    }
#ifdef UUIDCACHE_ON
    uuid = slapi_entry_get_uniqueid(e->ep_entry);
    if (remove_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid)))
    {
        ret = 0;
    }
    else
    {
        LOG("remove %s from uuid hash failed\n", uuid, 0, 0);
    }
#endif
    if (ret == 0) {
        /* won't be on the LRU list since it has a refcount on it */
        /* adjust cache size */
        slapi_counter_subtract(cache->c_cursize, e->size);
        cache->c_curentries--;
        LOG("<= cache_remove (size %lu): cache now %lu entries, %lu bytes\n",
            e->size, cache->c_curentries,
            slapi_counter_get_value(cache->c_cursize));
    }

    /* mark for deletion (will be erased when refcount drops to zero) */
    e->ep_state |= ENTRY_STATE_DELETED;
    LOG("<= cache_remove: %d\n", ret, 0, 0);
    return ret;
}

/* remove an entry from the cache.
 * you must have a refcount on e (iow, fetched via cache_find_*). the
 * entry is removed from the cache, but NOT freed! you are responsible
 * for freeing the entry yourself when done with it, preferably via
 * cache_return (called AFTER cache_remove). some code still does this
 * via backentry_free, which is okay, as long as you know you're the only
 * thread holding a reference to the deleted entry.
 * returns: 0 on success
 *          1 if the entry wasn't in the cache at all (not even partially)
 */
int cache_remove(struct cache *cache, struct backentry *e)
{
    int ret;

    PR_Lock(cache->c_mutex);
    ASSERT(e->ep_refcnt > 0);
    ret = cache_remove_int(cache, e);
    PR_Unlock(cache->c_mutex);
    return ret;
}

/* replace an entry in the cache.
 * returns: 0 on success
 *          1 if the entry wasn't in the cache
 */
int cache_replace(struct cache *cache, struct backentry *olde,
                  struct backentry *newe)
{
    int found;
    const char *oldndn;
    const char *newndn;
#ifdef UUIDCACHE_ON
    const char *olduuid;
    const char *newuuid;
#endif

    LOG("=> cache_replace (%s) -> (%s)\n", backentry_get_ndn(olde),
        backentry_get_ndn(newe), 0);

    oldndn = slapi_sdn_get_ndn(backentry_get_sdn(olde));
#ifdef UUIDCACHE_ON
    olduuid = slapi_entry_get_uniqueid(olde->ep_entry);
    newuuid = slapi_entry_get_uniqueid(newe->ep_entry);
#endif
    newndn = slapi_sdn_get_ndn(backentry_get_sdn(newe));
    PR_Lock(cache->c_mutex);

    /*
     * First, remove the old entry from all the hashtables.
     * If the old entry is in the cache but missing from at least one of
     * the cache tables, the tables are out of sync and it's an operation
     * error.
     */
    if ( (olde->ep_state & ENTRY_STATE_NOTINCACHE) == 0 ) {
        found = remove_hash(cache->c_dntable, (void *)oldndn, strlen(oldndn));
        found &= remove_hash(cache->c_idtable, &(olde->ep_id), sizeof(ID));
#ifdef UUIDCACHE_ON
        found &= remove_hash(cache->c_uuidtable, (void *)olduuid, strlen(olduuid));
#endif
        if (!found) {
            LOG("cache replace: cache index tables out of sync\n", 0, 0, 0);
            PR_Unlock(cache->c_mutex);
            return 1;
        }
    }
    if (! entry_same_dn(newe, (void *)oldndn) &&
        (newe->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        /* if we're doing a modrdn, the new entry can be in the dn table
         * already, so we need to remove that too.
         */
        if (remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn)))
        {
            slapi_counter_subtract(cache->c_cursize, newe->size);
            cache->c_curentries--;
            LOG("cache replace remove entry size %lu\n", newe->size, 0, 0);
        }
    }
    /* now, add the new entry to the hashtables */
    /* (probably don't need such extensive error handling, once this has been
     * tested enough that we believe it works.)
     */
    if (!add_hash(cache->c_dntable, (void *)newndn, strlen(newndn), newe, NULL)) {
        LOG("cache replace: can't add dn\n", 0, 0, 0);
        PR_Unlock(cache->c_mutex);
        return 1;
    }
    if (!add_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID), newe, NULL)) {
        LOG("cache replace: can't add id\n", 0, 0, 0);
        remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn));
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#ifdef UUIDCACHE_ON
    if (newuuid && !add_hash(cache->c_uuidtable, (void *)newuuid, strlen(newuuid),
                             newe, NULL)) {
        LOG("cache replace: can't add uuid\n", 0, 0, 0);
        remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn));
        remove_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID));
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#endif
    /* adjust cache meta info */
    newe->ep_refcnt = 1;
    newe->size = cache_entry_size(newe);
    if (newe->size > olde->size) {
        slapi_counter_add(cache->c_cursize, newe->size - olde->size);
    } else if (newe->size < olde->size) {
        slapi_counter_subtract(cache->c_cursize, olde->size - newe->size);
    }
    olde->ep_state = ENTRY_STATE_DELETED;
    newe->ep_state = 0;
    PR_Unlock(cache->c_mutex);
    LOG("<= cache_replace OK, cache size now %lu cache count now %ld\n",
        slapi_counter_get_value(cache->c_cursize), cache->c_curentries, 0);
    return 0;
}

/* call this when you're done with an entry that was fetched via one of
 * the cache_find_* calls.
 */
void cache_return(struct cache *cache, struct backentry **bep)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    struct backentry *e;

    if (NULL == bep || NULL == *bep)
    {
        LOG("=> cache_return (null entry)\n", 0, 0, 0);
        return;
    }
    e = *bep;
    LOG("=> cache_return (%s) entry count: %d, entry in cache:%ld\n",
        backentry_get_ndn(e), e->ep_refcnt, cache->c_curentries);
    PR_Lock(cache->c_mutex);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE)
    {
        backentry_free(bep);
    }
    else
    {
        ASSERT(e->ep_refcnt > 0);
        if (! --e->ep_refcnt) {
            if (e->ep_state & ENTRY_STATE_DELETED) {
                backentry_free(bep);
            } else {
                lru_add(cache, e);
                /* the cache might be overfull... */
                if (CACHE_FULL(cache))
                    eflush = cache_flush(cache);
            }
        }
    }
    PR_Unlock(cache->c_mutex);
    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}

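/* Illustrative sketch (not part of the original file): the refcount
 * discipline for readers. cache_find_id() takes a reference (and pulls the
 * entry off the LRU list); cache_return() drops it (and puts the entry back
 * on the LRU list, or frees it if it was deleted in the meantime). Don't
 * touch e after returning it.
 */
#if 0
static void reader_example(struct cache *cache, ID id)
{
    struct backentry *e = cache_find_id(cache, id);

    if (e != NULL) {
        /* ... read-only use of e->ep_entry ... */
        cache_return(cache, &e);
    }
}
#endif
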
/* lookup an entry in the cache by its dn (you must return it later).
 * the dn must already be normalized by the caller (see dn2entry.c).
 */
struct backentry *cache_find_dn(struct cache *cache, const char *dn, unsigned long ndnlen)
{
    struct backentry *e;

    LOG("=> cache_find_dn (%s)\n", dn, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_dntable, (void *)dn, ndnlen, (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_dn (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    LOG("<= cache_find_dn (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}

/* lookup an entry in the cache by its id# (you must return it later) */
struct backentry *cache_find_id(struct cache *cache, ID id)
{
    struct backentry *e;

    LOG("=> cache_find_id (%lu)\n", (u_long)id, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_idtable, &id, sizeof(ID), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_id (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    LOG("<= cache_find_id (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}

#ifdef UUIDCACHE_ON
/* lookup an entry in the cache by its uuid (you must return it later) */
struct backentry *cache_find_uuid(struct cache *cache, const char *uuid)
{
    struct backentry *e;

    LOG("=> cache_find_uuid (%s)\n", uuid, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_uuidtable, uuid, strlen(uuid), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_uuid (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);
    LOG("<= cache_find_uuid (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
#endif

/* add an entry to the cache */
static int cache_add_int(struct cache *cache, struct backentry *e, int state,
                         struct backentry **alt)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
#ifdef UUIDCACHE_ON
    const char *uuid = slapi_entry_get_uniqueid(e->ep_entry);
#endif
    struct backentry *my_alt;
    int already_in = 0;

    LOG("=> cache_add_int( \"%s\", %ld )\n", backentry_get_ndn(e),
        e->ep_id, 0);
    PR_Lock(cache->c_mutex);
    if (! add_hash(cache->c_dntable, (void *)ndn, strlen(ndn), e,
                   (void **)&my_alt)) {
        LOG("entry \"%s\" already in dn cache\n", backentry_get_ndn(e), 0, 0);
        /* add_hash filled in 'my_alt' if necessary */
        if (my_alt == e)
        {
            if ((e->ep_state & ENTRY_STATE_CREATING) && (state == 0))
            {
                /* attempting to "add" an entry that's already in the cache,
                 * and the old entry was a placeholder and the new one isn't?
                 * sounds like a confirmation of a previous add!
                 */
                LOG("confirming a previous add\n", 0, 0, 0);
                already_in = 1;
            }
            else
            {
                /* the entry is already in the cache, in one of these states:
                 * 1) ep_state: CREATING && state: CREATING
                 *    ==> keep protecting the entry; increase the refcnt
                 * 2) ep_state: 0 && state: CREATING
                 *    ==> change the state to CREATING (protect it);
                 *        increase the refcnt
                 * 3) ep_state: 0 && state: 0
                 *    ==> increase the refcnt
                 */
                if (e->ep_refcnt == 0)
                    lru_delete(cache, e);
                e->ep_refcnt++;
                e->ep_state = state; /* might be CREATING */
                /* returning 1 (entry already existed), but don't fill in
                 * 'alt', lest the caller think the existing entry is
                 * different from the one it already holds and mistakenly
                 * release it.
                 */
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
        else
        {
            if (my_alt->ep_state & ENTRY_STATE_CREATING)
            {
                LOG("the entry is reserved\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
            else if (state != 0)
            {
                LOG("the entry already exists. cannot reserve it.\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
            else
            {
                if (alt) {
                    *alt = my_alt;
                    if ((*alt)->ep_refcnt == 0)
                        lru_delete(cache, *alt);
                    (*alt)->ep_refcnt++;
                }
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
    }

    /* creating an entry with ENTRY_STATE_CREATING just creates a stub
     * which is only stored in the dn table (basically, reserving the dn) --
     * doing an add later with state==0 will "confirm" the add
     */
    if (state == 0) {
        /* neither of these should fail, or something is very wrong. */
        if (! add_hash(cache->c_idtable, &(e->ep_id), sizeof(ID), e, NULL)) {
            LOG("entry %s already in id cache!\n", backentry_get_ndn(e), 0, 0);
            if (already_in) {
                /* there's a bug in the implementation of 'modify' and 'modrdn'
                 * that i'm working around here. basically they do a
                 * tentative add of the new (modified) entry, which places
                 * the new entry in the cache, indexed only by dn.
                 *
                 * later they call id2entry_add() on the new entry, which
                 * "adds" the new entry to the cache. unfortunately, that
                 * add will fail, since the old entry is still in the cache,
                 * and both the old and new entries have the same ID and UUID.
                 *
                 * i catch that here, and just return 0 for success, without
                 * messing with either entry. a later cache_replace() will
                 * remove the old entry and add the new one, and all will be
                 * fine (i think).
                 */
                LOG("<= cache_add_int (ignoring)\n", 0, 0, 0);
                PR_Unlock(cache->c_mutex);
                return 0;
            }
            remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn));
            e->ep_state |= ENTRY_STATE_NOTINCACHE;
            PR_Unlock(cache->c_mutex);
            return -1;
        }
#ifdef UUIDCACHE_ON
        if (uuid) {
            /* (only insert entries with a uuid) */
            if (! add_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid), e,
                           NULL)) {
                LOG("entry %s already in uuid cache!\n", backentry_get_ndn(e),
                    0, 0);
                remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn));
                remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID));
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
        }
#endif
    }

    e->ep_state = state;

    if (! already_in) {
        e->ep_refcnt = 1;
        e->size = cache_entry_size(e);
        slapi_counter_add(cache->c_cursize, e->size);
        cache->c_curentries++;
        /* don't add to lru since refcnt = 1 */
        LOG("added entry of size %lu -> total now %lu out of max %lu\n",
            e->size, slapi_counter_get_value(cache->c_cursize), cache->c_maxsize);
        if (cache->c_maxentries >= 0) {
            LOG(" total entries %ld out of %ld\n",
                cache->c_curentries, cache->c_maxentries, 0);
        }
        /* check for full cache, and clear out if necessary */
        if (CACHE_FULL(cache))
            eflush = cache_flush(cache);
    }
    PR_Unlock(cache->c_mutex);

    while (eflush)
    {
        eflushtemp = eflush->ep_lrunext;
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    LOG("<= cache_add_int OK\n", 0, 0, 0);
    return 0;
}

/* create an entry in the cache, and increase its refcount (you must
 * return it when you're done).
 * returns: 0       entry has been created & locked
 *          1       entry already existed
 *          -1      something bad happened
 *
 * if 'alt' is not NULL, and the entry is found to already exist in the
 * cache, a refcounted pointer to that entry will be placed in 'alt'.
 * (this means code which suffered from race conditions between multiple
 * entry modifiers can now work.)
 */
int cache_add(struct cache *cache, struct backentry *e,
              struct backentry **alt)
{
    return cache_add_int(cache, e, 0, alt);
}

/* same as above, but add it tentatively: nobody else can use this entry
 * from the cache until you later call cache_add.
 */
int cache_add_tentative(struct cache *cache, struct backentry *e,
                        struct backentry **alt)
{
    return cache_add_int(cache, e, ENTRY_STATE_CREATING, alt);
}

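/* Illustrative sketch (not part of the original file): how an add operation
 * might reserve a dn and later confirm it, per the comments in
 * cache_add_int() above. The id2entry write step is elided.
 */
#if 0
static int add_example(struct cache *cache, struct backentry *e)
{
    int rc = cache_add_tentative(cache, e, NULL);  /* reserves the dn */

    if (rc != 0)
        return rc;      /* non-zero: dn already present or reserved */
    /* ... write the entry to the id2entry database here ... */
    return cache_add(cache, e, NULL);  /* state 0 confirms the reservation */
}
#endif
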
/* locks an entry so that it can be modified (you should have gotten the
 * entry via cache_find_*).
 * returns 0 on success, 1 if the entry is scheduled for deletion.
 */
int cache_lock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_lock_entry (%s)\n", backentry_get_ndn(e), 0, 0);
    if (! e->ep_mutexp) {
        /* make sure only one thread does this */
        PR_Lock(cache->c_emutexalloc_mutex);
        if (! e->ep_mutexp)
            e->ep_mutexp = PR_NewLock();
        PR_Unlock(cache->c_emutexalloc_mutex);
    }
    /* wait on entry lock (done w/o holding the cache lock) */
    PR_Lock(e->ep_mutexp);
    /* make sure entry hasn't been deleted now */
    PR_Lock(cache->c_mutex);
    if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)) {
        PR_Unlock(cache->c_mutex);
        PR_Unlock(e->ep_mutexp);
        LOG("<= cache_lock_entry (DELETED)\n", 0, 0, 0);
        return 1;
    }
    PR_Unlock(cache->c_mutex);
    LOG("<= cache_lock_entry (FOUND)\n", 0, 0, 0);
    return 0;
}

/* the opposite of above */
void cache_unlock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_unlock_entry\n", 0, 0, 0);
    PR_Unlock(e->ep_mutexp);
}
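
/* Illustrative sketch (not part of the original file): the intended pairing
 * of cache_lock_entry()/cache_unlock_entry() around a modification of a
 * fetched entry, with the reference dropped afterwards via cache_return().
 */
#if 0
static int writer_example(struct cache *cache, ID id)
{
    struct backentry *e = cache_find_id(cache, id);

    if (e == NULL)
        return -1;
    if (cache_lock_entry(cache, e)) {   /* 1 => deleted underneath us */
        cache_return(cache, &e);
        return -1;
    }
    /* ... modify e->ep_entry ... */
    cache_unlock_entry(cache, e);
    cache_return(cache, &e);
    return 0;
}
#endif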