/** BEGIN COPYRIGHT BLOCK
 * This Program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation; version 2 of the License.
 *
 * This Program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * In addition, as a special exception, Red Hat, Inc. gives You the additional
 * right to link the code of this Program with code not covered under the GNU
 * General Public License ("Non-GPL Code") and to distribute linked combinations
 * including the two, subject to the limitations in this paragraph. Non-GPL Code
 * permitted under this exception must only link to the code of this Program
 * through those well defined interfaces identified in the file named EXCEPTION
 * found in the source code files (the "Approved Interfaces"). The files of
 * Non-GPL Code may instantiate templates or use macros or inline functions from
 * the Approved Interfaces without causing the resulting work to be covered by
 * the GNU General Public License. Only Red Hat, Inc. may make changes or
 * additions to the list of Approved Interfaces. You must obey the GNU General
 * Public License in all respects for all of the Program code and other code used
 * in conjunction with the Program except the Non-GPL Code covered by this
 * exception. If you modify this file, you may extend this exception to your
 * version of the file, but you are not obligated to do so. If you do not wish to
 * provide this exception without modification, you must delete this exception
 * statement from your version and license this file solely under the GPL without
 * exception.
 *
 *
 * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
 * Copyright (C) 2005 Red Hat, Inc.
 * All rights reserved.
 * END COPYRIGHT BLOCK **/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

/* cache.c - routines to maintain an in-core cache of entries */

#include "back-ldbm.h"

#ifdef DEBUG
#define LDAP_CACHE_DEBUG
/* #define LDAP_CACHE_DEBUG_LRU */ /* causes slowdown */
#endif

/* cache can't get any smaller than this (in bytes) */
#define MINCACHESIZE (size_t)512000

/* don't let hash be smaller than this # of slots */
#define MINHASHSIZE 1024
/*
 * the cache has four entry points (ways to find things):
 *
 *     by entry     e.g., if you already have an entry from the cache
 *                  and want to delete it. (really by entry ptr)
 *     by dn        e.g., when looking for the base object of a search
 *     by id        e.g., for search candidates
 *     by uniqueid
 *
 * the last three correspond to different lookup tables that are
 * maintained. (they were avl trees once; those avl trees are being
 * destroyed as we speak.)
 */
#ifdef LDAP_CACHE_DEBUG
#define ASSERT(_x) do { \
    if (!(_x)) { \
        LDAPDebug(LDAP_DEBUG_ANY, "BAD CACHE ASSERTION at %s/%d: %s\n", \
                  __FILE__, __LINE__, #_x); \
        *(char *)0L = 23; /* deliberate crash, to stop right at the bad assertion */ \
    } \
} while (0)
#define LOG(_a, _x1, _x2, _x3) LDAPDebug(LDAP_DEBUG_CACHE, _a, _x1, _x2, _x3)
#else
#define ASSERT(_x) ;
#define LOG(_a, _x1, _x2, _x3) ;
#endif

#define LRU_DETACH(cache, e) lru_detach((cache), (void *)(e))

#define CACHE_LRU_HEAD(cache, type) ((type)((cache)->c_lruhead))
#define CACHE_LRU_TAIL(cache, type) ((type)((cache)->c_lrutail))
#define BACK_LRU_NEXT(entry, type) ((type)((entry)->ep_lrunext))
#define BACK_LRU_PREV(entry, type) ((type)((entry)->ep_lruprev))
/* static functions */
static void entrycache_clear_int(struct cache *cache);
static void entrycache_set_max_size(struct cache *cache, size_t bytes);
static int entrycache_remove_int(struct cache *cache, struct backentry *e);
static void entrycache_return(struct cache *cache, struct backentry **bep);
static int entrycache_replace(struct cache *cache, struct backentry *olde, struct backentry *newe);
static int entrycache_add_int(struct cache *cache, struct backentry *e, int state, struct backentry **alt);
static struct backentry *entrycache_flush(struct cache *cache);
#ifdef LDAP_CACHE_DEBUG_LRU
static void entry_lru_verify(struct cache *cache, struct backentry *e, int in);
#endif

static int dn_same_id(const void *bdn, const void *k);
static void dncache_clear_int(struct cache *cache);
static void dncache_set_max_size(struct cache *cache, size_t bytes);
static int dncache_remove_int(struct cache *cache, struct backdn *dn);
static void dncache_return(struct cache *cache, struct backdn **bdn);
static int dncache_replace(struct cache *cache, struct backdn *olddn, struct backdn *newdn);
static int dncache_add_int(struct cache *cache, struct backdn *bdn, int state, struct backdn **alt);
static struct backdn *dncache_flush(struct cache *cache);
#ifdef LDAP_CACHE_DEBUG_LRU
static void dn_lru_verify(struct cache *cache, struct backdn *dn, int in);
#endif
/***** tiny hashtable implementation *****/

/* NOTE: HASH_VALUE assumes a local variable named 'ht' is in scope;
 * a NULL hashfn means the key's leading bytes are used directly,
 * which suits fixed-size integer keys like IDs.
 */
#define HASH_VALUE(_key, _keylen) \
    ((ht->hashfn == NULL) ? (*(unsigned int *)(_key)) : \
     ((*ht->hashfn)(_key, _keylen)))
#define HASH_NEXT(ht, entry) (*(void **)((char *)(entry) + (ht)->offset))
static int entry_same_id(const void *e, const void *k)
{
    return (((struct backentry *)e)->ep_id == *(ID *)k);
}

static unsigned long dn_hash(const void *key, size_t keylen)
{
    unsigned char *x = (unsigned char *)key;
    ssize_t i;
    unsigned long val = 0;

    for (i = keylen - 1; i >= 0; i--)
        val += ((val << 5) + (*x++)) & 0xffffffff;
    return val;
}

#ifdef UUIDCACHE_ON
static unsigned long uuid_hash(const void *key, size_t keylen)
{
    unsigned char *x = (unsigned char *)key;
    size_t i;
    unsigned long val = 0;

    for (i = 0; i < keylen; i++, x++) {
        char c = (*x <= '9' ? (*x - '0') : (*x - 'A' + 10));
        val = ((val << 4) ^ (val >> 28) ^ c) & 0xffffffff;
    }
    return val;
}

static int entry_same_uuid(const void *e, const void *k)
{
    struct backentry *be = (struct backentry *)e;
    const char *uuid = slapi_entry_get_uniqueid(be->ep_entry);

    return (strcmp(uuid, (char *)k) == 0);
}
#endif

static int entry_same_dn(const void *e, const void *k)
{
    struct backentry *be = (struct backentry *)e;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(be));

    return (strcmp(ndn, (char *)k) == 0);
}
Hashtable *new_hash(u_long size, u_long offset, HashFn hfn,
                    HashTestFn tfn)
{
    static u_long prime[] = { 3, 5, 7, 11, 13, 17, 19 };
    Hashtable *ht;
    int ok = 0, i;

    if (size < MINHASHSIZE)
        size = MINHASHSIZE;
    /* move up to nearest relative prime (it's a statistical thing) */
    size |= 1;
    do {
        ok = 1;
        for (i = 0; i < (sizeof(prime) / sizeof(prime[0])); i++)
            if (!(size % prime[i]))
                ok = 0;
        if (!ok)
            size += 2;
    } while (!ok);

    ht = (Hashtable *)slapi_ch_calloc(1, sizeof(Hashtable) + size * sizeof(void *));
    if (!ht)
        return NULL;
    ht->size = size;
    ht->offset = offset;
    ht->hashfn = hfn;
    ht->testfn = tfn;
    /* calloc zeroes out the slots automagically */
    return ht;
}
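
/* Illustrative sketch (not compiled): building a table keyed by entry ID,
 * the way cache_make_hashes() does further down. HASHLOC (defined in the
 * backend headers) gives the byte offset of the chaining link inside the
 * cached struct; a NULL hash function means HASH_VALUE uses the key's
 * leading bytes directly, which suits integer IDs.
 */
#if 0
Hashtable *idtable = new_hash(MINHASHSIZE,
                              HASHLOC(struct backentry, ep_id_link),
                              NULL /* identity hash for integer keys */,
                              entry_same_id);
#endif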
/* adds an entry to the hash -- returns 1 on success, 0 if the key was
 * already there (filled into 'alt' if 'alt' is not NULL)
 */
int add_hash(Hashtable *ht, void *key, size_t keylen, void *entry,
             void **alt)
{
    u_long val, slot;
    void *e;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    /* first, check if this key is already in the table */
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            /* ack! already in! */
            if (alt)
                *alt = e;
            return 0;
        }
        e = HASH_NEXT(ht, e);
    }
    /* ok, it's not already there, so add it */
    HASH_NEXT(ht, entry) = ht->slot[slot];
    ht->slot[slot] = entry;
    return 1;
}

/* returns 1 if the item was found, and puts a ptr to it in 'entry' */
int find_hash(Hashtable *ht, const void *key, size_t keylen, void **entry)
{
    u_long val, slot;
    void *e;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            *entry = e;
            return 1;
        }
        e = HASH_NEXT(ht, e);
    }
    /* no go */
    *entry = NULL;
    return 0;
}

/* returns 1 if the item was found and removed */
int remove_hash(Hashtable *ht, const void *key, size_t keylen)
{
    u_long val, slot;
    void *e, *laste = NULL;

    val = HASH_VALUE(key, keylen);
    slot = (val % ht->size);
    e = ht->slot[slot];
    while (e) {
        if ((*ht->testfn)(e, key)) {
            /* remove this one */
            if (laste)
                HASH_NEXT(ht, laste) = HASH_NEXT(ht, e);
            else
                ht->slot[slot] = HASH_NEXT(ht, e);
            HASH_NEXT(ht, e) = NULL;
            return 1;
        }
        laste = e;
        e = HASH_NEXT(ht, e);
    }
    /* nope */
    return 0;
}
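
/* Illustrative sketch (not compiled; 'idtable' and 'e' are hypothetical
 * locals): the add/find/remove trio on an ID-keyed table, mirroring how
 * the cache code below uses it. Keys are passed as (pointer, length); the
 * table never copies keys or entries, it just threads entries together
 * through the link field handed to new_hash().
 */
#if 0
struct backentry *dup = NULL, *found = NULL;
if (!add_hash(idtable, &e->ep_id, sizeof(ID), e, (void **)&dup)) {
    /* key already present; 'dup' now points at the existing entry */
}
if (find_hash(idtable, &e->ep_id, sizeof(ID), (void **)&found)) {
    /* 'found' == e */
}
remove_hash(idtable, &e->ep_id, sizeof(ID));
#endif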
#ifdef LDAP_CACHE_DEBUG
void
dump_hash(Hashtable *ht)
{
    u_long i;
    void *e;
    char ep_id[16];
    char ep_ids[80];
    char *p;
    int ids_size = 80;

    p = ep_ids;
    for (i = 0; i < ht->size; i++) {
        int len;
        e = ht->slot[i];
        if (NULL == e) {
            continue;
        }
        do {
            PR_snprintf(ep_id, 16, "%u-%u", ((struct backcommon *)e)->ep_id,
                        ((struct backcommon *)e)->ep_refcnt);
            len = strlen(ep_id);
            if (ids_size < len + 1) {
                LDAPDebug1Arg(LDAP_DEBUG_ANY, "%s\n", ep_ids);
                p = ep_ids;
                ids_size = 80;
            }
            PR_snprintf(p, ids_size, "%s:", ep_id);
            p += len + 1;
            ids_size -= len + 1;
        } while ((e = HASH_NEXT(ht, e)));
    }
    if (p != ep_ids) {
        LDAPDebug1Arg(LDAP_DEBUG_ANY, "%s\n", ep_ids);
    }
}
#endif
/* hashtable distribution stats --
 * slots: # of slots in the hashtable
 * total_entries: # of entries in the hashtable
 * max_entries_per_slot: highest number of chained entries in a single slot
 * slot_stats: if X is the number of entries in a given slot, then
 *     slot_stats[X] will hold the number of slots that held X entries
 */
static void hash_stats(Hashtable *ht, u_long *slots, int *total_entries,
                       int *max_entries_per_slot, int **slot_stats)
{
#define MAX_SLOT_STATS 50
    u_long i;
    int x;
    void *e;

    *slot_stats = (int *)slapi_ch_malloc(MAX_SLOT_STATS * sizeof(int));
    for (i = 0; i < MAX_SLOT_STATS; i++)
        (*slot_stats)[i] = 0;

    *slots = ht->size;
    *max_entries_per_slot = 0;
    *total_entries = 0;
    for (i = 0; i < ht->size; i++) {
        e = ht->slot[i];
        x = 0;
        while (e) {
            x++;
            (*total_entries)++;
            e = HASH_NEXT(ht, e);
        }
        if (x < MAX_SLOT_STATS)
            (*slot_stats)[x]++;
        if (x > *max_entries_per_slot)
            *max_entries_per_slot = x;
    }
}
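
/* Reading the output: for a 1000-slot table, slot_stats[0] == 900 and
 * slot_stats[1] == 100 would mean 900 empty slots and 100 slots holding
 * exactly one entry (the numbers here are made up). The caller owns
 * *slot_stats and must slapi_ch_free() it, as cache_debug_hash() below does.
 */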
/***** add/remove entries to/from the LRU list *****/

#ifdef LDAP_CACHE_DEBUG_LRU
static void
lru_verify(struct cache *cache, void *ptr, int in)
{
    struct backcommon *e;

    if (NULL == ptr) {
        LOG("=> lru_verify\n<= lru_verify (null entry)\n", 0, 0, 0);
        return;
    }
    e = (struct backcommon *)ptr;
    if (CACHE_TYPE_ENTRY == e->ep_type) {
        entry_lru_verify(cache, (struct backentry *)e, in);
    } else {
        dn_lru_verify(cache, (struct backdn *)e, in);
    }
}

/* for debugging -- painstakingly verify the lru list is ok -- if 'in' is
 * true, then entry 'e' should be in the list right now; otherwise, it
 * should NOT be in the list.
 */
static void
entry_lru_verify(struct cache *cache, struct backentry *e, int in)
{
    int is_in = 0;
    int count = 0;
    struct backentry *ep;

    ep = CACHE_LRU_HEAD(cache, struct backentry *);
    while (ep) {
        count++;
        if (ep == e) {
            is_in = 1;
        }
        if (ep->ep_lruprev) {
            ASSERT(BACK_LRU_NEXT(BACK_LRU_PREV(ep, struct backentry *), struct backentry *) == ep);
        } else {
            ASSERT(ep == CACHE_LRU_HEAD(cache, struct backentry *));
        }
        if (ep->ep_lrunext) {
            ASSERT(BACK_LRU_PREV(BACK_LRU_NEXT(ep, struct backentry *), struct backentry *) == ep);
        } else {
            ASSERT(ep == CACHE_LRU_TAIL(cache, struct backentry *));
        }
        ep = BACK_LRU_NEXT(ep, struct backentry *);
    }
    ASSERT(is_in == in);
}
#endif
/* assume lock is held */
/* detaches 'e' and everything between it and the tail from the LRU list
 * as one chain; the flush code then walks that chain via ep_lrunext to
 * free the evicted entries.
 */
static void lru_detach(struct cache *cache, void *ptr)
{
    struct backcommon *e;

    if (NULL == ptr) {
        LOG("=> lru_detach\n<= lru_detach (null entry)\n", 0, 0, 0);
        return;
    }
    e = (struct backcommon *)ptr;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev) {
        e->ep_lruprev->ep_lrunext = NULL;
        cache->c_lrutail = e->ep_lruprev;
    } else {
        cache->c_lruhead = NULL;
        cache->c_lrutail = NULL;
    }
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
}

/* assume lock is held */
static void lru_delete(struct cache *cache, void *ptr)
{
    struct backcommon *e;

    if (NULL == ptr) {
        LOG("=> lru_delete\n<= lru_delete (null entry)\n", 0, 0, 0);
        return;
    }
    e = (struct backcommon *)ptr;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
    if (e->ep_lruprev)
        e->ep_lruprev->ep_lrunext = e->ep_lrunext;
    else
        cache->c_lruhead = e->ep_lrunext;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e->ep_lruprev;
    else
        cache->c_lrutail = e->ep_lruprev;
#ifdef LDAP_CACHE_DEBUG_LRU
    e->ep_lrunext = e->ep_lruprev = NULL;
    lru_verify(cache, e, 0);
#endif
}

/* assume lock is held */
static void lru_add(struct cache *cache, void *ptr)
{
    struct backcommon *e;

    if (NULL == ptr) {
        LOG("=> lru_add\n<= lru_add (null entry)\n", 0, 0, 0);
        return;
    }
    e = (struct backcommon *)ptr;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 0);
#endif
    e->ep_lruprev = NULL;
    e->ep_lrunext = cache->c_lruhead;
    cache->c_lruhead = e;
    if (e->ep_lrunext)
        e->ep_lrunext->ep_lruprev = e;
    if (!cache->c_lrutail)
        cache->c_lrutail = e;
#ifdef LDAP_CACHE_DEBUG_LRU
    lru_verify(cache, e, 1);
#endif
}
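
/* The LRU discipline in one place: only entries with ep_refcnt == 0 sit on
 * the LRU list. cache_find_* calls lru_delete() when handing out the first
 * reference, cache_return calls lru_add() when the last reference is
 * dropped, and entrycache_flush evicts from the tail via LRU_DETACH.
 */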
/***** cache overhead *****/

static void cache_make_hashes(struct cache *cache, int type)
{
    u_long hashsize = (cache->c_maxentries > 0) ? cache->c_maxentries :
                      (cache->c_maxsize / 512);

    if (CACHE_TYPE_ENTRY == type) {
        cache->c_dntable = new_hash(hashsize,
                                    HASHLOC(struct backentry, ep_dn_link),
                                    dn_hash, entry_same_dn);
        cache->c_idtable = new_hash(hashsize,
                                    HASHLOC(struct backentry, ep_id_link),
                                    NULL, entry_same_id);
#ifdef UUIDCACHE_ON
        cache->c_uuidtable = new_hash(hashsize,
                                      HASHLOC(struct backentry, ep_uuid_link),
                                      uuid_hash, entry_same_uuid);
#endif
    } else if (CACHE_TYPE_DN == type) {
        cache->c_dntable = NULL;
        cache->c_idtable = new_hash(hashsize,
                                    HASHLOC(struct backdn, dn_id_link),
                                    NULL, dn_same_id);
#ifdef UUIDCACHE_ON
        cache->c_uuidtable = NULL;
#endif
    }
}
/* initialize the cache */
int cache_init(struct cache *cache, size_t maxsize, long maxentries, int type)
{
    LDAPDebug(LDAP_DEBUG_TRACE, "=> cache_init\n", 0, 0, 0);
    cache->c_maxsize = maxsize;
    cache->c_maxentries = maxentries;
    cache->c_curentries = 0;
    if (config_get_slapi_counters()) {
        if (cache->c_cursize) {
            slapi_counter_destroy(&cache->c_cursize);
        }
        cache->c_cursize = slapi_counter_new();
        if (cache->c_hits) {
            slapi_counter_destroy(&cache->c_hits);
        }
        cache->c_hits = slapi_counter_new();
        if (cache->c_tries) {
            slapi_counter_destroy(&cache->c_tries);
        }
        cache->c_tries = slapi_counter_new();
    } else {
        LDAPDebug0Args(LDAP_DEBUG_ANY,
                       "cache_init: slapi counter is not available.\n");
        cache->c_cursize = NULL;
        cache->c_hits = NULL;
        cache->c_tries = NULL;
    }
    cache->c_lruhead = cache->c_lrutail = NULL;
    cache_make_hashes(cache, type);

    if (((cache->c_mutex = PR_NewLock()) == NULL) ||
        ((cache->c_emutexalloc_mutex = PR_NewLock()) == NULL)) {
        LDAPDebug(LDAP_DEBUG_ANY, "ldbm: cache_init: PR_NewLock failed\n",
                  0, 0, 0);
        return 0;
    }
    LDAPDebug(LDAP_DEBUG_TRACE, "<= cache_init\n", 0, 0, 0);
    return 1;
}
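
/* Illustrative sketch (not compiled; the sizes and the 'c' variable are
 * hypothetical): how a backend instance might set up an entry cache.
 * cache_init returns 0 on failure (lock allocation) and 1 on success.
 */
#if 0
struct cache c = {0};
if (!cache_init(&c, 10 * 1024 * 1024 /* bytes */, -1 /* no entry limit */,
                CACHE_TYPE_ENTRY)) {
    /* handle failure */
}
cache_set_max_size(&c, 20 * 1024 * 1024, CACHE_TYPE_ENTRY);
#endif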
#define CACHE_FULL(cache) \
    ((slapi_counter_get_value((cache)->c_cursize) > (cache)->c_maxsize) || \
     (((cache)->c_maxentries > 0) && \
      ((cache)->c_curentries > (cache)->c_maxentries)))

/* clear out the cache to make room for new entries
 * you must be holding cache->c_mutex !!
 * returns a pointer to the list of entries that got kicked out
 * of the cache.
 * These entries should be freed outside of the cache->c_mutex
 */
static struct backentry *
entrycache_flush(struct cache *cache)
{
    struct backentry *e = NULL;

    LOG("=> entrycache_flush\n", 0, 0, 0);

    /* all entries on the LRU list are guaranteed to have a refcnt = 0
     * (iow, nobody's using them), so just delete from the tail down
     * until the cache is a manageable size again.
     * (cache->c_mutex is locked when we enter this)
     */
    while ((cache->c_lrutail != NULL) && CACHE_FULL(cache)) {
        if (e == NULL) {
            e = CACHE_LRU_TAIL(cache, struct backentry *);
        } else {
            e = BACK_LRU_PREV(e, struct backentry *);
        }
        ASSERT(e->ep_refcnt == 0);
        e->ep_refcnt++;
        if (entrycache_remove_int(cache, e) < 0) {
            LDAPDebug(LDAP_DEBUG_ANY,
                      "entry cache flush: unable to delete entry\n", 0, 0, 0);
            break;
        }
        if (e == CACHE_LRU_HEAD(cache, struct backentry *)) {
            break;
        }
    }
    if (e)
        LRU_DETACH(cache, e);
    LOG("<= entrycache_flush (down to %lu entries, %lu bytes)\n",
        cache->c_curentries, slapi_counter_get_value(cache->c_cursize), 0);
    return e;
}
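
/* Per the comment above, the detached chain is meant to be freed outside
 * c_mutex; cache_set_max_entries, entrycache_return, and entrycache_add_int
 * follow that pattern (unlock, then walk the chain with BACK_LRU_NEXT and
 * backentry_free). Some paths, e.g. entrycache_set_max_size, still free
 * while holding the lock.
 */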
/* remove everything from the cache */
static void entrycache_clear_int(struct cache *cache)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    size_t size = cache->c_maxsize;

    cache->c_maxsize = 0;
    eflush = entrycache_flush(cache);
    while (eflush) {
        eflushtemp = BACK_LRU_NEXT(eflush, struct backentry *);
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    cache->c_maxsize = size;
    if (cache->c_curentries > 0) {
        LDAPDebug1Arg(LDAP_DEBUG_ANY,
                      "entrycache_clear_int: there are still %ld entries "
                      "in the entry cache.\n", cache->c_curentries);
#ifdef LDAP_CACHE_DEBUG
        LDAPDebug0Args(LDAP_DEBUG_ANY, "ID(s) in entry cache:\n");
        dump_hash(cache->c_idtable);
#endif
    }
}
void cache_clear(struct cache *cache, int type)
{
    PR_Lock(cache->c_mutex);
    if (CACHE_TYPE_ENTRY == type) {
        entrycache_clear_int(cache);
    } else if (CACHE_TYPE_DN == type) {
        dncache_clear_int(cache);
    }
    PR_Unlock(cache->c_mutex);
}

static void erase_cache(struct cache *cache, int type)
{
    if (CACHE_TYPE_ENTRY == type) {
        entrycache_clear_int(cache);
    } else if (CACHE_TYPE_DN == type) {
        dncache_clear_int(cache);
    }
    slapi_ch_free((void **)&cache->c_dntable);
    slapi_ch_free((void **)&cache->c_idtable);
#ifdef UUIDCACHE_ON
    slapi_ch_free((void **)&cache->c_uuidtable);
#endif
}

/* to be used on shutdown or when destroying a backend instance */
void cache_destroy_please(struct cache *cache, int type)
{
    erase_cache(cache, type);
    slapi_counter_destroy(&cache->c_cursize);
    slapi_counter_destroy(&cache->c_hits);
    slapi_counter_destroy(&cache->c_tries);
    PR_DestroyLock(cache->c_mutex);
    PR_DestroyLock(cache->c_emutexalloc_mutex);
}
void cache_set_max_size(struct cache *cache, size_t bytes, int type)
{
    if (CACHE_TYPE_ENTRY == type) {
        entrycache_set_max_size(cache, bytes);
    } else if (CACHE_TYPE_DN == type) {
        dncache_set_max_size(cache, bytes);
    }
}

static void entrycache_set_max_size(struct cache *cache, size_t bytes)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    if (bytes < MINCACHESIZE) {
        bytes = MINCACHESIZE;
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Minimum cache size is %lu -- rounding up\n",
                  MINCACHESIZE, 0, 0);
    }
    PR_Lock(cache->c_mutex);
    cache->c_maxsize = bytes;
    LOG("entry cache size set to %lu\n", bytes, 0, 0);
    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = entrycache_flush(cache);
    while (eflush) {
        eflushtemp = BACK_LRU_NEXT(eflush, struct backentry *);
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    if (cache->c_curentries < 50) {
        /* there's hardly anything left in the cache -- clear it out and
         * resize the hashtables for efficiency.
         */
        erase_cache(cache, CACHE_TYPE_ENTRY);
        cache_make_hashes(cache, CACHE_TYPE_ENTRY);
    }
    PR_Unlock(cache->c_mutex);
    if (!dblayer_is_cachesize_sane(&bytes)) {
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Possible CONFIGURATION ERROR -- cachesize "
                  "(%lu) may be configured to use more than the available "
                  "physical memory.\n", bytes, 0, 0);
    }
}
void cache_set_max_entries(struct cache *cache, long entries)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;

    /* this is a dumb remnant of pre-5.0 servers, where the cache size
     * was given in # entries instead of memory footprint. hopefully,
     * we can eventually drop this.
     */
    PR_Lock(cache->c_mutex);
    cache->c_maxentries = entries;
    if (entries >= 0) {
        LOG("entry cache entry-limit set to %ld\n", entries, 0, 0);
    } else {
        LOG("entry cache entry-limit turned off\n", 0, 0, 0);
    }

    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache))
        eflush = entrycache_flush(cache);
    PR_Unlock(cache->c_mutex);
    while (eflush) {
        eflushtemp = BACK_LRU_NEXT(eflush, struct backentry *);
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
}

size_t cache_get_max_size(struct cache *cache)
{
    size_t n = 0;

    PR_Lock(cache->c_mutex);
    n = cache->c_maxsize;
    PR_Unlock(cache->c_mutex);
    return n;
}

long cache_get_max_entries(struct cache *cache)
{
    long n;

    PR_Lock(cache->c_mutex);
    n = cache->c_maxentries;
    PR_Unlock(cache->c_mutex);
    return n;
}
/* determine the general size of a cache entry */
static size_t cache_entry_size(struct backentry *e)
{
    size_t size = 0;

    if (e->ep_entry)
        size += slapi_entry_size(e->ep_entry);
    if (e->ep_vlventry)
        size += slapi_entry_size(e->ep_vlventry);
    /* cannot size ep_mutexp (PRLock) */
    size += sizeof(struct backentry);
    return size;
}
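
/* The size computed here is stored in e->ep_size when the entry is added
 * (see entrycache_add_int), so the bookkeeping in remove/replace subtracts
 * exactly the amount that was once added, even if the entry grew since.
 */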
/* the monitor code wants to be able to safely fetch the cache stats --
 * if it ever wants to pull out more info, we might want to change all
 * these output pointers to a struct
 */
void cache_get_stats(struct cache *cache, PRUint64 *hits, PRUint64 *tries,
                     long *nentries, long *maxentries,
                     size_t *size, size_t *maxsize)
{
    PR_Lock(cache->c_mutex);
    if (hits) *hits = slapi_counter_get_value(cache->c_hits);
    if (tries) *tries = slapi_counter_get_value(cache->c_tries);
    if (nentries) *nentries = cache->c_curentries;
    if (maxentries) *maxentries = cache->c_maxentries;
    if (size) *size = slapi_counter_get_value(cache->c_cursize);
    if (maxsize) *maxsize = cache->c_maxsize;
    PR_Unlock(cache->c_mutex);
}
void cache_debug_hash(struct cache *cache, char **out)
{
    u_long slots;
    int total_entries, max_entries_per_slot, *slot_stats;
    int i, j;
    Hashtable *ht = NULL;
    char *name = "unknown";

    PR_Lock(cache->c_mutex);
    *out = (char *)slapi_ch_malloc(1024);
    **out = 0;

    for (i = 0; i < 3; i++) {
        if (i > 0)
            sprintf(*out + strlen(*out), "; ");
        switch (i) {
        case 0:
            ht = cache->c_dntable;
            name = "dn";
            break;
        case 1:
            ht = cache->c_idtable;
            name = "id";
            break;
#ifdef UUIDCACHE_ON
        case 2:
            ht = cache->c_uuidtable;
            name = "uuid";
            break;
#endif
        default:
            /* without UUIDCACHE_ON there is no third table; skip it
             * rather than re-reporting the previous one */
            ht = NULL;
            break;
        }
        if (NULL == ht) {
            continue;
        }
        hash_stats(ht, &slots, &total_entries, &max_entries_per_slot,
                   &slot_stats);
        sprintf(*out + strlen(*out), "%s hash: %lu slots, %d items (%d max "
                "items per slot) -- ", name, slots, total_entries,
                max_entries_per_slot);
        for (j = 0; j <= max_entries_per_slot; j++)
            sprintf(*out + strlen(*out), "%d[%d] ", j, slot_stats[j]);
        slapi_ch_free((void **)&slot_stats);
    }
    PR_Unlock(cache->c_mutex);
}
/***** general-purpose cache stuff *****/

/* remove an entry from the cache */
/* you must be holding c_mutex !! */
static int
entrycache_remove_int(struct cache *cache, struct backentry *e)
{
    int ret = 1; /* assume not in cache */
    const char *ndn;
#ifdef UUIDCACHE_ON
    const char *uuid;
#endif

    LOG("=> entrycache_remove_int (%s) (%u) (%u)\n", backentry_get_ndn(e),
        e->ep_id, e->ep_refcnt);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE) {
        return ret;
    }

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
    if (remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn))) {
        ret = 0;
    } else {
        LOG("remove %s from dn hash failed\n", ndn, 0, 0);
    }
    /* if the entry was added tentatively, it will be in the dntable
     * but not in the idtable - we cannot just remove it from
     * the idtable - in the case of modrdn, this would remove
     * the _real_ entry from the idtable, leading to a cache
     * imbalance
     */
    if (!(e->ep_state & ENTRY_STATE_CREATING)) {
        if (remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID))) {
            ret = 0;
        } else {
            LOG("remove %d from id hash failed\n", e->ep_id, 0, 0);
        }
    }
#ifdef UUIDCACHE_ON
    uuid = slapi_entry_get_uniqueid(e->ep_entry);
    if (remove_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid))) {
        ret = 0;
    } else {
        LOG("remove %s from uuid hash failed\n", uuid, 0, 0);
    }
#endif
    if (ret == 0) {
        /* won't be on the LRU list since it has a refcount on it */
        /* adjust cache size */
        slapi_counter_subtract(cache->c_cursize, e->ep_size);
        cache->c_curentries--;
        LOG("<= entrycache_remove_int (size %lu): cache now %lu entries, "
            "%lu bytes\n", e->ep_size, cache->c_curentries,
            slapi_counter_get_value(cache->c_cursize));
    }

    /* mark for deletion (will be erased when refcount drops to zero) */
    e->ep_state |= ENTRY_STATE_DELETED;
#if 0
    if (slapi_is_loglevel_set(SLAPI_LOG_CACHE)) {
        dump_hash(cache->c_idtable);
    }
#endif
    LOG("<= entrycache_remove_int: %d\n", ret, 0, 0);
    return ret;
}
/* remove an entry/a dn from the cache.
 * you must have a refcount on e (iow, fetched via cache_find_*). the
 * entry is removed from the cache, but NOT freed! you are responsible
 * for freeing the entry yourself when done with it, preferably via
 * cache_return (called AFTER cache_remove). some code still does this
 * via backentry_free, which is okay, as long as you know you're the only
 * thread holding a reference to the deleted entry.
 * returns: 0 on success
 *          1 if the entry wasn't in the cache at all (not even partially)
 */
int cache_remove(struct cache *cache, void *ptr)
{
    int ret = 0;
    struct backcommon *e;

    if (NULL == ptr) {
        LOG("=> cache_remove\n<= cache_remove (null entry)\n", 0, 0, 0);
        return ret;
    }
    e = (struct backcommon *)ptr;
    PR_Lock(cache->c_mutex);
    if (CACHE_TYPE_ENTRY == e->ep_type) {
        ASSERT(e->ep_refcnt > 0);
        ret = entrycache_remove_int(cache, (struct backentry *)e);
    } else if (CACHE_TYPE_DN == e->ep_type) {
        ret = dncache_remove_int(cache, (struct backdn *)e);
    }
    PR_Unlock(cache->c_mutex);
    return ret;
}
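
/* Illustrative sketch (not compiled; 'inst_cache' and 'some_id' are
 * hypothetical): the remove-then-return sequence described above, for an
 * entry fetched via cache_find_id().
 */
#if 0
struct backentry *e = cache_find_id(&inst_cache, some_id);
if (e != NULL) {
    cache_remove(&inst_cache, e);           /* unhook from the hashtables */
    cache_return(&inst_cache, (void **)&e); /* drop our ref; frees the
                                             * DELETED entry at refcnt 0 */
}
#endif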
/* replace an entry in the cache.
 * returns: 0 on success
 *          1 if the entry wasn't in the cache
 */
int cache_replace(struct cache *cache, void *oldptr, void *newptr)
{
    struct backcommon *olde;

    if (NULL == oldptr || NULL == newptr) {
        LOG("=> cache_replace\n<= cache_replace (null entry)\n", 0, 0, 0);
        return 0;
    }
    olde = (struct backcommon *)oldptr;
    if (CACHE_TYPE_ENTRY == olde->ep_type) {
        return entrycache_replace(cache, (struct backentry *)oldptr,
                                  (struct backentry *)newptr);
    } else if (CACHE_TYPE_DN == olde->ep_type) {
        return dncache_replace(cache, (struct backdn *)oldptr,
                               (struct backdn *)newptr);
    }
    return 0;
}
static int entrycache_replace(struct cache *cache, struct backentry *olde,
                              struct backentry *newe)
{
    int found;
    const char *oldndn;
    const char *newndn;
#ifdef UUIDCACHE_ON
    const char *olduuid;
    const char *newuuid;
#endif
    size_t entry_size = 0;

    LOG("=> entrycache_replace (%s) -> (%s)\n", backentry_get_ndn(olde),
        backentry_get_ndn(newe), 0);

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    oldndn = slapi_sdn_get_ndn(backentry_get_sdn(olde));
#ifdef UUIDCACHE_ON
    olduuid = slapi_entry_get_uniqueid(olde->ep_entry);
    newuuid = slapi_entry_get_uniqueid(newe->ep_entry);
#endif
    newndn = slapi_sdn_get_ndn(backentry_get_sdn(newe));
    entry_size = cache_entry_size(newe);

    PR_Lock(cache->c_mutex);
    /*
     * First, remove the old entry from all the hashtables.
     * If the old entry is in the cache but missing from at least one of
     * the cache tables, that's an operation error.
     */
    if ((olde->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        int found_in_dn = remove_hash(cache->c_dntable, (void *)oldndn, strlen(oldndn));
        int found_in_id = remove_hash(cache->c_idtable, &(olde->ep_id), sizeof(ID));
#ifdef UUIDCACHE_ON
        int found_in_uuid = remove_hash(cache->c_uuidtable, (void *)olduuid, strlen(olduuid));
#endif
        found = found_in_dn && found_in_id;
#ifdef UUIDCACHE_ON
        found = found && found_in_uuid;
#endif
        if (!found) {
#ifdef UUIDCACHE_ON
            LOG("entry cache replace: cache index tables out of sync - found dn [%d] id [%d] uuid [%d]\n",
                found_in_dn, found_in_id, found_in_uuid);
#else
            LOG("entry cache replace: cache index tables out of sync - found dn [%d] id [%d]\n",
                found_in_dn, found_in_id, 0);
#endif
            PR_Unlock(cache->c_mutex);
            return 1;
        }
    }
    if (!entry_same_dn(newe, (void *)oldndn) &&
        (newe->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        /* if we're doing a modrdn, the new entry can be in the dn table
         * already, so we need to remove that too.
         */
        if (remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn))) {
            slapi_counter_subtract(cache->c_cursize, newe->ep_size);
            cache->c_curentries--;
            LOG("entry cache replace remove entry size %lu\n", newe->ep_size, 0, 0);
        }
    }
    /* now, add the new entry to the hashtables */
    /* (probably don't need such extensive error handling, once this has been
     * tested enough that we believe it works.)
     */
    if (!add_hash(cache->c_dntable, (void *)newndn, strlen(newndn), newe, NULL)) {
        LOG("entry cache replace: can't add dn\n", 0, 0, 0);
        PR_Unlock(cache->c_mutex);
        return 1;
    }
    if (!add_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID), newe, NULL)) {
        LOG("entry cache replace: can't add id\n", 0, 0, 0);
        if (remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn)) == 0) {
            LOG("entry cache replace: failed to remove dn table\n", 0, 0, 0);
        }
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#ifdef UUIDCACHE_ON
    if (newuuid && !add_hash(cache->c_uuidtable, (void *)newuuid, strlen(newuuid),
                             newe, NULL)) {
        LOG("entry cache replace: can't add uuid\n", 0, 0, 0);
        if (remove_hash(cache->c_dntable, (void *)newndn, strlen(newndn)) == 0) {
            LOG("entry cache replace: failed to remove dn table(uuid cache)\n", 0, 0, 0);
        }
        if (remove_hash(cache->c_idtable, &(newe->ep_id), sizeof(ID)) == 0) {
            LOG("entry cache replace: failed to remove id table(uuid cache)\n", 0, 0, 0);
        }
        PR_Unlock(cache->c_mutex);
        return 1;
    }
#endif
    /* adjust cache meta info */
    newe->ep_refcnt++;
    newe->ep_size = entry_size;
    if (newe->ep_size > olde->ep_size) {
        slapi_counter_add(cache->c_cursize, newe->ep_size - olde->ep_size);
    } else if (newe->ep_size < olde->ep_size) {
        slapi_counter_subtract(cache->c_cursize, olde->ep_size - newe->ep_size);
    }
    olde->ep_state = ENTRY_STATE_DELETED;
    newe->ep_state = 0;
    PR_Unlock(cache->c_mutex);
    LOG("<= entrycache_replace OK, cache size now %lu cache count now %ld\n",
        slapi_counter_get_value(cache->c_cursize), cache->c_curentries, 0);
    return 0;
}
/* call this when you're done with an entry that was fetched via one of
 * the cache_find_* calls.
 */
void cache_return(struct cache *cache, void **ptr)
{
    struct backcommon *bep;

    if (NULL == ptr || NULL == *ptr) {
        LOG("=> cache_return\n<= cache_return (null entry)\n", 0, 0, 0);
        return;
    }
    bep = *(struct backcommon **)ptr;
    if (CACHE_TYPE_ENTRY == bep->ep_type) {
        entrycache_return(cache, (struct backentry **)ptr);
    } else if (CACHE_TYPE_DN == bep->ep_type) {
        dncache_return(cache, (struct backdn **)ptr);
    }
}
static void
entrycache_return(struct cache *cache, struct backentry **bep)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    struct backentry *e;

    e = *bep;
    LOG("=> entrycache_return (%s) entry count: %d, entry in cache:%ld\n",
        backentry_get_ndn(e), e->ep_refcnt, cache->c_curentries);

    PR_Lock(cache->c_mutex);
    if (e->ep_state & ENTRY_STATE_NOTINCACHE) {
        backentry_free(bep);
    } else {
        ASSERT(e->ep_refcnt > 0);
        if (!--e->ep_refcnt) {
            if (e->ep_state & ENTRY_STATE_DELETED) {
                backentry_free(bep);
            } else {
                lru_add(cache, e);
                /* the cache might be overfull... */
                if (CACHE_FULL(cache))
                    eflush = entrycache_flush(cache);
            }
        }
    }
    PR_Unlock(cache->c_mutex);
    while (eflush) {
        eflushtemp = BACK_LRU_NEXT(eflush, struct backentry *);
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    LOG("<= entrycache_return\n", 0, 0, 0);
}
/* lookup an entry in the cache by DN (you must return it later).
 * the DN is assumed to be already normalized by the caller (dn2entry.c);
 * this function takes the cache lock itself.
 */
struct backentry *cache_find_dn(struct cache *cache, const char *dn, unsigned long ndnlen)
{
    struct backentry *e;

    LOG("=> cache_find_dn (%s)\n", dn, 0, 0);

    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_dntable, (void *)dn, ndnlen, (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_dn (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, (void *)e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);

    LOG("<= cache_find_dn (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
/* lookup an entry in the cache by its id# (you must return it later) */
struct backentry *cache_find_id(struct cache *cache, ID id)
{
    struct backentry *e;

    LOG("=> cache_find_id (%lu)\n", (u_long)id, 0, 0);

    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_idtable, &id, sizeof(ID), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_id (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, (void *)e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);

    LOG("<= cache_find_id (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
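
/* While a caller holds the reference handed out here, the entry cannot be
 * evicted: entrycache_flush only takes victims from the LRU list, and an
 * entry with a nonzero refcnt is never on that list.
 */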
#ifdef UUIDCACHE_ON
/* lookup an entry in the cache by its uuid (you must return it later) */
struct backentry *cache_find_uuid(struct cache *cache, const char *uuid)
{
    struct backentry *e;

    LOG("=> cache_find_uuid (%s)\n", uuid, 0, 0);

    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_uuidtable, uuid, strlen(uuid), (void **)&e)) {
        /* need to check entry state */
        if (e->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= cache_find_uuid (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (e->ep_refcnt == 0)
            lru_delete(cache, (void *)e);
        e->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);

    LOG("<= cache_find_uuid (%sFOUND)\n", e ? "" : "NOT ", 0, 0);
    return e;
}
#endif
/* add an entry to the cache */
static int
entrycache_add_int(struct cache *cache, struct backentry *e, int state,
                   struct backentry **alt)
{
    struct backentry *eflush = NULL;
    struct backentry *eflushtemp = NULL;
    const char *ndn = slapi_sdn_get_ndn(backentry_get_sdn(e));
#ifdef UUIDCACHE_ON
    const char *uuid = slapi_entry_get_uniqueid(e->ep_entry);
#endif
    struct backentry *my_alt;
    size_t entry_size = 0;
    int already_in = 0;

    LOG("=> entrycache_add_int( \"%s\", %ld )\n", backentry_get_ndn(e),
        e->ep_id, 0);

    if (e->ep_size == 0) {
        /*
         * This entry has not yet been assigned its size, as it's not in
         * the cache yet. Calculate it outside of the cache lock.
         */
        entry_size = cache_entry_size(e);
    } else {
        entry_size = e->ep_size;
    }

    PR_Lock(cache->c_mutex);
    if (!add_hash(cache->c_dntable, (void *)ndn, strlen(ndn), e,
                  (void **)&my_alt)) {
        LOG("entry \"%s\" already in dn cache\n", backentry_get_ndn(e), 0, 0);
        /* add_hash filled in 'my_alt' if necessary */
        if (my_alt == e) {
            if ((e->ep_state & ENTRY_STATE_CREATING) && (state == 0)) {
                /* attempting to "add" an entry that's already in the cache,
                 * and the old entry was a placeholder and the new one isn't?
                 * sounds like a confirmation of a previous add!
                 */
                LOG("confirming a previous add\n", 0, 0, 0);
                already_in = 1;
            } else {
                /* the entry is already in the cache and we're in one of
                 * these cases:
                 * 1) ep_state: CREATING && state: CREATING
                 *    ==> keep protecting the entry; increase the refcnt
                 * 2) ep_state: 0 && state: CREATING
                 *    ==> change the state to CREATING (protect it);
                 *        increase the refcnt
                 * 3) ep_state: 0 && state: 0
                 *    ==> increase the refcnt
                 */
                if (e->ep_refcnt == 0)
                    lru_delete(cache, (void *)e);
                e->ep_refcnt++;
                e->ep_state = state; /* might be CREATING */
                /* returning 1 (entry already existed), but don't set 'alt',
                 * to prevent the caller from accidentally thinking the
                 * existing entry is not the same one it already has and
                 * releasing it.
                 */
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        } else {
            if (my_alt->ep_state & ENTRY_STATE_CREATING) {
                LOG("the entry is reserved\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            } else if (state != 0) {
                LOG("the entry already exists. cannot reserve it.\n", 0, 0, 0);
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            } else {
                if (alt) {
                    *alt = my_alt;
                    if ((*alt)->ep_refcnt == 0)
                        lru_delete(cache, (void *)*alt);
                    (*alt)->ep_refcnt++;
                }
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
    }
    /* creating an entry with ENTRY_STATE_CREATING just creates a stub
     * which is only stored in the dn table (basically, reserving the dn) --
     * doing an add later with state==0 will "confirm" the add
     */
    if (state == 0) {
        /* neither of these should fail, or something is very wrong. */
        if (!add_hash(cache->c_idtable, &(e->ep_id), sizeof(ID), e, NULL)) {
            LOG("entry %s already in id cache!\n", backentry_get_ndn(e), 0, 0);
            if (already_in) {
                /* there's a bug in the implementation of 'modify' and 'modrdn'
                 * that i'm working around here. basically they do a
                 * tentative add of the new (modified) entry, which places
                 * the new entry in the cache, indexed only by dn.
                 *
                 * later they call id2entry_add() on the new entry, which
                 * "adds" the new entry to the cache. unfortunately, that
                 * add will fail, since the old entry is still in the cache,
                 * and both the old and new entries have the same ID and UUID.
                 *
                 * i catch that here, and just return 0 for success, without
                 * messing with either entry. a later cache_replace() will
                 * remove the old entry and add the new one, and all will be
                 * fine (i think).
                 */
                LOG("<= entrycache_add_int (ignoring)\n", 0, 0, 0);
                PR_Unlock(cache->c_mutex);
                return 0;
            }
            if (remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn)) == 0) {
                LOG("entrycache_add_int: failed to remove dn table\n", 0, 0, 0);
            }
            e->ep_state |= ENTRY_STATE_NOTINCACHE;
            PR_Unlock(cache->c_mutex);
            return -1;
        }
#ifdef UUIDCACHE_ON
        if (uuid) {
            /* (only insert entries with a uuid) */
            if (!add_hash(cache->c_uuidtable, (void *)uuid, strlen(uuid), e,
                          NULL)) {
                LOG("entry %s already in uuid cache!\n", backentry_get_ndn(e),
                    0, 0);
                if (remove_hash(cache->c_dntable, (void *)ndn, strlen(ndn)) == 0) {
                    LOG("entrycache_add_int: failed to remove dn table(uuid cache)\n", 0, 0, 0);
                }
                if (remove_hash(cache->c_idtable, &(e->ep_id), sizeof(ID)) == 0) {
                    LOG("entrycache_add_int: failed to remove id table(uuid cache)\n", 0, 0, 0);
                }
                e->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            }
        }
#endif
    }

    e->ep_state = state;

    if (!already_in) {
        e->ep_refcnt = 1;
        e->ep_size = entry_size;
        slapi_counter_add(cache->c_cursize, e->ep_size);
        cache->c_curentries++;
        /* don't add to lru since refcnt = 1 */
        LOG("added entry of size %lu -> total now %lu out of max %lu\n",
            e->ep_size, slapi_counter_get_value(cache->c_cursize),
            cache->c_maxsize);
        if (cache->c_maxentries >= 0) {
            LOG("    total entries %ld out of %ld\n",
                cache->c_curentries, cache->c_maxentries, 0);
        }
        /* check for full cache, and clear out if necessary */
        if (CACHE_FULL(cache))
            eflush = entrycache_flush(cache);
    }
    PR_Unlock(cache->c_mutex);

    while (eflush) {
        eflushtemp = BACK_LRU_NEXT(eflush, struct backentry *);
        backentry_free(&eflush);
        eflush = eflushtemp;
    }
    LOG("<= entrycache_add_int OK\n", 0, 0, 0);
    return 0;
}
/* create an entry in the cache, and increase its refcount (you must
 * return it when you're done).
 * returns: 0 entry has been created & locked
 *          1 entry already existed
 *         -1 something bad happened
 *
 * if 'alt' is not NULL, and the entry is found to already exist in the
 * cache, a refcounted pointer to that entry will be placed in 'alt'.
 * (this means code which suffered from race conditions between multiple
 * entry modifiers can now work.)
 */
int cache_add(struct cache *cache, void *ptr, void **alt)
{
    struct backcommon *e;

    if (NULL == ptr) {
        LOG("=> cache_add\n<= cache_add (null entry)\n", 0, 0, 0);
        return 0;
    }
    e = (struct backcommon *)ptr;
    if (CACHE_TYPE_ENTRY == e->ep_type) {
        return entrycache_add_int(cache, (struct backentry *)e,
                                  0, (struct backentry **)alt);
    } else if (CACHE_TYPE_DN == e->ep_type) {
        return dncache_add_int(cache, (struct backdn *)e,
                               0, (struct backdn **)alt);
    }
    return 0;
}
/* same as above, but add it tentatively: nobody else can use this entry
 * from the cache until you later call cache_add.
 */
int cache_add_tentative(struct cache *cache, struct backentry *e,
                        struct backentry **alt)
{
    return entrycache_add_int(cache, e, ENTRY_STATE_CREATING, alt);
}
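
/* Illustrative sketch (not compiled; names are hypothetical): the two-phase
 * add used by modify-style code paths -- reserve the dn with a tentative
 * add, write the entry, then confirm with a normal cache_add().
 */
#if 0
struct backentry *newe = make_modified_copy(olde); /* hypothetical helper */
if (cache_add_tentative(&inst_cache, newe, NULL) == 0) {
    /* dn is reserved (ENTRY_STATE_CREATING); after writing the entry,
     * a state==0 add confirms it: */
    cache_add(&inst_cache, newe, NULL);
}
#endif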
void cache_lock(struct cache *cache)
{
    PR_Lock(cache->c_mutex);
}

void cache_unlock(struct cache *cache)
{
    PR_Unlock(cache->c_mutex);
}
/* locks an entry so that it can be modified (you should have gotten the
 * entry via cache_find_*).
 * returns 0 on success,
 * returns 1 if the entry lock could not be created,
 * returns 2 (RETRY_CACHE_LOCK) if the entry is scheduled for deletion.
 */
int cache_lock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_lock_entry (%s)\n", backentry_get_ndn(e), 0, 0);

    if (!e->ep_mutexp) {
        /* make sure only one thread does this */
        PR_Lock(cache->c_emutexalloc_mutex);
        if (!e->ep_mutexp) {
            e->ep_mutexp = PR_NewMonitor();
            if (!e->ep_mutexp) {
                LDAPDebug1Arg(LDAP_DEBUG_ANY,
                              "cache_lock_entry: failed to create a lock for %s\n",
                              backentry_get_ndn(e));
                LOG("<= cache_lock_entry (FAILED)\n", 0, 0, 0);
                /* don't leak the allocation mutex on the failure path */
                PR_Unlock(cache->c_emutexalloc_mutex);
                return 1;
            }
        }
        PR_Unlock(cache->c_emutexalloc_mutex);
    }

    /* wait on entry lock (done w/o holding the cache lock) */
    PR_EnterMonitor(e->ep_mutexp);

    /* make sure entry hasn't been deleted now */
    PR_Lock(cache->c_mutex);
    if (e->ep_state & (ENTRY_STATE_DELETED | ENTRY_STATE_NOTINCACHE)) {
        PR_Unlock(cache->c_mutex);
        PR_ExitMonitor(e->ep_mutexp);
        LOG("<= cache_lock_entry (DELETED)\n", 0, 0, 0);
        return RETRY_CACHE_LOCK;
    }
    PR_Unlock(cache->c_mutex);

    LOG("<= cache_lock_entry (FOUND)\n", 0, 0, 0);
    return 0;
}

/* the opposite of the above */
void cache_unlock_entry(struct cache *cache, struct backentry *e)
{
    LOG("=> cache_unlock_entry\n", 0, 0, 0);
    if (PR_ExitMonitor(e->ep_mutexp)) {
        LOG("=> cache_unlock_entry - monitor was not entered!!!\n", 0, 0, 0);
    }
}
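
/*
 * Locking discipline sketch (illustrative; 'e' is assumed to have come
 * from a cache_find_* call, so we already hold a reference):
 *
 *     int rc;
 *     while ((rc = cache_lock_entry(&inst->inst_cache, e)) == RETRY_CACHE_LOCK) {
 *         // the entry was deleted underneath us: drop it and re-fetch
 *         cache_return(&inst->inst_cache, (void **)&e);
 *         e = ... look the entry up again ...;
 *     }
 *     if (rc == 0) {
 *         // ... modify the entry ...
 *         cache_unlock_entry(&inst->inst_cache, e);
 *     }
 */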

/* DN cache */

/* remove everything from the cache */
static void
dncache_clear_int(struct cache *cache)
{
    struct backdn *dnflush = NULL;
    struct backdn *dnflushtemp = NULL;
    size_t size = cache->c_maxsize;

    if (!entryrdn_get_switch()) {
        return;
    }

    cache->c_maxsize = 0;
    dnflush = dncache_flush(cache);
    while (dnflush) {
        dnflushtemp = BACK_LRU_NEXT(dnflush, struct backdn *);
        backdn_free(&dnflush);
        dnflush = dnflushtemp;
    }
    cache->c_maxsize = size;
    if (cache->c_curentries > 0) {
        LDAPDebug1Arg(LDAP_DEBUG_ANY,
                      "dncache_clear_int: there are still %ld dn's "
                      "in the dn cache. :/\n", cache->c_curentries);
    }
}

static int
dn_same_id(const void *bdn, const void *k)
{
    return (((struct backdn *)bdn)->ep_id == *(ID *)k);
}

static void
dncache_set_max_size(struct cache *cache, size_t bytes)
{
    struct backdn *dnflush = NULL;
    struct backdn *dnflushtemp = NULL;

    if (!entryrdn_get_switch()) {
        return;
    }

    if (bytes < MINCACHESIZE) {
        bytes = MINCACHESIZE;
        LDAPDebug(LDAP_DEBUG_ANY,
                  "WARNING -- Minimum cache size is %lu -- rounding up\n",
                  MINCACHESIZE, 0, 0);
    }
    PR_Lock(cache->c_mutex);
    cache->c_maxsize = bytes;
    LOG("dn cache size set to %lu\n", bytes, 0, 0);
    /* check for full cache, and clear out if necessary */
    if (CACHE_FULL(cache)) {
        dnflush = dncache_flush(cache);
    }
    while (dnflush) {
        dnflushtemp = BACK_LRU_NEXT(dnflush, struct backdn *);
        backdn_free(&dnflush);
        dnflush = dnflushtemp;
    }
    if (cache->c_curentries < 50) {
        /* there's hardly anything left in the cache -- clear it out and
         * resize the hashtables for efficiency.
         */
        erase_cache(cache, CACHE_TYPE_DN);
        cache_make_hashes(cache, CACHE_TYPE_DN);
    }
    PR_Unlock(cache->c_mutex);
    if (! dblayer_is_cachesize_sane(&bytes)) {
        LDAPDebug1Arg(LDAP_DEBUG_ANY,
                      "WARNING -- Possible CONFIGURATION ERROR -- cachesize "
                      "(%lu) may be configured to use more than the available "
                      "physical memory.\n", bytes);
    }
}
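
/*
 * Example (sketch; the call site is hypothetical -- in practice this is
 * reached through the generic cache size setter when the dn cache is
 * reconfigured):
 *
 *     dncache_set_max_size(&inst->inst_dncache, (size_t)10 * 1024 * 1024);
 *
 * Requests below MINCACHESIZE are rounded up, and the final value is
 * sanity-checked against physical memory via dblayer_is_cachesize_sane().
 */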

/* remove a dn from the cache */
/* you must be holding c_mutex !! */
static int
dncache_remove_int(struct cache *cache, struct backdn *bdn)
{
    int ret = 1;  /* assume not in cache */

    if (!entryrdn_get_switch()) {
        return 0;
    }

    LOG("=> dncache_remove_int (%s)\n", slapi_sdn_get_dn(bdn->dn_sdn), 0, 0);
    if (bdn->ep_state & ENTRY_STATE_NOTINCACHE) {
        return ret;
    }

    /* remove from id hashtable */
    if (remove_hash(cache->c_idtable, &(bdn->ep_id), sizeof(ID))) {
        ret = 0;
    } else {
        LOG("remove %d from id hash failed\n", bdn->ep_id, 0, 0);
    }
    if (ret == 0) {
        /* won't be on the LRU list since it has a refcount on it */
        /* adjust cache size */
        slapi_counter_subtract(cache->c_cursize, bdn->ep_size);
        cache->c_curentries--;
        LOG("<= dncache_remove_int (size %lu): cache now %lu dn's, %lu bytes\n",
            bdn->ep_size, cache->c_curentries,
            slapi_counter_get_value(cache->c_cursize));
    }

    /* mark for deletion (will be erased when refcount drops to zero) */
    bdn->ep_state |= ENTRY_STATE_DELETED;
    LOG("<= dncache_remove_int: %d\n", ret, 0, 0);
    return ret;
}

static void
dncache_return(struct cache *cache, struct backdn **bdn)
{
    struct backdn *dnflush = NULL;
    struct backdn *dnflushtemp = NULL;

    if (!entryrdn_get_switch()) {
        return;
    }

    LOG("=> dncache_return (%s) reference count: %d, dn in cache:%ld\n",
        slapi_sdn_get_dn((*bdn)->dn_sdn), (*bdn)->ep_refcnt,
        cache->c_curentries);
    PR_Lock(cache->c_mutex);
    if ((*bdn)->ep_state & ENTRY_STATE_NOTINCACHE) {
        backdn_free(bdn);
    } else {
        ASSERT((*bdn)->ep_refcnt > 0);
        if (! --(*bdn)->ep_refcnt) {
            if ((*bdn)->ep_state & ENTRY_STATE_DELETED) {
                backdn_free(bdn);
            } else {
                lru_add(cache, (void *)*bdn);
                /* the cache might be overfull... */
                if (CACHE_FULL(cache)) {
                    dnflush = dncache_flush(cache);
                }
            }
        }
    }
    PR_Unlock(cache->c_mutex);
    while (dnflush) {
        dnflushtemp = BACK_LRU_NEXT(dnflush, struct backdn *);
        backdn_free(&dnflush);
        dnflush = dnflushtemp;
    }
}

/* lookup a dn in the cache by its id# (you must return it later) */
struct backdn *
dncache_find_id(struct cache *cache, ID id)
{
    struct backdn *bdn = NULL;

    if (!entryrdn_get_switch()) {
        return bdn;
    }

    LOG("=> dncache_find_id (%lu)\n", (u_long)id, 0, 0);
    PR_Lock(cache->c_mutex);
    if (find_hash(cache->c_idtable, &id, sizeof(ID), (void **)&bdn)) {
        /* need to check entry state */
        if (bdn->ep_state != 0) {
            /* entry is deleted or not fully created yet */
            PR_Unlock(cache->c_mutex);
            LOG("<= dncache_find_id (NOT FOUND)\n", 0, 0, 0);
            return NULL;
        }
        if (bdn->ep_refcnt == 0) {
            lru_delete(cache, (void *)bdn);
        }
        bdn->ep_refcnt++;
        PR_Unlock(cache->c_mutex);
        slapi_counter_increment(cache->c_hits);
    } else {
        PR_Unlock(cache->c_mutex);
    }
    slapi_counter_increment(cache->c_tries);

    LOG("<= dncache_find_id (%sFOUND)\n", bdn ? "" : "NOT ", 0, 0);
    return bdn;
}
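
/*
 * Lookup/return pairing sketch (names hypothetical; cache_return() is
 * assumed to be the public release function that dispatches to
 * dncache_return() by ep_type):
 *
 *     struct backdn *bdn = dncache_find_id(&inst->inst_dncache, id);
 *     if (bdn) {
 *         // ... read bdn->dn_sdn; the reference we hold keeps it
 *         //     off the LRU list ...
 *         cache_return(&inst->inst_dncache, (void **)&bdn);
 *     }
 */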

/* add a dn to the cache */
static int
dncache_add_int(struct cache *cache, struct backdn *bdn, int state,
                struct backdn **alt)
{
    struct backdn *dnflush = NULL;
    struct backdn *dnflushtemp = NULL;
    struct backdn *my_alt;
    int already_in = 0;

    if (!entryrdn_get_switch()) {
        return 0;
    }

    LOG("=> dncache_add_int( \"%s\", %ld )\n", slapi_sdn_get_dn(bdn->dn_sdn),
        bdn->ep_id, 0);
    PR_Lock(cache->c_mutex);
    if (! add_hash(cache->c_idtable, &(bdn->ep_id), sizeof(ID), bdn,
                   (void **)&my_alt)) {
        LOG("entry %s already in id cache!\n", slapi_sdn_get_dn(bdn->dn_sdn),
            0, 0);
        if (my_alt == bdn) {
            if ((bdn->ep_state & ENTRY_STATE_CREATING) && (state == 0)) {
                /* attempting to "add" a dn that's already in the cache,
                 * and the old entry was a placeholder and the new one isn't?
                 * sounds like a confirmation of a previous add!
                 */
                LOG("confirming a previous add\n", 0, 0, 0);
                already_in = 1;
            } else {
                /* the entry is already in the cache, and one of the
                 * following holds:
                 * 1) ep_state: CREATING && state: CREATING
                 *    ==> keep protecting the entry; increase the refcnt
                 * 2) ep_state: 0 && state: CREATING
                 *    ==> change the state to CREATING (protect it);
                 *        increase the refcnt
                 * 3) ep_state: 0 && state: 0
                 *    ==> increase the refcnt
                 */
                if (bdn->ep_refcnt == 0) {
                    lru_delete(cache, (void *)bdn);
                }
                bdn->ep_refcnt++;
                bdn->ep_state = state; /* might be CREATING */
                /* returning 1 (entry already existed), but don't set *alt,
                 * so the caller doesn't mistake the existing entry for a
                 * different one and release it.
                 */
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        } else {
            if (my_alt->ep_state & ENTRY_STATE_CREATING) {
                LOG("the entry is reserved\n", 0, 0, 0);
                bdn->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            } else if (state != 0) {
                LOG("the entry already exists. cannot reserve it.\n", 0, 0, 0);
                bdn->ep_state |= ENTRY_STATE_NOTINCACHE;
                PR_Unlock(cache->c_mutex);
                return -1;
            } else {
                if (alt) {
                    *alt = my_alt;
                    if ((*alt)->ep_refcnt == 0) {
                        lru_delete(cache, (void *)*alt);
                    }
                    (*alt)->ep_refcnt++;
                }
                PR_Unlock(cache->c_mutex);
                return 1;
            }
        }
    }

    bdn->ep_state = state;

    if (! already_in) {
        bdn->ep_refcnt = 1;
        if (0 == bdn->ep_size) {
            bdn->ep_size = slapi_sdn_get_size(bdn->dn_sdn);
        }
        slapi_counter_add(cache->c_cursize, bdn->ep_size);
        cache->c_curentries++;
        /* don't add to lru since refcnt = 1 */
        LOG("added entry of size %lu -> total now %lu out of max %lu\n",
            bdn->ep_size, slapi_counter_get_value(cache->c_cursize),
            cache->c_maxsize);
        if (cache->c_maxentries >= 0) {
            LOG("    total entries %ld out of %ld\n",
                cache->c_curentries, cache->c_maxentries, 0);
        }
        /* check for full cache, and clear out if necessary */
        if (CACHE_FULL(cache)) {
            dnflush = dncache_flush(cache);
        }
    }
    PR_Unlock(cache->c_mutex);
    while (dnflush) {
        dnflushtemp = BACK_LRU_NEXT(dnflush, struct backdn *);
        backdn_free(&dnflush);
        dnflush = dnflushtemp;
    }
    LOG("<= dncache_add_int OK\n", 0, 0, 0);
    return 0;
}
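
/*
 * Return-code handling sketch for dncache_add_int() (mirrors cache_add();
 * caller names hypothetical):
 *
 *     struct backdn *existing = NULL;
 *     switch (dncache_add_int(cache, bdn, 0, &existing)) {
 *     case 0:   // inserted; we hold the only reference
 *         break;
 *     case 1:   // already cached; 'existing' (if set) is refcounted for us
 *         break;
 *     case -1:  // reserved by another thread or reservation conflict;
 *               // bdn was marked ENTRY_STATE_NOTINCACHE and never cached
 *         break;
 *     }
 */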

static int
dncache_replace(struct cache *cache, struct backdn *olddn, struct backdn *newdn)
{
    int found;

    if (!entryrdn_get_switch()) {
        return 0;
    }

    LOG("=> dncache_replace (%s) -> (%s)\n",
        slapi_sdn_get_dn(olddn->dn_sdn), slapi_sdn_get_dn(newdn->dn_sdn), 0);

    /* remove from all hashtables -- this function may be called from places
     * where the entry isn't in all the tables yet, so we don't care if any
     * of these return errors.
     */
    PR_Lock(cache->c_mutex);

    /*
     * First, remove the old entry from the hashtable.
     * If the old entry is in the cache but missing from at least one of
     * the cache tables, that's an operation error.
     */
    if ((olddn->ep_state & ENTRY_STATE_NOTINCACHE) == 0) {
        found = remove_hash(cache->c_idtable, &(olddn->ep_id), sizeof(ID));
        if (!found) {
            LOG("dn cache replace: cache index tables out of sync\n", 0, 0, 0);
            PR_Unlock(cache->c_mutex);
            return 1;
        }
    }

    /* now, add the new entry to the hashtables */
    /* (probably don't need such extensive error handling, once this has been
     * tested enough that we believe it works.)
     */
    if (!add_hash(cache->c_idtable, &(newdn->ep_id), sizeof(ID), newdn, NULL)) {
        LOG("dn cache replace: can't add id\n", 0, 0, 0);
        PR_Unlock(cache->c_mutex);
        return 1;
    }

    /* adjust cache meta info */
    newdn->ep_refcnt = 1;
    if (0 == newdn->ep_size) {
        newdn->ep_size = slapi_sdn_get_size(newdn->dn_sdn);
    }
    if (newdn->ep_size > olddn->ep_size) {
        slapi_counter_add(cache->c_cursize, newdn->ep_size - olddn->ep_size);
    } else if (newdn->ep_size < olddn->ep_size) {
        slapi_counter_subtract(cache->c_cursize, olddn->ep_size - newdn->ep_size);
    }
    olddn->ep_state = ENTRY_STATE_DELETED;
    newdn->ep_state = 0;
    PR_Unlock(cache->c_mutex);
    LOG("<= dncache_replace OK, cache size now %lu cache count now %ld\n",
        slapi_counter_get_value(cache->c_cursize), cache->c_curentries, 0);
    return 0;
}
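
/*
 * Replace sketch (e.g., when a modrdn changes an entry's dn; assumes the
 * backdn_init() constructor from backdn.c and a caller-held reference on
 * olddn -- both are assumptions, not shown in this file):
 *
 *     struct backdn *newdn = backdn_init(new_sdn, olddn->ep_id, 0);
 *     if (dncache_replace(cache, olddn, newdn) == 0) {
 *         // olddn is now marked ENTRY_STATE_DELETED and will be freed
 *         // when its refcount drops; newdn is live with refcnt 1
 *     }
 */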

static struct backdn *
dncache_flush(struct cache *cache)
{
    struct backdn *dn = NULL;

    if (!entryrdn_get_switch()) {
        return dn;
    }

    LOG("=> dncache_flush\n", 0, 0, 0);

    /* all entries on the LRU list are guaranteed to have a refcnt = 0
     * (iow, nobody's using them), so just delete from the tail down
     * until the cache is a manageable size again.
     * (cache->c_mutex is locked when we enter this)
     */
    while ((cache->c_lrutail != NULL) && CACHE_FULL(cache)) {
        if (dn == NULL) {
            dn = CACHE_LRU_TAIL(cache, struct backdn *);
        } else {
            dn = BACK_LRU_PREV(dn, struct backdn *);
        }
        ASSERT(dn->ep_refcnt == 0);
        dn->ep_refcnt++;
        if (dncache_remove_int(cache, dn) < 0) {
            LDAPDebug(LDAP_DEBUG_ANY, "dn cache flush: unable to delete entry\n",
                      0, 0, 0);
            break;
        }
        if (dn == CACHE_LRU_HEAD(cache, struct backdn *)) {
            break;
        }
    }
    if (dn) {
        LRU_DETACH(cache, dn);
    }
    LOG("<= dncache_flush (down to %lu dns, %lu bytes)\n", cache->c_curentries,
        slapi_counter_get_value(cache->c_cursize), 0);
    return dn;
}
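
/*
 * Note on the flush protocol (a summary of the pattern used by the callers
 * above): dncache_flush() walks the LRU tail-first while c_mutex is held,
 * bumps each victim's refcount so dncache_remove_int() skips the LRU
 * bookkeeping, and finally LRU_DETACH()es the whole chain.  Callers then
 * drop c_mutex and free the detached chain, e.g.:
 *
 *     dnflush = dncache_flush(cache);      // under c_mutex
 *     PR_Unlock(cache->c_mutex);
 *     while (dnflush) {                    // free outside the lock
 *         dnflushtemp = BACK_LRU_NEXT(dnflush, struct backdn *);
 *         backdn_free(&dnflush);
 *         dnflush = dnflushtemp;
 *     }
 */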

#ifdef LDAP_CACHE_DEBUG_LRU
/* for debugging -- painstakingly verify the lru list is ok -- if 'in' is
 * true, then dn 'dn' should be in the list right now; otherwise, it
 * should NOT be in the list.
 */
static void
dn_lru_verify(struct cache *cache, struct backdn *dn, int in)
{
    int is_in = 0;
    int count = 0;
    struct backdn *dnp;

    dnp = CACHE_LRU_HEAD(cache, struct backdn *);
    while (dnp) {
        count++;
        if (dnp == dn) {
            is_in = 1;
        }
        if (dnp->ep_lruprev) {
            ASSERT(BACK_LRU_NEXT(BACK_LRU_PREV(dnp, struct backdn *),
                                 struct backdn *) == dnp);
        } else {
            ASSERT(dnp == CACHE_LRU_HEAD(cache, struct backdn *));
        }
        if (dnp->ep_lrunext) {
            ASSERT(BACK_LRU_PREV(BACK_LRU_NEXT(dnp, struct backdn *),
                                 struct backdn *) == dnp);
        } else {
            ASSERT(dnp == CACHE_LRU_TAIL(cache, struct backdn *));
        }
        dnp = BACK_LRU_NEXT(dnp, struct backdn *);
    }
    ASSERT(is_in == in);
}
#endif