cl5_clcache.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145
  1. /** BEGIN COPYRIGHT BLOCK
  2. * Copyright (C) 2005 Red Hat, Inc.
  3. * All rights reserved.
  4. *
  5. * License: GPL (version 3 or any later version).
  6. * See LICENSE for details.
  7. * END COPYRIGHT BLOCK **/
  8. #ifdef HAVE_CONFIG_H
  9. #include <config.h>
  10. #endif
  11. #include "errno.h" /* ENOMEM, EVAL used by Berkeley DB */
  12. #include "db.h" /* Berkeley DB */
  13. #include "cl5.h" /* changelog5Config */
  14. #include "cl5_clcache.h"
  15. /* newer bdb uses DB_BUFFER_SMALL instead of ENOMEM as the
  16. error return if the given buffer in which to load a
  17. key or value is too small - if it is not defined, define
  18. it here to ENOMEM
  19. */
  20. #ifndef DB_BUFFER_SMALL
  21. #define DB_BUFFER_SMALL ENOMEM
  22. #endif
  23. /*
  24. * Constants for the buffer pool:
  25. *
  26. * DEFAULT_CLC_BUFFER_PAGE_COUNT
  27. * Little performance boost if it is too small.
  28. *
  29. * DEFAULT_CLC_BUFFER_PAGE_SIZE
  30. * Its value is determined based on the DB requirement that
  31. * the buffer size should be the multiple of 1024.
  32. */
  33. #define DEFAULT_CLC_BUFFER_COUNT_MIN 10
  34. #define DEFAULT_CLC_BUFFER_COUNT_MAX 0
  35. #define DEFAULT_CLC_BUFFER_PAGE_COUNT 32
  36. #define DEFAULT_CLC_BUFFER_PAGE_SIZE 1024
  37. #define WORK_CLC_BUFFER_PAGE_SIZE 8 * DEFAULT_CLC_BUFFER_PAGE_SIZE
/* Iteration states, kept both per-buffer (buf_state) and per-RID
 * (csn_seq_ctrl_block.state). */
enum
{
    CLC_STATE_READY = 0,         /* ready to iterate */
    CLC_STATE_UP_TO_DATE,        /* remote RUV already covers the CSN */
    CLC_STATE_CSN_GT_RUV,        /* local RUV doesn't cover the CSN */
    CLC_STATE_NEW_RID,           /* unknown RID to local RUVs */
    CLC_STATE_UNSAFE_RUV_CHANGE, /* (RUV1 < maxcsn-in-buffer) && (RUV1 < RUV1') */
    CLC_STATE_DONE,              /* no more change */
    CLC_STATE_ABORTING           /* abort replication session */
};
typedef struct clc_busy_list CLC_Busy_List;

/* Per-RID bookkeeping used to decide which CSNs to send in a session. */
struct csn_seq_ctrl_block
{
    ReplicaId rid;           /* RID this block serves */
    CSN *consumer_maxcsn;    /* Don't send CSN <= this */
    CSN *local_maxcsn;       /* Don't send CSN > this */
    CSN *prev_local_maxcsn;  /* Copy of last state at buffer loading */
    CSN *local_mincsn;       /* Used to determine anchor csn */
    int state;               /* CLC_STATE_* */
};
/*
 * Each cl5replayiterator acquires a buffer from the buffer pool
 * at the beginning of a replication session, and returns it back
 * at the end.
 */
struct clc_buffer
{
    char *buf_agmt_name;         /* agreement acquired this buffer */
    ReplicaId buf_consumer_rid;  /* help checking threshold csn */
    const RUV *buf_consumer_ruv; /* used to skip change (borrowed, not owned) */
    const RUV *buf_local_ruv;    /* used to refresh local_maxcsn (borrowed) */
    int buf_ignoreConsumerRID;   /* how to handle updates from consumer */
    int buf_load_cnt;            /* number of loads for session */

    /*
     * fields for retrieving data from DB
     */
    int buf_state;               /* CLC_STATE_* of the whole buffer */
    CSN *buf_current_csn;        /* CSN of the change last handed out */
    int buf_load_flag;           /* db flag DB_MULTIPLE_KEY, DB_SET, DB_NEXT */
    DBC *buf_cursor;             /* open DB cursor, NULL between loads */
    DBT buf_key;                 /* current csn string */
    DBT buf_data;                /* data retrieved from db (bulk buffer) */
    void *buf_record_ptr;        /* ptr to the current record in data */
    CSN *buf_missing_csn;        /* used to detect persistent missing of CSN */
    CSN *buf_prev_missing_csn;   /* used to suppress the repeated messages */

    /* fields that control the CSN sequence sent to the consumer */
    struct csn_seq_ctrl_block **buf_cscbs;
    int buf_num_cscbs;           /* number of csn sequence ctrl blocks */
    int buf_max_cscbs;           /* capacity of buf_cscbs (grown on demand) */

    /* fields for debugging stat */
    int buf_record_cnt;                 /* number of changes for session */
    int buf_record_skipped;             /* number of changes skipped */
    int buf_skipped_new_rid;            /* number of changes skipped due to new_rid */
    int buf_skipped_csn_gt_cons_maxcsn; /* number of changes skipped due to csn greater than consumer maxcsn */
    int buf_skipped_up_to_date;         /* number of changes skipped due to consumer being up-to-date for the given rid */
    int buf_skipped_csn_gt_ruv;         /* number of changes skipped due to precedents are not covered by local RUV snapshot */
    int buf_skipped_csn_covered;        /* number of changes skipped due to CSNs already covered by consumer RUV */

    /*
     * fields that should be accessed via bl_lock or pl_lock
     */
    CLC_Buffer *buf_next;        /* next buffer in the same list */
    CLC_Busy_List *buf_busy_list; /* which busy list I'm in */
};
/*
 * Each changelog has a busy buffer list; bl_lock serializes access to
 * the buffers and to the underlying changelog DB handle.
 */
struct clc_busy_list
{
    PRLock *bl_lock;
    DB *bl_db;              /* changelog db handle */
    CLC_Buffer *bl_buffers; /* busy buffers of this list */
    CLC_Busy_List *bl_next; /* next busy list in the pool */
};
/*
 * Each process has a single buffer pool (see the static _pool below).
 */
struct clc_pool
{
    Slapi_RWLock *pl_lock;        /* cl writer and agreements */
    CLC_Busy_List *pl_busy_lists; /* busy buffer lists, one list per changelog file */
    int pl_buffer_cnt_now;        /* total number of buffers */
    int pl_buffer_cnt_min;        /* free a newly returned buffer if _now > _min */
    int pl_buffer_cnt_max;        /* no use */
    int pl_buffer_default_pages;  /* num of pages in a new buffer */
};
  123. /* static variables */
  124. static struct clc_pool *_pool = NULL; /* process's buffer pool */
  125. /* static prototypes */
  126. static int clcache_initial_anchorcsn(CLC_Buffer *buf, int *flag);
  127. static int clcache_adjust_anchorcsn(CLC_Buffer *buf, int *flag);
  128. static void clcache_refresh_consumer_maxcsns(CLC_Buffer *buf);
  129. static int clcache_refresh_local_maxcsns(CLC_Buffer *buf);
  130. static int clcache_skip_change(CLC_Buffer *buf);
  131. static int clcache_load_buffer_bulk(CLC_Buffer *buf, int flag);
  132. static int clcache_open_cursor(DB_TXN *txn, CLC_Buffer *buf, DBC **cursor);
  133. static int clcache_cursor_get(DBC *cursor, CLC_Buffer *buf, int flag);
  134. static struct csn_seq_ctrl_block *clcache_new_cscb(void);
  135. static void clcache_free_cscb(struct csn_seq_ctrl_block **cscb);
  136. static CLC_Buffer *clcache_new_buffer(ReplicaId consumer_rid);
  137. static void clcache_delete_buffer(CLC_Buffer **buf);
  138. static CLC_Busy_List *clcache_new_busy_list(void);
  139. static void clcache_delete_busy_list(CLC_Busy_List **bl);
  140. static int clcache_enqueue_busy_list(DB *db, CLC_Buffer *buf);
  141. static void csn_dup_or_init_by_csn(CSN **csn1, CSN *csn2);
  142. /*
  143. * Initiates the process buffer pool. This should be done
  144. * once and only once when process starts.
  145. */
  146. int
  147. clcache_init(void)
  148. {
  149. if (_pool) {
  150. return 0; /* already initialized */
  151. }
  152. _pool = (struct clc_pool *)slapi_ch_calloc(1, sizeof(struct clc_pool));
  153. _pool->pl_buffer_cnt_min = DEFAULT_CLC_BUFFER_COUNT_MIN;
  154. _pool->pl_buffer_cnt_max = DEFAULT_CLC_BUFFER_COUNT_MAX;
  155. _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_COUNT_MAX;
  156. _pool->pl_lock = slapi_new_rwlock();
  157. return 0;
  158. }
  159. /*
  160. * This is part of a callback function when changelog configuration
  161. * is read or updated.
  162. */
  163. void
  164. clcache_set_config()
  165. {
  166. slapi_rwlock_wrlock(_pool->pl_lock);
  167. _pool->pl_buffer_cnt_max = CL5_DEFAULT_CONFIG_CACHESIZE;
  168. /*
  169. * According to http://www.sleepycat.com/docs/api_c/dbc_get.html,
  170. * data buffer should be a multiple of 1024 bytes in size
  171. * for DB_MULTIPLE_KEY operation.
  172. */
  173. _pool->pl_buffer_default_pages = CL5_DEFAULT_CONFIG_CACHEMEMSIZE / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1;
  174. if (_pool->pl_buffer_default_pages <= 0) { /* this never be true... */
  175. _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_PAGE_COUNT;
  176. }
  177. slapi_rwlock_unlock(_pool->pl_lock);
  178. }
/*
 * Gets the pointer to a thread dedicated buffer, or allocates
 * a new buffer if there is no buffer allocated yet for this thread.
 *
 * This is called when a cl5replayiterator is created for
 * a replication session.
 *
 * buf          - output; receives the buffer, or NULL on failure.
 * db           - changelog DB handle the buffer will read from.
 * consumer_rid - RID of the consumer replica being fed.
 * consumer_ruv - consumer RUV; stored by reference, not copied.
 * local_ruv    - local RUV; stored by reference, not copied.
 *
 * Returns 0 on success, CL5_BAD_DATA if buf is NULL, or
 * CL5_MEMORY_ERROR if no buffer could be obtained.
 */
int
clcache_get_buffer(CLC_Buffer **buf, DB *db, ReplicaId consumer_rid, const RUV *consumer_ruv, const RUV *local_ruv)
{
    int rc = 0;
    int need_new;

    if (buf == NULL)
        return CL5_BAD_DATA;

    *buf = NULL;

    /* if the pool was re-initialized, the thread private cache will be invalid,
       so we must get a new one */
    need_new = (!_pool || !_pool->pl_busy_lists || !_pool->pl_busy_lists->bl_buffers);

    if ((!need_new) && (NULL != (*buf = (CLC_Buffer *)get_thread_private_cache()))) {
        /* Reuse the cached buffer: reset all per-session state but keep
         * the buffer's DB cursor storage and busy-list membership. */
        slapi_log_err(SLAPI_LOG_REPL, get_thread_private_agmtname(),
                      "clcache_get_buffer - found thread private buffer cache %p\n", *buf);
        slapi_log_err(SLAPI_LOG_REPL, get_thread_private_agmtname(),
                      "clcache_get_buffer - _pool is %p _pool->pl_busy_lists is %p _pool->pl_busy_lists->bl_buffers is %p\n",
                      _pool, _pool ? _pool->pl_busy_lists : NULL,
                      (_pool && _pool->pl_busy_lists) ? _pool->pl_busy_lists->bl_buffers : NULL);
        (*buf)->buf_state = CLC_STATE_READY;
        (*buf)->buf_load_cnt = 0;
        (*buf)->buf_record_cnt = 0;
        (*buf)->buf_record_skipped = 0;
        (*buf)->buf_cursor = NULL;
        (*buf)->buf_skipped_new_rid = 0;
        (*buf)->buf_skipped_csn_gt_cons_maxcsn = 0;
        (*buf)->buf_skipped_up_to_date = 0;
        (*buf)->buf_skipped_csn_gt_ruv = 0;
        (*buf)->buf_skipped_csn_covered = 0;
        /* Fresh cscb array each session; the old one was freed by
         * clcache_return_buffer(). */
        (*buf)->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
                                                                          sizeof(struct csn_seq_ctrl_block *));
        (*buf)->buf_num_cscbs = 0;
        (*buf)->buf_max_cscbs = MAX_NUM_OF_MASTERS;
    } else {
        /* No usable cached buffer: allocate one and register it on the
         * changelog's busy list, then cache it for this thread. */
        *buf = clcache_new_buffer(consumer_rid);
        if (*buf) {
            if (0 == clcache_enqueue_busy_list(db, *buf)) {
                set_thread_private_cache((void *)(*buf));
            } else {
                clcache_delete_buffer(buf);
            }
        }
    }
    if (NULL != *buf) {
        CSN *c_csn = NULL;
        CSN *l_csn = NULL;
        (*buf)->buf_consumer_ruv = consumer_ruv;
        (*buf)->buf_local_ruv = local_ruv;
        (*buf)->buf_load_flag = DB_MULTIPLE_KEY;
        /* Decide whether changes originated at the consumer itself must
         * still be replayed back to it: only when the supplier holds
         * newer updates for the consumer's own RID. */
        ruv_get_largest_csn_for_replica(consumer_ruv, consumer_rid, &c_csn);
        ruv_get_largest_csn_for_replica(local_ruv, consumer_rid, &l_csn);
        if (l_csn && csn_compare(l_csn, c_csn) > 0) {
            /* the supplier has updates for the consumer RID and
             * these updates are newer than on the consumer
             */
            (*buf)->buf_ignoreConsumerRID = 0;
        } else {
            (*buf)->buf_ignoreConsumerRID = 1;
        }
        csn_free(&c_csn);
        csn_free(&l_csn);
    } else {
        slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
                      "clcache_get_buffer - Can't allocate new buffer\n");
        rc = CL5_MEMORY_ERROR;
    }
    return rc;
}
  253. /*
  254. * Returns a buffer back to the buffer pool.
  255. */
  256. void
  257. clcache_return_buffer(CLC_Buffer **buf)
  258. {
  259. int i;
  260. slapi_log_err(SLAPI_LOG_REPL, (*buf)->buf_agmt_name,
  261. "clcache_return_buffer - session end: state=%d load=%d sent=%d skipped=%d skipped_new_rid=%d "
  262. "skipped_csn_gt_cons_maxcsn=%d skipped_up_to_date=%d "
  263. "skipped_csn_gt_ruv=%d skipped_csn_covered=%d\n",
  264. (*buf)->buf_state,
  265. (*buf)->buf_load_cnt,
  266. (*buf)->buf_record_cnt - (*buf)->buf_record_skipped,
  267. (*buf)->buf_record_skipped, (*buf)->buf_skipped_new_rid,
  268. (*buf)->buf_skipped_csn_gt_cons_maxcsn,
  269. (*buf)->buf_skipped_up_to_date, (*buf)->buf_skipped_csn_gt_ruv,
  270. (*buf)->buf_skipped_csn_covered);
  271. for (i = 0; i < (*buf)->buf_num_cscbs; i++) {
  272. clcache_free_cscb(&(*buf)->buf_cscbs[i]);
  273. }
  274. slapi_ch_free((void **)&(*buf)->buf_cscbs);
  275. if ((*buf)->buf_cursor) {
  276. (*buf)->buf_cursor->c_close((*buf)->buf_cursor);
  277. (*buf)->buf_cursor = NULL;
  278. }
  279. }
  280. /*
  281. * Loads a buffer from DB.
  282. *
  283. * anchorcsn - passed in for the first load of a replication session;
  284. * flag - DB_SET to load in the key CSN record.
  285. * DB_NEXT to load in the records greater than key CSN.
  286. * return - DB error code instead of cl5 one because of the
  287. * historic reason.
  288. */
  289. int
  290. clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss)
  291. {
  292. int rc = 0;
  293. int flag = DB_NEXT;
  294. if (anchorCSN)
  295. *anchorCSN = NULL;
  296. clcache_refresh_local_maxcsns(buf);
  297. if (buf->buf_load_cnt == 0) {
  298. clcache_refresh_consumer_maxcsns(buf);
  299. rc = clcache_initial_anchorcsn(buf, &flag);
  300. } else {
  301. rc = clcache_adjust_anchorcsn(buf, &flag);
  302. }
  303. if (rc == 0) {
  304. buf->buf_state = CLC_STATE_READY;
  305. if (anchorCSN)
  306. *anchorCSN = buf->buf_current_csn;
  307. rc = clcache_load_buffer_bulk(buf, flag);
  308. if (rc == DB_NOTFOUND && continue_on_miss && *continue_on_miss) {
  309. /* make replication going using next best startcsn */
  310. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  311. "clcache_load_buffer - Can't load changelog buffer starting at CSN %s with flag(%s). "
  312. "Trying to use an alterantive start CSN.\n",
  313. (char *)buf->buf_key.data,
  314. flag == DB_NEXT ? "DB_NEXT" : "DB_SET");
  315. rc = clcache_load_buffer_bulk(buf, DB_SET_RANGE);
  316. if (rc == 0) {
  317. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  318. "clcache_load_buffer - Using alternative start iteration csn: %s \n",
  319. (char *)buf->buf_key.data);
  320. }
  321. /* the use of alternative start csns can be limited, record its usage */
  322. (*continue_on_miss)--;
  323. }
  324. /* Reset some flag variables */
  325. if (rc == 0) {
  326. int i;
  327. for (i = 0; i < buf->buf_num_cscbs; i++) {
  328. buf->buf_cscbs[i]->state = CLC_STATE_READY;
  329. }
  330. } else {
  331. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  332. "clcache_load_buffer - Can't locate CSN %s in the changelog (DB rc=%d). "
  333. "If replication stops, the consumer may need to be reinitialized.\n",
  334. (char *)buf->buf_key.data, rc);
  335. }
  336. } else if (rc == CLC_STATE_DONE) {
  337. rc = DB_NOTFOUND;
  338. }
  339. if (rc != 0) {
  340. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  341. "clcache_load_buffer - rc=%d\n", rc);
  342. }
  343. return rc;
  344. }
/*
 * Performs one bulk (DB_MULTIPLE_KEY) read from the changelog into
 * buf->buf_data, starting at buf->buf_key.  Retries on deadlock up to
 * MAX_TRIALS times with a randomized backoff.  On success, initializes
 * buf->buf_record_ptr for iteration via DB_MULTIPLE_KEY_NEXT.
 * Returns a Berkeley DB error code (0 on success).
 */
static int
clcache_load_buffer_bulk(CLC_Buffer *buf, int flag)
{
    DB_TXN *txn = NULL;
    DBC *cursor = NULL;
    int rc = 0;
    int tries = 0;
    int use_flag = flag;

    if (NULL == buf) {
        slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
                      "clcache_load_buffer_bulk - NULL buf\n");
        return rc;
    }
    if (NULL == buf->buf_busy_list) {
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
                                                         "%s%sno buf_busy_list\n",
                      buf->buf_agmt_name ? buf->buf_agmt_name : "",
                      buf->buf_agmt_name ? ": " : "");
        return rc;
    }

    PR_Lock(buf->buf_busy_list->bl_lock);
retry:
    if (0 == (rc = clcache_open_cursor(txn, buf, &cursor))) {
        if (use_flag == DB_NEXT) {
            /* For bulk read, position the cursor before read the next block */
            rc = cursor->c_get(cursor,
                               &buf->buf_key,
                               &buf->buf_data,
                               DB_SET);
        }

        /*
         * Continue if the error is no-mem since we don't need to
         * load in the key record anyway with DB_SET.
         */
        if (0 == rc || DB_BUFFER_SMALL == rc) {
            rc = clcache_cursor_get(cursor, buf, use_flag);
        }
    }

    /*
     * Don't keep a cursor open across the whole replication session.
     * That had caused noticeable DB resource contention.
     */
    if (cursor) {
        cursor->c_close(cursor);
        cursor = NULL;
    }
    if ((rc == DB_LOCK_DEADLOCK) && (tries < MAX_TRIALS)) {
        PRIntervalTime interval;

        tries++;
        slapi_log_err(SLAPI_LOG_TRACE, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
                                                           "deadlock number [%d] - retrying\n",
                      tries);
        /* back off */
        interval = PR_MillisecondsToInterval(slapi_rand() % 100);
        DS_Sleep(interval);
        use_flag = flag;
        goto retry;
    }
    if ((rc == DB_LOCK_DEADLOCK) && (tries >= MAX_TRIALS)) {
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
                                                          "could not load buffer from changelog after %d tries\n",
                      tries);
    }

    PR_Unlock(buf->buf_busy_list->bl_lock);

    buf->buf_record_ptr = NULL;
    if (0 == rc) {
        /* Prime the bulk-iteration pointer; a NULL result means the
         * bulk buffer came back empty, which we treat as not-found. */
        DB_MULTIPLE_INIT(buf->buf_record_ptr, &buf->buf_data);
        if (NULL == buf->buf_record_ptr)
            rc = DB_NOTFOUND;
        else
            buf->buf_load_cnt++;
    }
    return rc;
}
/*
 * Gets the next change from the buffer.
 * *key : output - key of the next change, or NULL if no more change
 * *data: output - data of the next change, or NULL if no more change
 * *csn : output - CSN of the returned change, or NULL if no more change
 *                 (points at buf->buf_current_csn; caller must not free)
 * Returns 0 on success, DB_NOTFOUND when the changelog is exhausted.
 * Changes judged unnecessary for the consumer are skipped transparently.
 */
int
clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn)
{
    int skip = 1;
    int rc = 0;

    do {
        *key = *data = NULL;
        *keylen = *datalen = 0;

        if (buf->buf_record_ptr) {
            /* Pull the next record out of the current bulk buffer. */
            DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
                                 *key, *keylen, *data, *datalen);
        }

        /*
         * We're done with the current buffer. Now load the next chunk.
         */
        if (NULL == *key && CLC_STATE_READY == buf->buf_state) {
            rc = clcache_load_buffer(buf, NULL, NULL);
            if (0 == rc && buf->buf_record_ptr) {
                DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
                                     *key, *keylen, *data, *datalen);
            }
        }

        /* Compare the new change to the local and remote RUVs */
        if (NULL != *key) {
            buf->buf_record_cnt++;
            /* the key is the CSN string of the change */
            csn_init_by_string(buf->buf_current_csn, (char *)*key);
            skip = clcache_skip_change(buf);
            if (skip)
                buf->buf_record_skipped++;
        }
    } while (rc == 0 && *key && skip);

    if (NULL == *key) {
        *key = NULL;
        *csn = NULL;
        rc = DB_NOTFOUND;
    } else {
        *csn = buf->buf_current_csn;
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                      "clcache_get_next_change - load=%d rec=%d csn=%s\n",
                      buf->buf_load_cnt, buf->buf_record_cnt, (char *)*key);
    }

    return rc;
}
  467. static void
  468. clcache_refresh_consumer_maxcsns(CLC_Buffer *buf)
  469. {
  470. int i;
  471. for (i = 0; i < buf->buf_num_cscbs; i++) {
  472. csn_free(&buf->buf_cscbs[i]->consumer_maxcsn);
  473. ruv_get_largest_csn_for_replica(
  474. buf->buf_consumer_ruv,
  475. buf->buf_cscbs[i]->rid,
  476. &buf->buf_cscbs[i]->consumer_maxcsn);
  477. }
  478. }
/*
 * RUV-enumeration callback (see clcache_refresh_local_maxcsns): updates
 * the csn_seq_ctrl_block for the RID of rid_data->csn, creating one on
 * first sight of the RID.  Records the RID's local max/min CSN, keeps a
 * copy of the previous local max, and marks the RID up-to-date when the
 * consumer already covers it.  Returns 0 on success, -1 on allocation
 * failure.
 */
static int
clcache_refresh_local_maxcsn(const ruv_enum_data *rid_data, void *data)
{
    struct clc_buffer *buf = (struct clc_buffer *)data;
    ReplicaId rid;
    int rc = 0;
    int i;

    rid = csn_get_replicaid(rid_data->csn);

    /* we do not handle updates originated at the consumer if not required
     * and we ignore RID which have been cleaned
     */
    if ((rid == buf->buf_consumer_rid && buf->buf_ignoreConsumerRID) ||
        is_cleaned_rid(rid))
        return rc;

    /* Linear search for the cscb serving this RID. */
    for (i = 0; i < buf->buf_num_cscbs; i++) {
        if (buf->buf_cscbs[i]->rid == rid)
            break;
    }
    if (i >= buf->buf_num_cscbs) {
        /* First change seen for this RID: grow the array if needed and
         * create a new control block at slot i. */
        if (i + 1 > buf->buf_max_cscbs) {
            /* NOTE(review): realloc leaves the new tail slots
             * uninitialized; only slot i is written below. */
            buf->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_realloc((char *)buf->buf_cscbs,
                                                                            (i + 2) * sizeof(struct csn_seq_ctrl_block *));
            buf->buf_max_cscbs = i + 1;
        }
        buf->buf_cscbs[i] = clcache_new_cscb();
        if (buf->buf_cscbs[i] == NULL) {
            return -1;
        }
        buf->buf_cscbs[i]->rid = rid;
        buf->buf_num_cscbs++;
        /* this is the first time we have a local change for the RID
         * we need to check what the consumer knows about it.
         */
        ruv_get_largest_csn_for_replica(
            buf->buf_consumer_ruv,
            buf->buf_cscbs[i]->rid,
            &buf->buf_cscbs[i]->consumer_maxcsn);
    }

    /* Remember the previous local max so anchor adjustment can detect
     * whether new changes arrived since the last buffer load. */
    if (buf->buf_cscbs[i]->local_maxcsn)
        csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->prev_local_maxcsn, buf->buf_cscbs[i]->local_maxcsn);

    csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->local_maxcsn, rid_data->csn);
    csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->local_mincsn, rid_data->min_csn);
    if (buf->buf_cscbs[i]->consumer_maxcsn &&
        csn_compare(buf->buf_cscbs[i]->consumer_maxcsn, rid_data->csn) >= 0) {
        /* No change need to be sent for this RID */
        buf->buf_cscbs[i]->state = CLC_STATE_UP_TO_DATE;
    }

    return rc;
}
/* Walks the local RUV snapshot and refreshes the per-RID local
 * max/min CSN bookkeeping via clcache_refresh_local_maxcsn(). */
static int
clcache_refresh_local_maxcsns(CLC_Buffer *buf)
{
    return ruv_enumerate_elements(buf->buf_local_ruv, clcache_refresh_local_maxcsn, buf);
}
/*
 * Algorithm:
 *
 * 1. Determine anchorcsn for each RID:
 * 2. Determine anchorcsn for next load:
 *    Anchor-CSN = min { all Next-Anchor-CSN, Buffer-MaxCSN }
 *
 * Computes the starting CSN for the FIRST buffer load of a session and
 * writes its string form into buf->buf_key.  Sets *flag to DB_SET when
 * the anchor record itself must be sent (consumer has never seen the
 * RID), DB_NEXT otherwise.  Returns buf->buf_state: CLC_STATE_READY
 * when there is something to send, CLC_STATE_DONE when there is not.
 */
static int
clcache_initial_anchorcsn(CLC_Buffer *buf, int *flag)
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;
    CSN *anchorcsn = NULL;

    if (buf->buf_state == CLC_STATE_READY) {
        for (i = 0; i < buf->buf_num_cscbs; i++) {
            CSN *rid_anchor = NULL;
            int rid_flag = DB_NEXT;
            cscb = buf->buf_cscbs[i];

            /* Expensive CSN stringification: only under REPL logging. */
            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                char prevmax[CSN_STRSIZE];
                char local[CSN_STRSIZE];
                char curr[CSN_STRSIZE];
                char conmaxcsn[CSN_STRSIZE];
                csn_as_string(cscb->prev_local_maxcsn, 0, prevmax);
                csn_as_string(cscb->local_maxcsn, 0, local);
                csn_as_string(buf->buf_current_csn, 0, curr);
                csn_as_string(cscb->consumer_maxcsn, 0, conmaxcsn);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                              "clcache_initial_anchorcsn - "
                              "%s - (cscb %d - state %d) - csnPrevMax (%s) "
                              "csnMax (%s) csnBuf (%s) csnConsumerMax (%s)\n",
                              buf->buf_agmt_name, i, cscb->state, prevmax, local,
                              curr, conmaxcsn);
            }

            if (cscb->consumer_maxcsn == NULL) {
                /* the consumer hasn't seen changes for this RID */
                rid_anchor = cscb->local_mincsn;
                rid_flag = DB_SET;
            } else if (csn_compare(cscb->local_maxcsn, cscb->consumer_maxcsn) > 0) {
                rid_anchor = cscb->consumer_maxcsn;
            }

            /* Keep the smallest candidate anchor across all RIDs. */
            if (rid_anchor && (anchorcsn == NULL ||
                               (csn_compare(rid_anchor, anchorcsn) < 0))) {
                anchorcsn = rid_anchor;
                *flag = rid_flag;
                hasChange = PR_TRUE;
            }
        }
    }

    if (!hasChange) {
        buf->buf_state = CLC_STATE_DONE;
    } else {
        csn_init_by_csn(buf->buf_current_csn, anchorcsn);
        buf->buf_key.data = csn_as_string(buf->buf_current_csn, 0, (char *)buf->buf_key.data);
        slapi_log_err(SLAPI_LOG_REPL, "clcache_initial_anchorcsn",
                      "anchor is now: %s\n", (char *)buf->buf_key.data);
    }

    return buf->buf_state;
}
/*
 * Computes the anchor CSN for a SUBSEQUENT buffer load of the same
 * session (buf_load_cnt > 0).  Same contract as
 * clcache_initial_anchorcsn(): writes the anchor into buf->buf_key,
 * sets *flag (DB_SET only when the consumer has never seen the RID),
 * and returns buf->buf_state (CLC_STATE_READY or CLC_STATE_DONE).
 */
static int
clcache_adjust_anchorcsn(CLC_Buffer *buf, int *flag)
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;
    CSN *anchorcsn = NULL;

    if (buf->buf_state == CLC_STATE_READY) {
        for (i = 0; i < buf->buf_num_cscbs; i++) {
            CSN *rid_anchor = NULL;
            int rid_flag = DB_NEXT;
            cscb = buf->buf_cscbs[i];

            /* Expensive CSN stringification: only under REPL logging. */
            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                char prevmax[CSN_STRSIZE];
                char local[CSN_STRSIZE];
                char curr[CSN_STRSIZE];
                char conmaxcsn[CSN_STRSIZE];
                csn_as_string(cscb->prev_local_maxcsn, 0, prevmax);
                csn_as_string(cscb->local_maxcsn, 0, local);
                csn_as_string(buf->buf_current_csn, 0, curr);
                csn_as_string(cscb->consumer_maxcsn, 0, conmaxcsn);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_adjust_anchorcsn - "
                                                                  "%s - (cscb %d - state %d) - csnPrevMax (%s) "
                                                                  "csnMax (%s) csnBuf (%s) csnConsumerMax (%s)\n",
                              buf->buf_agmt_name, i, cscb->state, prevmax, local,
                              curr, conmaxcsn);
            }

            if (csn_compare(cscb->local_maxcsn, cscb->consumer_maxcsn) > 0) {
                /* We have something to send for this RID */
                if (csn_compare(cscb->local_maxcsn, cscb->prev_local_maxcsn) == 0 ||
                    csn_compare(cscb->prev_local_maxcsn, buf->buf_current_csn) > 0) {
                    /* No new changes or it remains, in the buffer, updates to send */
                    rid_anchor = buf->buf_current_csn;
                } else {
                    /* prev local max csn < csnBuffer AND different from local maxcsn */
                    if (cscb->consumer_maxcsn == NULL) {
                        /* the consumer hasn't seen changes for this RID */
                        rid_anchor = cscb->local_mincsn;
                        rid_flag = DB_SET;
                    } else {
                        rid_anchor = cscb->consumer_maxcsn;
                    }
                }
            }

            /* Keep the smallest candidate anchor across all RIDs. */
            if (rid_anchor && (anchorcsn == NULL ||
                               (csn_compare(rid_anchor, anchorcsn) < 0))) {
                anchorcsn = rid_anchor;
                *flag = rid_flag;
                hasChange = PR_TRUE;
            }
        }
    }

    if (!hasChange) {
        buf->buf_state = CLC_STATE_DONE;
    } else {
        csn_init_by_csn(buf->buf_current_csn, anchorcsn);
        buf->buf_key.data = csn_as_string(buf->buf_current_csn, 0, (char *)buf->buf_key.data);
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                      "clcache_adjust_anchorcsn - anchor is now: %s\n", (char *)buf->buf_key.data);
    }

    return buf->buf_state;
}
  655. static int
  656. clcache_skip_change(CLC_Buffer *buf)
  657. {
  658. struct csn_seq_ctrl_block *cscb = NULL;
  659. ReplicaId rid;
  660. int skip = 1;
  661. int i;
  662. char buf_cur_csn_str[CSN_STRSIZE];
  663. do {
  664. rid = csn_get_replicaid(buf->buf_current_csn);
  665. /*
  666. * Skip CSN that is originated from the consumer,
  667. * unless the CSN is newer than the maxcsn.
  668. * If RID==65535, the CSN is originated from a
  669. * legacy consumer. In this case the supplier
  670. * and the consumer may have the same RID.
  671. */
  672. if (rid == buf->buf_consumer_rid && buf->buf_ignoreConsumerRID) {
  673. if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
  674. csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
  675. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  676. "clcache_skip_change - Skipping update because the consumer with Rid: [%d] is ignored\n", rid);
  677. buf->buf_skipped_csn_gt_cons_maxcsn++;
  678. }
  679. break;
  680. }
  681. /* Skip helper entry (ENTRY_COUNT, PURGE_RUV and so on) */
  682. if (cl5HelperEntry(NULL, buf->buf_current_csn) == PR_TRUE) {
  683. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  684. "clcache_skip_change - Skip helper entry type=%ld\n", csn_get_time(buf->buf_current_csn));
  685. break;
  686. }
  687. /* Find csn sequence control block for the current rid */
  688. for (i = 0; i < buf->buf_num_cscbs && buf->buf_cscbs[i]->rid != rid; i++)
  689. ;
  690. /* Skip CSN whose RID is unknown to the local RUV snapshot */
  691. if (i >= buf->buf_num_cscbs) {
  692. if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
  693. csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
  694. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  695. "clcache_skip_change - Skipping update because the changelog buffer current csn [%s] rid "
  696. "[%d] is not in the list of changelog csn buffers (length %d)\n",
  697. buf_cur_csn_str, rid, buf->buf_num_cscbs);
  698. }
  699. buf->buf_skipped_new_rid++;
  700. break;
  701. }
  702. cscb = buf->buf_cscbs[i];
  703. /* Skip if the consumer is already up-to-date for the RID */
  704. if (cscb->state == CLC_STATE_UP_TO_DATE) {
  705. buf->buf_skipped_up_to_date++;
  706. break;
  707. }
  708. /* Skip CSN whose preceedents are not covered by local RUV snapshot */
  709. if (cscb->state == CLC_STATE_CSN_GT_RUV) {
  710. buf->buf_skipped_csn_gt_ruv++;
  711. break;
  712. }
  713. /* Skip CSNs already covered by consumer RUV */
  714. if (cscb->consumer_maxcsn &&
  715. csn_compare(buf->buf_current_csn, cscb->consumer_maxcsn) <= 0) {
  716. buf->buf_skipped_csn_covered++;
  717. break;
  718. }
  719. /* Send CSNs that are covered by the local RUV snapshot */
  720. if (csn_compare(buf->buf_current_csn, cscb->local_maxcsn) <= 0) {
  721. skip = 0;
  722. csn_dup_or_init_by_csn(&cscb->consumer_maxcsn, buf->buf_current_csn);
  723. break;
  724. }
  725. /*
  726. * Promote the local maxcsn to its next neighbor
  727. * to keep the current session going. Skip if we
  728. * are not sure if current_csn is the neighbor.
  729. */
  730. if (csn_time_difference(buf->buf_current_csn, cscb->local_maxcsn) == 0 &&
  731. (csn_get_seqnum(buf->buf_current_csn) == csn_get_seqnum(cscb->local_maxcsn) + 1))
  732. {
  733. csn_init_by_csn(cscb->local_maxcsn, buf->buf_current_csn);
  734. if (cscb->consumer_maxcsn) {
  735. csn_init_by_csn(cscb->consumer_maxcsn, buf->buf_current_csn);
  736. }
  737. skip = 0;
  738. break;
  739. }
  740. /* Skip CSNs not covered by local RUV snapshot */
  741. cscb->state = CLC_STATE_CSN_GT_RUV;
  742. buf->buf_skipped_csn_gt_ruv++;
  743. } while (0);
  744. #ifdef DEBUG
  745. if (skip && cscb) {
  746. char consumer[24] = {'\0'};
  747. char local[24] = {'\0'};
  748. char current[24] = {'\0'};
  749. if (cscb->consumer_maxcsn)
  750. csn_as_string(cscb->consumer_maxcsn, PR_FALSE, consumer);
  751. if (cscb->local_maxcsn)
  752. csn_as_string(cscb->local_maxcsn, PR_FALSE, local);
  753. csn_as_string(buf->buf_current_csn, PR_FALSE, current);
  754. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  755. "clcache_skip_change - Skip %s consumer=%s local=%s\n", current, consumer, local);
  756. }
  757. #endif
  758. return skip;
  759. }
  760. static struct csn_seq_ctrl_block *
  761. clcache_new_cscb(void)
  762. {
  763. struct csn_seq_ctrl_block *cscb;
  764. cscb = (struct csn_seq_ctrl_block *)slapi_ch_calloc(1, sizeof(struct csn_seq_ctrl_block));
  765. if (cscb == NULL) {
  766. slapi_log_err(SLAPI_LOG_ERR, NULL, "clcache: malloc failure\n");
  767. }
  768. return cscb;
  769. }
  770. static void
  771. clcache_free_cscb(struct csn_seq_ctrl_block **cscb)
  772. {
  773. csn_free(&(*cscb)->consumer_maxcsn);
  774. csn_free(&(*cscb)->local_maxcsn);
  775. csn_free(&(*cscb)->prev_local_maxcsn);
  776. csn_free(&(*cscb)->local_mincsn);
  777. slapi_ch_free((void **)cscb);
  778. }
  779. /*
  780. * Allocate and initialize a new buffer
  781. * It is called when there is a request for a buffer while
  782. * buffer free list is empty.
  783. */
  784. static CLC_Buffer *
  785. clcache_new_buffer(ReplicaId consumer_rid)
  786. {
  787. CLC_Buffer *buf = NULL;
  788. int welldone = 0;
  789. do {
  790. buf = (CLC_Buffer *)slapi_ch_calloc(1, sizeof(CLC_Buffer));
  791. if (NULL == buf)
  792. break;
  793. buf->buf_key.flags = DB_DBT_USERMEM;
  794. buf->buf_key.ulen = CSN_STRSIZE + 1;
  795. buf->buf_key.size = CSN_STRSIZE;
  796. buf->buf_key.data = slapi_ch_calloc(1, buf->buf_key.ulen);
  797. if (NULL == buf->buf_key.data)
  798. break;
  799. buf->buf_data.flags = DB_DBT_USERMEM;
  800. buf->buf_data.ulen = _pool->pl_buffer_default_pages * DEFAULT_CLC_BUFFER_PAGE_SIZE;
  801. buf->buf_data.data = slapi_ch_malloc(buf->buf_data.ulen);
  802. if (NULL == buf->buf_data.data)
  803. break;
  804. if (NULL == (buf->buf_current_csn = csn_new()))
  805. break;
  806. buf->buf_state = CLC_STATE_READY;
  807. buf->buf_agmt_name = get_thread_private_agmtname();
  808. buf->buf_consumer_rid = consumer_rid;
  809. buf->buf_num_cscbs = 0;
  810. buf->buf_max_cscbs = MAX_NUM_OF_MASTERS;
  811. buf->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
  812. sizeof(struct csn_seq_ctrl_block *));
  813. welldone = 1;
  814. } while (0);
  815. if (!welldone) {
  816. clcache_delete_buffer(&buf);
  817. }
  818. return buf;
  819. }
  820. /*
  821. * Deallocates a buffer.
  822. * It is called when a buffer is returned to the buffer pool
  823. * and the pool size is over the limit.
  824. */
  825. static void
  826. clcache_delete_buffer(CLC_Buffer **buf)
  827. {
  828. if (buf && *buf) {
  829. slapi_ch_free(&((*buf)->buf_key.data));
  830. slapi_ch_free(&((*buf)->buf_data.data));
  831. csn_free(&((*buf)->buf_current_csn));
  832. csn_free(&((*buf)->buf_missing_csn));
  833. csn_free(&((*buf)->buf_prev_missing_csn));
  834. slapi_ch_free((void **)buf);
  835. }
  836. }
  837. static CLC_Busy_List *
  838. clcache_new_busy_list(void)
  839. {
  840. CLC_Busy_List *bl;
  841. int welldone = 0;
  842. do {
  843. if (NULL == (bl = (CLC_Busy_List *)slapi_ch_calloc(1, sizeof(CLC_Busy_List))))
  844. break;
  845. if (NULL == (bl->bl_lock = PR_NewLock()))
  846. break;
  847. /*
  848. if ( NULL == (bl->bl_max_csn = csn_new ()) )
  849. break;
  850. */
  851. welldone = 1;
  852. } while (0);
  853. if (!welldone) {
  854. clcache_delete_busy_list(&bl);
  855. }
  856. return bl;
  857. }
/*
 * Tear down one busy list: free every buffer chained on it, destroy
 * its lock, and free the list itself.  *bl is set to NULL by the
 * final slapi_ch_free; a NULL bl or *bl is a no-op.
 */
static void
clcache_delete_busy_list(CLC_Busy_List **bl)
{
    if (bl && *bl) {
        CLC_Buffer *buf = NULL;
        /* Hold the list lock (when present) while unchaining buffers so
         * no concurrent user walks the chain mid-teardown. */
        if ((*bl)->bl_lock) {
            PR_Lock((*bl)->bl_lock);
        }
        buf = (*bl)->bl_buffers;
        while (buf) {
            /* Save the link before the buffer (and its buf_next) is freed. */
            CLC_Buffer *next = buf->buf_next;
            clcache_delete_buffer(&buf);
            buf = next;
        }
        (*bl)->bl_buffers = NULL;
        (*bl)->bl_db = NULL;
        /* Must unlock before destroying the lock. */
        if ((*bl)->bl_lock) {
            PR_Unlock((*bl)->bl_lock);
            PR_DestroyLock((*bl)->bl_lock);
            (*bl)->bl_lock = NULL;
        }
        /* csn_free (&( (*bl)->bl_max_csn )); */
        slapi_ch_free((void **)bl);
    }
}
  883. static int
  884. clcache_enqueue_busy_list(DB *db, CLC_Buffer *buf)
  885. {
  886. CLC_Busy_List *bl;
  887. int rc = 0;
  888. slapi_rwlock_rdlock(_pool->pl_lock);
  889. for (bl = _pool->pl_busy_lists; bl && bl->bl_db != db; bl = bl->bl_next)
  890. ;
  891. slapi_rwlock_unlock(_pool->pl_lock);
  892. if (NULL == bl) {
  893. if (NULL == (bl = clcache_new_busy_list())) {
  894. rc = CL5_MEMORY_ERROR;
  895. } else {
  896. slapi_rwlock_wrlock(_pool->pl_lock);
  897. bl->bl_db = db;
  898. bl->bl_next = _pool->pl_busy_lists;
  899. _pool->pl_busy_lists = bl;
  900. slapi_rwlock_unlock(_pool->pl_lock);
  901. }
  902. }
  903. if (NULL != bl) {
  904. PR_Lock(bl->bl_lock);
  905. buf->buf_busy_list = bl;
  906. buf->buf_next = bl->bl_buffers;
  907. bl->bl_buffers = buf;
  908. PR_Unlock(bl->bl_lock);
  909. }
  910. return rc;
  911. }
  912. static int
  913. clcache_open_cursor(DB_TXN *txn, CLC_Buffer *buf, DBC **cursor)
  914. {
  915. int rc;
  916. rc = buf->buf_busy_list->bl_db->cursor(buf->buf_busy_list->bl_db, txn, cursor, 0);
  917. if (rc != 0) {
  918. slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
  919. "clcache: failed to open cursor; db error - %d %s\n",
  920. rc, db_strerror(rc));
  921. }
  922. return rc;
  923. }
/*
 * Fetch a record at the cursor into buf->buf_key / buf->buf_data,
 * transparently growing the USERMEM data DBT when the record does
 * not fit.  "flag" is OR'ed with the buffer's load flag and passed
 * to c_get().  Returns the Berkeley DB return code (0 on success).
 */
static int
clcache_cursor_get(DBC *cursor, CLC_Buffer *buf, int flag)
{
    int rc;

    if (buf->buf_data.ulen > WORK_CLC_BUFFER_PAGE_SIZE) {
        /*
         * The buffer size had to be increased,
         * reset it to a smaller working size,
         * if not sufficient it will be increased again
         */
        buf->buf_data.ulen = WORK_CLC_BUFFER_PAGE_SIZE;
    }

    rc = cursor->c_get(cursor,
                       &buf->buf_key,
                       &buf->buf_data,
                       buf->buf_load_flag | flag);
    if (DB_BUFFER_SMALL == rc) {
        /*
         * The record takes more space than the current size of the
         * buffer. Fortunately, buf->buf_data.size has been set by
         * c_get() to the actual data size needed. So we can
         * reallocate the data buffer and try to read again.
         */
        /* Round the required size up to a whole number of pages. */
        buf->buf_data.ulen = (buf->buf_data.size / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1) * DEFAULT_CLC_BUFFER_PAGE_SIZE;
        buf->buf_data.data = slapi_ch_realloc(buf->buf_data.data, buf->buf_data.ulen);
        if (buf->buf_data.data != NULL) {
            /* Retry the same fetch with the enlarged buffer. */
            rc = cursor->c_get(cursor,
                               &(buf->buf_key),
                               &(buf->buf_data),
                               buf->buf_load_flag | flag);
            slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                          "clcache_cursor_get - clcache: (%d | %d) buf key len %d reallocated and retry returns %d\n", buf->buf_load_flag, flag, buf->buf_key.size, rc);
        }
    }

    /* Log the failure modes callers are most likely to hit. */
    switch (rc) {
    case EINVAL:
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                      "clcache_cursor_get - invalid parameter\n");
        break;

    case DB_BUFFER_SMALL:
        /* Still too small: reached only if the retry above could not run
         * or itself failed with DB_BUFFER_SMALL. */
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                      "clcache_cursor_get - can't allocate %u bytes\n", buf->buf_data.ulen);
        break;

    default:
        break;
    }

    return rc;
}
  972. static void
  973. csn_dup_or_init_by_csn(CSN **csn1, CSN *csn2)
  974. {
  975. if (*csn1 == NULL)
  976. *csn1 = csn_new();
  977. csn_init_by_csn(*csn1, csn2);
  978. }
  979. void
  980. clcache_destroy()
  981. {
  982. if (_pool) {
  983. CLC_Busy_List *bl = NULL;
  984. if (_pool->pl_lock) {
  985. slapi_rwlock_wrlock(_pool->pl_lock);
  986. }
  987. bl = _pool->pl_busy_lists;
  988. while (bl) {
  989. CLC_Busy_List *next = bl->bl_next;
  990. clcache_delete_busy_list(&bl);
  991. bl = next;
  992. }
  993. _pool->pl_busy_lists = NULL;
  994. if (_pool->pl_lock) {
  995. slapi_rwlock_unlock(_pool->pl_lock);
  996. slapi_destroy_rwlock(_pool->pl_lock);
  997. _pool->pl_lock = NULL;
  998. }
  999. slapi_ch_free((void **)&_pool);
  1000. }
  1001. }