cl5_clcache.c

/** BEGIN COPYRIGHT BLOCK
 * Copyright (C) 2005 Red Hat, Inc.
 * All rights reserved.
 *
 * License: GPL (version 3 or any later version).
 * See LICENSE for details.
 * END COPYRIGHT BLOCK **/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "errno.h" /* ENOMEM, EINVAL used by Berkeley DB */
#include "db.h"    /* Berkeley DB */
#include "cl5.h"   /* changelog5Config */
#include "cl5_clcache.h"

/* Newer Berkeley DB releases use DB_BUFFER_SMALL instead of ENOMEM as the
 * error return when the buffer given for loading a key or value is too
 * small. If it is not defined, define it here as ENOMEM.
 */
#ifndef DB_BUFFER_SMALL
#define DB_BUFFER_SMALL ENOMEM
#endif
/*
 * Constants for the buffer pool:
 *
 * DEFAULT_CLC_BUFFER_PAGE_COUNT
 *     Too small a value gives little performance boost.
 *
 * DEFAULT_CLC_BUFFER_PAGE_SIZE
 *     Its value is determined by the DB requirement that
 *     the buffer size be a multiple of 1024.
 */
#define DEFAULT_CLC_BUFFER_COUNT_MIN 10
#define DEFAULT_CLC_BUFFER_COUNT_MAX 0
#define DEFAULT_CLC_BUFFER_PAGE_COUNT 32
#define DEFAULT_CLC_BUFFER_PAGE_SIZE 1024
#define WORK_CLC_BUFFER_PAGE_SIZE (8 * DEFAULT_CLC_BUFFER_PAGE_SIZE)
enum
{
    CLC_STATE_READY = 0,         /* ready to iterate */
    CLC_STATE_UP_TO_DATE,        /* remote RUV already covers the CSN */
    CLC_STATE_CSN_GT_RUV,        /* local RUV doesn't cover the CSN */
    CLC_STATE_NEW_RID,           /* RID unknown to the local RUVs */
    CLC_STATE_UNSAFE_RUV_CHANGE, /* (RUV1 < maxcsn-in-buffer) && (RUV1 < RUV1') */
    CLC_STATE_DONE,              /* no more changes */
    CLC_STATE_ABORTING           /* abort replication session */
};
typedef struct clc_busy_list CLC_Busy_List;

struct csn_seq_ctrl_block
{
    ReplicaId rid;          /* RID this block serves */
    CSN *consumer_maxcsn;   /* Don't send CSN <= this */
    CSN *local_maxcsn;      /* Don't send CSN > this */
    CSN *prev_local_maxcsn; /* Copy of the last state at buffer loading */
    CSN *local_mincsn;      /* Used to determine the anchor csn */
    int state;              /* CLC_STATE_* */
};
/*
 * Each cl5replayiterator acquires a buffer from the buffer pool
 * at the beginning of a replication session, and returns it
 * at the end.
 */
struct clc_buffer
{
    char *buf_agmt_name;         /* agreement that acquired this buffer */
    ReplicaId buf_consumer_rid;  /* helps checking the threshold csn */
    const RUV *buf_consumer_ruv; /* used to skip changes */
    const RUV *buf_local_ruv;    /* used to refresh local_maxcsn */
    int buf_ignoreConsumerRID;   /* how to handle updates from the consumer */
    int buf_load_cnt;            /* number of loads for the session */

    /*
     * fields for retrieving data from the DB
     */
    int buf_state;
    CSN *buf_current_csn;
    int buf_load_flag; /* db flag DB_MULTIPLE_KEY, DB_SET, DB_NEXT */
    DBC *buf_cursor;
    DBT buf_key;               /* current csn string */
    DBT buf_data;              /* data retrieved from the db */
    void *buf_record_ptr;      /* ptr to the current record in data */
    CSN *buf_missing_csn;      /* used to detect a persistently missing CSN */
    CSN *buf_prev_missing_csn; /* used to suppress repeated messages */

    /* fields for controlling the CSN sequence sent to the consumer */
    struct csn_seq_ctrl_block **buf_cscbs;
    int buf_num_cscbs; /* number of csn sequence ctrl blocks */
    int buf_max_cscbs;

    /* fields for debugging stats */
    int buf_record_cnt;                 /* number of changes for the session */
    int buf_record_skipped;             /* number of changes skipped */
    int buf_skipped_new_rid;            /* number of changes skipped due to a new rid */
    int buf_skipped_csn_gt_cons_maxcsn; /* number of changes skipped because the csn is greater than the consumer maxcsn */
    int buf_skipped_up_to_date;         /* number of changes skipped because the consumer is up-to-date for the given rid */
    int buf_skipped_csn_gt_ruv;         /* number of changes skipped because their precedents are not covered by the local RUV snapshot */
    int buf_skipped_csn_covered;        /* number of changes skipped because the CSNs are already covered by the consumer RUV */

    /*
     * fields that should be accessed via bl_lock or pl_lock
     */
    CLC_Buffer *buf_next;         /* next buffer in the same list */
    CLC_Busy_List *buf_busy_list; /* which busy list I'm in */
};
/*
 * Each changelog has a busy buffer list
 */
struct clc_busy_list
{
    PRLock *bl_lock;
    DB *bl_db;              /* changelog db handle */
    CLC_Buffer *bl_buffers; /* busy buffers of this list */
    CLC_Busy_List *bl_next; /* next busy list in the pool */
};

/*
 * Each process has a buffer pool
 */
struct clc_pool
{
    Slapi_RWLock *pl_lock;        /* cl writer and agreements */
    DB_ENV **pl_dbenv;            /* pointer to the DB_ENV for all the changelog files */
    CLC_Busy_List *pl_busy_lists; /* busy buffer lists, one list per changelog file */
    int pl_buffer_cnt_now;        /* total number of buffers */
    int pl_buffer_cnt_min;        /* free a newly returned buffer if _now > _min */
    int pl_buffer_cnt_max;        /* unused */
    int pl_buffer_default_pages;  /* number of pages in a new buffer */
};
/* static variables */
static struct clc_pool *_pool = NULL; /* process's buffer pool */

/* static prototypes */
static int clcache_initial_anchorcsn(CLC_Buffer *buf, int *flag);
static int clcache_adjust_anchorcsn(CLC_Buffer *buf, int *flag);
static void clcache_refresh_consumer_maxcsns(CLC_Buffer *buf);
static int clcache_refresh_local_maxcsns(CLC_Buffer *buf);
static int clcache_skip_change(CLC_Buffer *buf);
static int clcache_load_buffer_bulk(CLC_Buffer *buf, int flag);
static int clcache_open_cursor(DB_TXN *txn, CLC_Buffer *buf, DBC **cursor);
static int clcache_cursor_get(DBC *cursor, CLC_Buffer *buf, int flag);
static struct csn_seq_ctrl_block *clcache_new_cscb(void);
static void clcache_free_cscb(struct csn_seq_ctrl_block **cscb);
static CLC_Buffer *clcache_new_buffer(ReplicaId consumer_rid);
static void clcache_delete_buffer(CLC_Buffer **buf);
static CLC_Busy_List *clcache_new_busy_list(void);
static void clcache_delete_busy_list(CLC_Busy_List **bl);
static int clcache_enqueue_busy_list(DB *db, CLC_Buffer *buf);
static void csn_dup_or_init_by_csn(CSN **csn1, CSN *csn2);
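
/*
 * Typical calling sequence for one replication session, shown only as a
 * minimal illustrative sketch: the caller-side variables (dbenv, db,
 * consumer_rid, consumer_ruv, local_ruv) are assumed to be supplied by the
 * replication session code, and error handling is omitted.
 *
 *     CLC_Buffer *buf = NULL;
 *     CSN *anchorcsn = NULL;
 *     CSN *csn = NULL;
 *     void *key = NULL, *data = NULL;
 *     size_t keylen = 0, datalen = 0;
 *
 *     clcache_init(dbenv);                     // once, at process startup
 *     clcache_get_buffer(&buf, db, consumer_rid, consumer_ruv, local_ruv);
 *     clcache_load_buffer(buf, &anchorcsn, NULL);
 *     while (clcache_get_next_change(buf, &key, &keylen,
 *                                    &data, &datalen, &csn) == 0) {
 *         // replay the change whose csn was just returned to the consumer
 *     }
 *     clcache_return_buffer(&buf);             // at the end of the session
 */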
/*
 * Initializes the process buffer pool. This should be done
 * once and only once when the process starts.
 */
int
clcache_init(DB_ENV **dbenv)
{
    if (_pool) {
        return 0; /* already initialized */
    }
    if (NULL == dbenv) {
        return -1;
    }
    _pool = (struct clc_pool *)slapi_ch_calloc(1, sizeof(struct clc_pool));
    _pool->pl_dbenv = dbenv;
    _pool->pl_buffer_cnt_min = DEFAULT_CLC_BUFFER_COUNT_MIN;
    _pool->pl_buffer_cnt_max = DEFAULT_CLC_BUFFER_COUNT_MAX;
    _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_COUNT_MAX;
    _pool->pl_lock = slapi_new_rwlock();
    return 0;
}
/*
 * This is part of a callback function invoked when the changelog
 * configuration is read or updated.
 */
void
clcache_set_config()
{
    slapi_rwlock_wrlock(_pool->pl_lock);

    _pool->pl_buffer_cnt_max = CL5_DEFAULT_CONFIG_CACHESIZE;

    /*
     * According to http://www.sleepycat.com/docs/api_c/dbc_get.html,
     * the data buffer should be a multiple of 1024 bytes in size
     * for DB_MULTIPLE_KEY operation.
     */
    _pool->pl_buffer_default_pages = CL5_DEFAULT_CONFIG_CACHEMEMSIZE / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1;
    if (_pool->pl_buffer_default_pages <= 0) { /* this should never be true... */
        _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_PAGE_COUNT;
    }

    slapi_rwlock_unlock(_pool->pl_lock);
}
/*
 * Gets the pointer to a thread-dedicated buffer, or allocates
 * a new buffer if no buffer has been allocated yet for this thread.
 *
 * This is called when a cl5replayiterator is created for
 * a replication session.
 */
int
clcache_get_buffer(CLC_Buffer **buf, DB *db, ReplicaId consumer_rid, const RUV *consumer_ruv, const RUV *local_ruv)
{
    int rc = 0;
    int need_new;

    if (buf == NULL)
        return CL5_BAD_DATA;

    *buf = NULL;

    /* If the pool was re-initialized, the thread-private cache will be invalid,
       so we must get a new one */
    need_new = (!_pool || !_pool->pl_busy_lists || !_pool->pl_busy_lists->bl_buffers);

    if ((!need_new) && (NULL != (*buf = (CLC_Buffer *)get_thread_private_cache()))) {
        slapi_log_err(SLAPI_LOG_REPL, get_thread_private_agmtname(),
                      "clcache_get_buffer - found thread private buffer cache %p\n", *buf);
        slapi_log_err(SLAPI_LOG_REPL, get_thread_private_agmtname(),
                      "clcache_get_buffer - _pool is %p _pool->pl_busy_lists is %p _pool->pl_busy_lists->bl_buffers is %p\n",
                      _pool, _pool ? _pool->pl_busy_lists : NULL,
                      (_pool && _pool->pl_busy_lists) ? _pool->pl_busy_lists->bl_buffers : NULL);
        (*buf)->buf_state = CLC_STATE_READY;
        (*buf)->buf_load_cnt = 0;
        (*buf)->buf_record_cnt = 0;
        (*buf)->buf_record_skipped = 0;
        (*buf)->buf_cursor = NULL;
        (*buf)->buf_skipped_new_rid = 0;
        (*buf)->buf_skipped_csn_gt_cons_maxcsn = 0;
        (*buf)->buf_skipped_up_to_date = 0;
        (*buf)->buf_skipped_csn_gt_ruv = 0;
        (*buf)->buf_skipped_csn_covered = 0;
        (*buf)->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
                                                                          sizeof(struct csn_seq_ctrl_block *));
        (*buf)->buf_num_cscbs = 0;
        (*buf)->buf_max_cscbs = MAX_NUM_OF_MASTERS;
    } else {
        *buf = clcache_new_buffer(consumer_rid);
        if (*buf) {
            if (0 == clcache_enqueue_busy_list(db, *buf)) {
                set_thread_private_cache((void *)(*buf));
            } else {
                clcache_delete_buffer(buf);
            }
        }
    }

    if (NULL != *buf) {
        CSN *c_csn = NULL;
        CSN *l_csn = NULL;
        (*buf)->buf_consumer_ruv = consumer_ruv;
        (*buf)->buf_local_ruv = local_ruv;
        (*buf)->buf_load_flag = DB_MULTIPLE_KEY;
        ruv_get_largest_csn_for_replica(consumer_ruv, consumer_rid, &c_csn);
        ruv_get_largest_csn_for_replica(local_ruv, consumer_rid, &l_csn);
        if (l_csn && csn_compare(l_csn, c_csn) > 0) {
            /* The supplier has updates for the consumer RID and
             * these updates are newer than on the consumer.
             */
            (*buf)->buf_ignoreConsumerRID = 0;
        } else {
            (*buf)->buf_ignoreConsumerRID = 1;
        }
        csn_free(&c_csn);
        csn_free(&l_csn);
    } else {
        slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
                      "clcache_get_buffer - Can't allocate new buffer\n");
        rc = CL5_MEMORY_ERROR;
    }

    return rc;
}
/*
 * Returns a buffer back to the buffer pool.
 */
void
clcache_return_buffer(CLC_Buffer **buf)
{
    int i;

    slapi_log_err(SLAPI_LOG_REPL, (*buf)->buf_agmt_name,
                  "clcache_return_buffer - session end: state=%d load=%d sent=%d skipped=%d skipped_new_rid=%d "
                  "skipped_csn_gt_cons_maxcsn=%d skipped_up_to_date=%d "
                  "skipped_csn_gt_ruv=%d skipped_csn_covered=%d\n",
                  (*buf)->buf_state,
                  (*buf)->buf_load_cnt,
                  (*buf)->buf_record_cnt - (*buf)->buf_record_skipped,
                  (*buf)->buf_record_skipped, (*buf)->buf_skipped_new_rid,
                  (*buf)->buf_skipped_csn_gt_cons_maxcsn,
                  (*buf)->buf_skipped_up_to_date, (*buf)->buf_skipped_csn_gt_ruv,
                  (*buf)->buf_skipped_csn_covered);

    for (i = 0; i < (*buf)->buf_num_cscbs; i++) {
        clcache_free_cscb(&(*buf)->buf_cscbs[i]);
    }
    slapi_ch_free((void **)&(*buf)->buf_cscbs);

    if ((*buf)->buf_cursor) {
        (*buf)->buf_cursor->c_close((*buf)->buf_cursor);
        (*buf)->buf_cursor = NULL;
    }
}
/*
 * Loads a buffer from the DB.
 *
 * anchorCSN - passed in for the first load of a replication session;
 * flag      - DB_SET to load in the key CSN record,
 *             DB_NEXT to load in the records greater than the key CSN.
 * return    - DB error code instead of a cl5 one, for historical reasons.
 */
int
clcache_load_buffer(CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss)
{
    int rc = 0;
    int flag = DB_NEXT;

    if (anchorCSN)
        *anchorCSN = NULL;
    clcache_refresh_local_maxcsns(buf);

    if (buf->buf_load_cnt == 0) {
        clcache_refresh_consumer_maxcsns(buf);
        rc = clcache_initial_anchorcsn(buf, &flag);
    } else {
        rc = clcache_adjust_anchorcsn(buf, &flag);
    }

    if (rc == 0) {
        buf->buf_state = CLC_STATE_READY;
        if (anchorCSN)
            *anchorCSN = buf->buf_current_csn;
        rc = clcache_load_buffer_bulk(buf, flag);
        if (rc == DB_NOTFOUND && continue_on_miss && *continue_on_miss) {
            /* Keep replication going by using the next best start csn */
            slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                          "clcache_load_buffer - Can't load changelog buffer starting at CSN %s with flag(%s). "
                          "Trying to use an alternative start CSN.\n",
                          (char *)buf->buf_key.data,
                          flag == DB_NEXT ? "DB_NEXT" : "DB_SET");
            rc = clcache_load_buffer_bulk(buf, DB_SET_RANGE);
            if (rc == 0) {
                slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                              "clcache_load_buffer - Using alternative start iteration csn: %s \n",
                              (char *)buf->buf_key.data);
            }
            /* The use of alternative start csns may be limited; record its usage */
            (*continue_on_miss)--;
        }
        /* Reset some flag variables */
        if (rc == 0) {
            int i;
            for (i = 0; i < buf->buf_num_cscbs; i++) {
                buf->buf_cscbs[i]->state = CLC_STATE_READY;
            }
        } else {
            slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                          "clcache_load_buffer - Can't locate CSN %s in the changelog (DB rc=%d). "
                          "If replication stops, the consumer may need to be reinitialized.\n",
                          (char *)buf->buf_key.data, rc);
        }
    } else if (rc == CLC_STATE_DONE) {
        rc = DB_NOTFOUND;
    }

    if (rc != 0) {
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                      "clcache_load_buffer - rc=%d\n", rc);
    }

    return rc;
}
static int
clcache_load_buffer_bulk(CLC_Buffer *buf, int flag)
{
    DB_TXN *txn = NULL;
    DBC *cursor = NULL;
    int rc = 0;
    int tries = 0;
    int use_flag = flag;

#if 0 /* txn control did not seem to improve anything, so turn it off */
    if ( *(_pool->pl_dbenv) ) {
        txn_begin( *(_pool->pl_dbenv), NULL, &txn, 0 );
    }
#endif

    if (NULL == buf) {
        slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
                      "clcache_load_buffer_bulk - NULL buf\n");
        return rc;
    }
    if (NULL == buf->buf_busy_list) {
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
                                                         "%s%sno buf_busy_list\n",
                      buf->buf_agmt_name ? buf->buf_agmt_name : "",
                      buf->buf_agmt_name ? ": " : "");
        return rc;
    }

    PR_Lock(buf->buf_busy_list->bl_lock);
retry:
    if (0 == (rc = clcache_open_cursor(txn, buf, &cursor))) {
        if (use_flag == DB_NEXT) {
            /* For a bulk read, position the cursor before reading the next block */
            rc = cursor->c_get(cursor,
                               &buf->buf_key,
                               &buf->buf_data,
                               DB_SET);
        }

        /*
         * Continue if the error is no-mem, since we don't need to
         * load in the key record anyway with DB_SET.
         */
        if (0 == rc || DB_BUFFER_SMALL == rc) {
            rc = clcache_cursor_get(cursor, buf, use_flag);
        }
    }

    /*
     * Don't keep a cursor open across the whole replication session.
     * That had caused noticeable DB resource contention.
     */
    if (cursor) {
        cursor->c_close(cursor);
        cursor = NULL;
    }
    if ((rc == DB_LOCK_DEADLOCK) && (tries < MAX_TRIALS)) {
        PRIntervalTime interval;

        tries++;
        slapi_log_err(SLAPI_LOG_TRACE, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
                                                           "deadlock number [%d] - retrying\n",
                      tries);
        /* back off */
        interval = PR_MillisecondsToInterval(slapi_rand() % 100);
        DS_Sleep(interval);
        use_flag = flag;
        goto retry;
    }
    if ((rc == DB_LOCK_DEADLOCK) && (tries >= MAX_TRIALS)) {
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
                                                          "could not load buffer from changelog after %d tries\n",
                      tries);
    }

#if 0 /* txn control did not seem to improve anything, so turn it off */
    if ( txn ) {
        txn->commit ( txn, DB_TXN_NOSYNC );
    }
#endif

    PR_Unlock(buf->buf_busy_list->bl_lock);

    buf->buf_record_ptr = NULL;
    if (0 == rc) {
        DB_MULTIPLE_INIT(buf->buf_record_ptr, &buf->buf_data);
        if (NULL == buf->buf_record_ptr)
            rc = DB_NOTFOUND;
        else
            buf->buf_load_cnt++;
    }

    return rc;
}
/*
 * Gets the next change from the buffer.
 * *key : output - key of the next change, or NULL if there are no more changes
 * *data: output - data of the next change, or NULL if there are no more changes
 */
int
clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn)
{
    int skip = 1;
    int rc = 0;

    do {
        *key = *data = NULL;
        *keylen = *datalen = 0;

        if (buf->buf_record_ptr) {
            DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
                                 *key, *keylen, *data, *datalen);
        }

        /*
         * We're done with the current buffer. Now load the next chunk.
         */
        if (NULL == *key && CLC_STATE_READY == buf->buf_state) {
            rc = clcache_load_buffer(buf, NULL, NULL);
            if (0 == rc && buf->buf_record_ptr) {
                DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
                                     *key, *keylen, *data, *datalen);
            }
        }

        /* Compare the new change to the local and remote RUVs */
        if (NULL != *key) {
            buf->buf_record_cnt++;
            csn_init_by_string(buf->buf_current_csn, (char *)*key);
            skip = clcache_skip_change(buf);
            if (skip)
                buf->buf_record_skipped++;
        }
    } while (rc == 0 && *key && skip);

    if (NULL == *key) {
        *key = NULL;
        *csn = NULL;
        rc = DB_NOTFOUND;
    } else {
        *csn = buf->buf_current_csn;
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                      "clcache_get_next_change - load=%d rec=%d csn=%s\n",
                      buf->buf_load_cnt, buf->buf_record_cnt, (char *)*key);
    }

    return rc;
}
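
/*
 * The loop above relies on the Berkeley DB bulk-retrieval idiom. As a
 * minimal sketch of that pattern on its own (the DBT "data" is assumed to
 * have been filled by a prior c_get() with DB_MULTIPLE_KEY; the variable
 * names here are illustrative, not part of this module):
 *
 *     void *ptr, *k, *d;
 *     size_t klen, dlen;
 *
 *     DB_MULTIPLE_INIT(ptr, &data);
 *     do {
 *         k = d = NULL;
 *         DB_MULTIPLE_KEY_NEXT(ptr, &data, k, klen, d, dlen);
 *         if (k != NULL) {
 *             // process one (key, data) pair
 *         }
 *     } while (k != NULL); // a NULL key means the bulk buffer is exhausted
 */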
static void
clcache_refresh_consumer_maxcsns(CLC_Buffer *buf)
{
    int i;

    for (i = 0; i < buf->buf_num_cscbs; i++) {
        ruv_get_largest_csn_for_replica(
            buf->buf_consumer_ruv,
            buf->buf_cscbs[i]->rid,
            &buf->buf_cscbs[i]->consumer_maxcsn);
    }
}
static int
clcache_refresh_local_maxcsn(const ruv_enum_data *rid_data, void *data)
{
    struct clc_buffer *buf = (struct clc_buffer *)data;
    ReplicaId rid;
    int rc = 0;
    int i;

    rid = csn_get_replicaid(rid_data->csn);

    /* We do not handle updates that originated at the consumer unless required,
     * and we ignore RIDs that have been cleaned.
     */
    if ((rid == buf->buf_consumer_rid && buf->buf_ignoreConsumerRID) ||
        is_cleaned_rid(rid))
        return rc;

    for (i = 0; i < buf->buf_num_cscbs; i++) {
        if (buf->buf_cscbs[i]->rid == rid)
            break;
    }
    if (i >= buf->buf_num_cscbs) {
        if (i + 1 > buf->buf_max_cscbs) {
            buf->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_realloc((char *)buf->buf_cscbs,
                                                                            (i + 2) * sizeof(struct csn_seq_ctrl_block *));
            buf->buf_max_cscbs = i + 1;
        }
        buf->buf_cscbs[i] = clcache_new_cscb();
        if (buf->buf_cscbs[i] == NULL) {
            return -1;
        }
        buf->buf_cscbs[i]->rid = rid;
        buf->buf_num_cscbs++;
        /* This is the first time we have a local change for the RID;
         * we need to check what the consumer knows about it.
         */
        ruv_get_largest_csn_for_replica(
            buf->buf_consumer_ruv,
            buf->buf_cscbs[i]->rid,
            &buf->buf_cscbs[i]->consumer_maxcsn);
    }

    if (buf->buf_cscbs[i]->local_maxcsn)
        csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->prev_local_maxcsn, buf->buf_cscbs[i]->local_maxcsn);

    csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->local_maxcsn, rid_data->csn);
    csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->local_mincsn, rid_data->min_csn);
    if (buf->buf_cscbs[i]->consumer_maxcsn &&
        csn_compare(buf->buf_cscbs[i]->consumer_maxcsn, rid_data->csn) >= 0) {
        /* No changes need to be sent for this RID */
        buf->buf_cscbs[i]->state = CLC_STATE_UP_TO_DATE;
    }

    return rc;
}

static int
clcache_refresh_local_maxcsns(CLC_Buffer *buf)
{
    return ruv_enumerate_elements(buf->buf_local_ruv, clcache_refresh_local_maxcsn, buf);
}
/*
 * Algorithm:
 *
 * 1. Determine the anchorcsn for each RID.
 * 2. Determine the anchorcsn for the next load:
 *    Anchor-CSN = min { all Next-Anchor-CSN, Buffer-MaxCSN }
 */
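
/*
 * Worked example of the anchor selection (the CSN values are hypothetical
 * and only illustrate the rules implemented below): suppose the local RUV
 * snapshot covers two RIDs. For RID 1 the consumer maxcsn is 5e0...0001 and
 * the local maxcsn is 5e0...0009, so RID 1 proposes the consumer maxcsn
 * 5e0...0001 with DB_NEXT. For RID 2 the consumer maxcsn is NULL (the
 * consumer has never seen that RID), so RID 2 proposes its local mincsn,
 * say 5d0...0002, with DB_SET. The smaller proposal, 5d0...0002 with
 * DB_SET, becomes the buffer's anchor csn for the first load.
 */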
static int
clcache_initial_anchorcsn(CLC_Buffer *buf, int *flag)
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;
    CSN *anchorcsn = NULL;

    if (buf->buf_state == CLC_STATE_READY) {
        for (i = 0; i < buf->buf_num_cscbs; i++) {
            CSN *rid_anchor = NULL;
            int rid_flag = DB_NEXT;
            cscb = buf->buf_cscbs[i];

            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                char prevmax[CSN_STRSIZE];
                char local[CSN_STRSIZE];
                char curr[CSN_STRSIZE];
                char conmaxcsn[CSN_STRSIZE];
                csn_as_string(cscb->prev_local_maxcsn, 0, prevmax);
                csn_as_string(cscb->local_maxcsn, 0, local);
                csn_as_string(buf->buf_current_csn, 0, curr);
                csn_as_string(cscb->consumer_maxcsn, 0, conmaxcsn);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                              "clcache_initial_anchorcsn - "
                              "%s - (cscb %d - state %d) - csnPrevMax (%s) "
                              "csnMax (%s) csnBuf (%s) csnConsumerMax (%s)\n",
                              buf->buf_agmt_name, i, cscb->state, prevmax, local,
                              curr, conmaxcsn);
            }

            if (cscb->consumer_maxcsn == NULL) {
                /* the consumer hasn't seen changes for this RID */
                rid_anchor = cscb->local_mincsn;
                rid_flag = DB_SET;
            } else if (csn_compare(cscb->local_maxcsn, cscb->consumer_maxcsn) > 0) {
                rid_anchor = cscb->consumer_maxcsn;
            }

            if (rid_anchor && (anchorcsn == NULL ||
                               (csn_compare(rid_anchor, anchorcsn) < 0))) {
                anchorcsn = rid_anchor;
                *flag = rid_flag;
                hasChange = PR_TRUE;
            }
        }
    }

    if (!hasChange) {
        buf->buf_state = CLC_STATE_DONE;
    } else {
        csn_init_by_csn(buf->buf_current_csn, anchorcsn);
        csn_as_string(buf->buf_current_csn, 0, (char *)buf->buf_key.data);
        slapi_log_err(SLAPI_LOG_REPL, "clcache_initial_anchorcsn",
                      "anchor is now: %s\n", (char *)buf->buf_key.data);
    }

    return buf->buf_state;
}
static int
clcache_adjust_anchorcsn(CLC_Buffer *buf, int *flag)
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;
    CSN *anchorcsn = NULL;

    if (buf->buf_state == CLC_STATE_READY) {
        for (i = 0; i < buf->buf_num_cscbs; i++) {
            CSN *rid_anchor = NULL;
            int rid_flag = DB_NEXT;
            cscb = buf->buf_cscbs[i];

            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                char prevmax[CSN_STRSIZE];
                char local[CSN_STRSIZE];
                char curr[CSN_STRSIZE];
                char conmaxcsn[CSN_STRSIZE];
                csn_as_string(cscb->prev_local_maxcsn, 0, prevmax);
                csn_as_string(cscb->local_maxcsn, 0, local);
                csn_as_string(buf->buf_current_csn, 0, curr);
                csn_as_string(cscb->consumer_maxcsn, 0, conmaxcsn);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_adjust_anchorcsn - "
                                                                  "%s - (cscb %d - state %d) - csnPrevMax (%s) "
                                                                  "csnMax (%s) csnBuf (%s) csnConsumerMax (%s)\n",
                              buf->buf_agmt_name, i, cscb->state, prevmax, local,
                              curr, conmaxcsn);
            }

            if (csn_compare(cscb->local_maxcsn, cscb->consumer_maxcsn) > 0) {
                /* We have something to send for this RID */
                if (csn_compare(cscb->local_maxcsn, cscb->prev_local_maxcsn) == 0 ||
                    csn_compare(cscb->prev_local_maxcsn, buf->buf_current_csn) > 0) {
                    /* No new changes, or updates to send still remain in the buffer */
                    rid_anchor = buf->buf_current_csn;
                } else {
                    /* prev local maxcsn < csnBuffer AND different from local maxcsn */
                    if (cscb->consumer_maxcsn == NULL) {
                        /* the consumer hasn't seen changes for this RID */
                        rid_anchor = cscb->local_mincsn;
                        rid_flag = DB_SET;
                    } else {
                        rid_anchor = cscb->consumer_maxcsn;
                    }
                }
            }

            if (rid_anchor && (anchorcsn == NULL ||
                               (csn_compare(rid_anchor, anchorcsn) < 0))) {
                anchorcsn = rid_anchor;
                *flag = rid_flag;
                hasChange = PR_TRUE;
            }
        }
    }

    if (!hasChange) {
        buf->buf_state = CLC_STATE_DONE;
    } else {
        csn_init_by_csn(buf->buf_current_csn, anchorcsn);
        csn_as_string(buf->buf_current_csn, 0, (char *)buf->buf_key.data);
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_adjust_anchorcsn - "
                                                          "anchor is now: %s\n",
                      (char *)buf->buf_key.data);
    }

    return buf->buf_state;
}
static int
clcache_skip_change(CLC_Buffer *buf)
{
    struct csn_seq_ctrl_block *cscb = NULL;
    ReplicaId rid;
    int skip = 1;
    int i;
    char buf_cur_csn_str[CSN_STRSIZE];

    do {
        rid = csn_get_replicaid(buf->buf_current_csn);

        /*
         * Skip a CSN that originated from the consumer,
         * unless the CSN is newer than the maxcsn.
         * If RID==65535, the CSN originated from a
         * legacy consumer. In this case the supplier
         * and the consumer may have the same RID.
         */
        if (rid == buf->buf_consumer_rid && buf->buf_ignoreConsumerRID) {
            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                              "clcache_skip_change - Skipping update because the consumer with Rid: [%d] is ignored\n", rid);
                buf->buf_skipped_csn_gt_cons_maxcsn++;
            }
            break;
        }

        /* Skip helper entries (ENTRY_COUNT, PURGE_RUV and so on) */
        if (cl5HelperEntry(NULL, buf->buf_current_csn) == PR_TRUE) {
            slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                          "clcache_skip_change - Skip helper entry type=%ld\n", csn_get_time(buf->buf_current_csn));
            break;
        }

        /* Find the csn sequence control block for the current rid */
        for (i = 0; i < buf->buf_num_cscbs && buf->buf_cscbs[i]->rid != rid; i++)
            ;

        /* Skip a CSN whose RID is unknown to the local RUV snapshot */
        if (i >= buf->buf_num_cscbs) {
            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                              "clcache_skip_change - Skipping update because the changelog buffer current csn [%s] rid "
                              "[%d] is not in the list of changelog csn buffers (length %d)\n",
                              buf_cur_csn_str, rid, buf->buf_num_cscbs);
            }
            buf->buf_skipped_new_rid++;
            break;
        }

        cscb = buf->buf_cscbs[i];

        /* Skip if the consumer is already up-to-date for the RID */
        if (cscb->state == CLC_STATE_UP_TO_DATE) {
            buf->buf_skipped_up_to_date++;
            break;
        }

        /* Skip a CSN whose precedents are not covered by the local RUV snapshot */
        if (cscb->state == CLC_STATE_CSN_GT_RUV) {
            buf->buf_skipped_csn_gt_ruv++;
            break;
        }

        /* Skip CSNs already covered by the consumer RUV */
        if (cscb->consumer_maxcsn &&
            csn_compare(buf->buf_current_csn, cscb->consumer_maxcsn) <= 0) {
            buf->buf_skipped_csn_covered++;
            break;
        }

        /* Send CSNs that are covered by the local RUV snapshot */
        if (csn_compare(buf->buf_current_csn, cscb->local_maxcsn) <= 0) {
            skip = 0;
            csn_dup_or_init_by_csn(&cscb->consumer_maxcsn, buf->buf_current_csn);
            break;
        }

        /*
         * Promote the local maxcsn to its next neighbor
         * to keep the current session going. Skip if we
         * are not sure that current_csn is the neighbor.
         */
        if (csn_time_difference(buf->buf_current_csn, cscb->local_maxcsn) == 0 &&
            (csn_get_seqnum(buf->buf_current_csn) ==
             csn_get_seqnum(cscb->local_maxcsn) + 1)) {
            csn_init_by_csn(cscb->local_maxcsn, buf->buf_current_csn);
            if (cscb->consumer_maxcsn) {
                csn_init_by_csn(cscb->consumer_maxcsn, buf->buf_current_csn);
            }
            skip = 0;
            break;
        }

        /* Skip CSNs not covered by the local RUV snapshot */
        cscb->state = CLC_STATE_CSN_GT_RUV;
        buf->buf_skipped_csn_gt_ruv++;

    } while (0);

#ifdef DEBUG
    if (skip && cscb) {
        char consumer[24] = {'\0'};
        char local[24] = {'\0'};
        char current[24] = {'\0'};
        if (cscb->consumer_maxcsn)
            csn_as_string(cscb->consumer_maxcsn, PR_FALSE, consumer);
        if (cscb->local_maxcsn)
            csn_as_string(cscb->local_maxcsn, PR_FALSE, local);
        csn_as_string(buf->buf_current_csn, PR_FALSE, current);
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                      "clcache_skip_change - Skip %s consumer=%s local=%s\n", current, consumer, local);
    }
#endif

    return skip;
}
static struct csn_seq_ctrl_block *
clcache_new_cscb(void)
{
    struct csn_seq_ctrl_block *cscb;

    cscb = (struct csn_seq_ctrl_block *)slapi_ch_calloc(1, sizeof(struct csn_seq_ctrl_block));
    if (cscb == NULL) {
        slapi_log_err(SLAPI_LOG_ERR, NULL, "clcache: malloc failure\n");
    }

    return cscb;
}

static void
clcache_free_cscb(struct csn_seq_ctrl_block **cscb)
{
    csn_free(&(*cscb)->consumer_maxcsn);
    csn_free(&(*cscb)->local_maxcsn);
    csn_free(&(*cscb)->prev_local_maxcsn);
    csn_free(&(*cscb)->local_mincsn);
    slapi_ch_free((void **)cscb);
}
/*
 * Allocates and initializes a new buffer.
 * It is called when a buffer is requested while the
 * buffer free list is empty.
 */
static CLC_Buffer *
clcache_new_buffer(ReplicaId consumer_rid)
{
    CLC_Buffer *buf = NULL;
    int welldone = 0;

    do {
        buf = (CLC_Buffer *)slapi_ch_calloc(1, sizeof(CLC_Buffer));
        if (NULL == buf)
            break;

        buf->buf_key.flags = DB_DBT_USERMEM;
        buf->buf_key.ulen = CSN_STRSIZE + 1;
        buf->buf_key.size = CSN_STRSIZE;
        buf->buf_key.data = slapi_ch_calloc(1, buf->buf_key.ulen);
        if (NULL == buf->buf_key.data)
            break;

        buf->buf_data.flags = DB_DBT_USERMEM;
        buf->buf_data.ulen = _pool->pl_buffer_default_pages * DEFAULT_CLC_BUFFER_PAGE_SIZE;
        buf->buf_data.data = slapi_ch_malloc(buf->buf_data.ulen);
        if (NULL == buf->buf_data.data)
            break;

        if (NULL == (buf->buf_current_csn = csn_new()))
            break;

        buf->buf_state = CLC_STATE_READY;
        buf->buf_agmt_name = get_thread_private_agmtname();
        buf->buf_consumer_rid = consumer_rid;
        buf->buf_num_cscbs = 0;
        buf->buf_max_cscbs = MAX_NUM_OF_MASTERS;
        buf->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
                                                                       sizeof(struct csn_seq_ctrl_block *));

        welldone = 1;
    } while (0);

    if (!welldone) {
        clcache_delete_buffer(&buf);
    }

    return buf;
}

/*
 * Deallocates a buffer.
 * It is called when a buffer is returned to the buffer pool
 * and the pool size is over the limit.
 */
static void
clcache_delete_buffer(CLC_Buffer **buf)
{
    if (buf && *buf) {
        slapi_ch_free(&((*buf)->buf_key.data));
        slapi_ch_free(&((*buf)->buf_data.data));
        csn_free(&((*buf)->buf_current_csn));
        csn_free(&((*buf)->buf_missing_csn));
        csn_free(&((*buf)->buf_prev_missing_csn));
        slapi_ch_free((void **)buf);
    }
}
static CLC_Busy_List *
clcache_new_busy_list(void)
{
    CLC_Busy_List *bl;
    int welldone = 0;

    do {
        if (NULL == (bl = (CLC_Busy_List *)slapi_ch_calloc(1, sizeof(CLC_Busy_List))))
            break;

        if (NULL == (bl->bl_lock = PR_NewLock()))
            break;

        /*
        if ( NULL == (bl->bl_max_csn = csn_new ()) )
            break;
        */

        welldone = 1;
    } while (0);

    if (!welldone) {
        clcache_delete_busy_list(&bl);
    }

    return bl;
}

static void
clcache_delete_busy_list(CLC_Busy_List **bl)
{
    if (bl && *bl) {
        CLC_Buffer *buf = NULL;
        if ((*bl)->bl_lock) {
            PR_Lock((*bl)->bl_lock);
        }
        buf = (*bl)->bl_buffers;
        while (buf) {
            CLC_Buffer *next = buf->buf_next;
            clcache_delete_buffer(&buf);
            buf = next;
        }
        (*bl)->bl_buffers = NULL;
        (*bl)->bl_db = NULL;
        if ((*bl)->bl_lock) {
            PR_Unlock((*bl)->bl_lock);
            PR_DestroyLock((*bl)->bl_lock);
            (*bl)->bl_lock = NULL;
        }
        /* csn_free (&( (*bl)->bl_max_csn )); */
        slapi_ch_free((void **)bl);
    }
}
static int
clcache_enqueue_busy_list(DB *db, CLC_Buffer *buf)
{
    CLC_Busy_List *bl;
    int rc = 0;

    slapi_rwlock_rdlock(_pool->pl_lock);
    for (bl = _pool->pl_busy_lists; bl && bl->bl_db != db; bl = bl->bl_next)
        ;
    slapi_rwlock_unlock(_pool->pl_lock);

    if (NULL == bl) {
        if (NULL == (bl = clcache_new_busy_list())) {
            rc = CL5_MEMORY_ERROR;
        } else {
            slapi_rwlock_wrlock(_pool->pl_lock);
            bl->bl_db = db;
            bl->bl_next = _pool->pl_busy_lists;
            _pool->pl_busy_lists = bl;
            slapi_rwlock_unlock(_pool->pl_lock);
        }
    }

    if (NULL != bl) {
        PR_Lock(bl->bl_lock);
        buf->buf_busy_list = bl;
        buf->buf_next = bl->bl_buffers;
        bl->bl_buffers = buf;
        PR_Unlock(bl->bl_lock);
    }

    return rc;
}
static int
clcache_open_cursor(DB_TXN *txn, CLC_Buffer *buf, DBC **cursor)
{
    int rc;

    rc = buf->buf_busy_list->bl_db->cursor(buf->buf_busy_list->bl_db, txn, cursor, 0);
    if (rc != 0) {
        slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
                      "clcache: failed to open cursor; db error - %d %s\n",
                      rc, db_strerror(rc));
    }

    return rc;
}
static int
clcache_cursor_get(DBC *cursor, CLC_Buffer *buf, int flag)
{
    int rc;

    if (buf->buf_data.ulen > WORK_CLC_BUFFER_PAGE_SIZE) {
        /*
         * The buffer size had to be increased earlier;
         * reset it to a smaller working size. If that is
         * not sufficient, it will be increased again.
         */
        buf->buf_data.ulen = WORK_CLC_BUFFER_PAGE_SIZE;
    }

    rc = cursor->c_get(cursor,
                       &buf->buf_key,
                       &buf->buf_data,
                       buf->buf_load_flag | flag);
    if (DB_BUFFER_SMALL == rc) {
        /*
         * The record takes more space than the current size of the
         * buffer. Fortunately, buf->buf_data.size has been set by
         * c_get() to the actual data size needed. So we can
         * reallocate the data buffer and try to read again.
         */
        buf->buf_data.ulen = (buf->buf_data.size / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1) * DEFAULT_CLC_BUFFER_PAGE_SIZE;
        buf->buf_data.data = slapi_ch_realloc(buf->buf_data.data, buf->buf_data.ulen);
        if (buf->buf_data.data != NULL) {
            rc = cursor->c_get(cursor,
                               &(buf->buf_key),
                               &(buf->buf_data),
                               buf->buf_load_flag | flag);
            slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                          "clcache_cursor_get - clcache: (%d | %d) buf key len %d reallocated and retry returns %d\n",
                          buf->buf_load_flag, flag, buf->buf_key.size, rc);
        }
    }

    switch (rc) {
    case EINVAL:
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                      "clcache_cursor_get - invalid parameter\n");
        break;

    case DB_BUFFER_SMALL:
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
                      "clcache_cursor_get - can't allocate %u bytes\n", buf->buf_data.ulen);
        break;

    default:
        break;
    }

    return rc;
}
static void
csn_dup_or_init_by_csn(CSN **csn1, CSN *csn2)
{
    if (*csn1 == NULL)
        *csn1 = csn_new();
    csn_init_by_csn(*csn1, csn2);
}

void
clcache_destroy()
{
    if (_pool) {
        CLC_Busy_List *bl = NULL;
        if (_pool->pl_lock) {
            slapi_rwlock_wrlock(_pool->pl_lock);
        }

        bl = _pool->pl_busy_lists;
        while (bl) {
            CLC_Busy_List *next = bl->bl_next;
            clcache_delete_busy_list(&bl);
            bl = next;
        }
        _pool->pl_busy_lists = NULL;
        _pool->pl_dbenv = NULL;
        if (_pool->pl_lock) {
            slapi_rwlock_unlock(_pool->pl_lock);
            slapi_destroy_rwlock(_pool->pl_lock);
            _pool->pl_lock = NULL;
        }
        slapi_ch_free((void **)&_pool);
    }
}