/* cl5_clcache.c */
  1. /** BEGIN COPYRIGHT BLOCK
  2. * Copyright (C) 2005 Red Hat, Inc.
  3. * All rights reserved.
  4. *
  5. * License: GPL (version 3 or any later version).
  6. * See LICENSE for details.
  7. * END COPYRIGHT BLOCK **/
  8. #ifdef HAVE_CONFIG_H
  9. # include <config.h>
  10. #endif
  11. #include "errno.h" /* ENOMEM, EVAL used by Berkeley DB */
  12. #include "db.h" /* Berkeley DB */
  13. #include "cl5.h" /* changelog5Config */
  14. #include "cl5_clcache.h"
  15. /* newer bdb uses DB_BUFFER_SMALL instead of ENOMEM as the
  16. error return if the given buffer in which to load a
  17. key or value is too small - if it is not defined, define
  18. it here to ENOMEM
  19. */
  20. #ifndef DB_BUFFER_SMALL
  21. #define DB_BUFFER_SMALL ENOMEM
  22. #endif
  23. /*
  24. * Constants for the buffer pool:
  25. *
  26. * DEFAULT_CLC_BUFFER_PAGE_COUNT
  27. * Little performance boost if it is too small.
  28. *
  29. * DEFAULT_CLC_BUFFER_PAGE_SIZE
  30. * Its value is determined based on the DB requirement that
  31. * the buffer size should be the multiple of 1024.
  32. */
  33. #define DEFAULT_CLC_BUFFER_COUNT_MIN 10
  34. #define DEFAULT_CLC_BUFFER_COUNT_MAX 0
  35. #define DEFAULT_CLC_BUFFER_PAGE_COUNT 32
  36. #define DEFAULT_CLC_BUFFER_PAGE_SIZE 1024
  37. #define WORK_CLC_BUFFER_PAGE_SIZE 8*DEFAULT_CLC_BUFFER_PAGE_SIZE
/* Iteration states shared by the buffer and its per-RID control blocks. */
enum {
    CLC_STATE_READY = 0,         /* ready to iterate */
    CLC_STATE_UP_TO_DATE,        /* remote RUV already covers the CSN */
    CLC_STATE_CSN_GT_RUV,        /* local RUV doesn't cover the CSN */
    CLC_STATE_NEW_RID,           /* unknown RID to local RUVs */
    CLC_STATE_UNSAFE_RUV_CHANGE, /* (RUV1 < maxcsn-in-buffer) && (RUV1 < RUV1') */
    CLC_STATE_DONE,              /* no more change */
    CLC_STATE_ABORTING           /* abort replication session */
};
typedef struct clc_busy_list CLC_Busy_List;

/*
 * Per-RID bookkeeping used to decide which CSNs should be sent
 * to the consumer during a replication session.
 */
struct csn_seq_ctrl_block
{
    ReplicaId rid;          /* RID this block serves */
    CSN *consumer_maxcsn;   /* Don't send CSN <= this */
    CSN *local_maxcsn;      /* Don't send CSN > this */
    CSN *prev_local_maxcsn; /* Copy of last state at buffer loading */
    CSN *local_mincsn;      /* Used to determine anchor csn */
    int state;              /* CLC_STATE_* */
};
/*
 * Each cl5replayiterator acquires a buffer from the buffer pool
 * at the beginning of a replication session, and returns it back
 * at the end.
 */
struct clc_buffer
{
    char *buf_agmt_name;         /* agreement acquired this buffer */
    ReplicaId buf_consumer_rid;  /* help checking threshold csn */
    const RUV *buf_consumer_ruv; /* used to skip change */
    const RUV *buf_local_ruv;    /* used to refresh local_maxcsn */
    int buf_ignoreConsumerRID;   /* how to handle updates from consumer */
    int buf_load_cnt;            /* number of loads for session */

    /*
     * fields for retrieving data from DB
     */
    int buf_state;        /* CLC_STATE_* of the current iteration */
    CSN *buf_current_csn; /* CSN of the change most recently read */
    int buf_load_flag;    /* db flag DB_MULTIPLE_KEY, DB_SET, DB_NEXT */
    DBC *buf_cursor;      /* short-lived cursor; closed after each bulk load */
    DBT buf_key;          /* current csn string */
    DBT buf_data;         /* data retrieved from db */
    void *buf_record_ptr; /* ptr to the current record in data */
    CSN *buf_missing_csn; /* used to detect persistent missing of CSN */
    CSN *buf_prev_missing_csn; /* used to suppress the repeated messages */

    /* fields for control the CSN sequence sent to the consumer */
    struct csn_seq_ctrl_block **buf_cscbs;
    int buf_num_cscbs; /* number of csn sequence ctrl blocks */
    int buf_max_cscbs; /* capacity (minus sentinel) of buf_cscbs array */

    /* fields for debugging stat */
    int buf_record_cnt;                 /* number of changes for session */
    int buf_record_skipped;             /* number of changes skipped */
    int buf_skipped_new_rid;            /* number of changes skipped due to new_rid */
    int buf_skipped_csn_gt_cons_maxcsn; /* number of changes skipped due to csn greater than consumer maxcsn */
    int buf_skipped_up_to_date;         /* number of changes skipped due to consumer being up-to-date for the given rid */
    int buf_skipped_csn_gt_ruv;         /* number of changes skipped due to precedents are not covered by local RUV snapshot */
    int buf_skipped_csn_covered;        /* number of changes skipped due to CSNs already covered by consumer RUV */

    /*
     * fields that should be accessed via bl_lock or pl_lock
     */
    CLC_Buffer *buf_next;         /* next buffer in the same list */
    CLC_Busy_List *buf_busy_list; /* which busy list I'm in */
};
/*
 * Each changelog has a busy buffer list: the set of buffers currently
 * checked out against that changelog's DB handle.
 */
struct clc_busy_list
{
    PRLock *bl_lock;         /* serializes access to bl_buffers and DB reads */
    DB *bl_db;               /* changelog db handle */
    CLC_Buffer *bl_buffers;  /* busy buffers of this list */
    CLC_Busy_List *bl_next;  /* next busy list in the pool */
};
/*
 * Each process has a buffer pool (singleton, see _pool below).
 */
struct clc_pool
{
    Slapi_RWLock *pl_lock;        /* cl writer and agreements */
    DB_ENV **pl_dbenv;            /* pointer to DB_ENV for all the changelog files */
    CLC_Busy_List *pl_busy_lists; /* busy buffer lists, one list per changelog file */
    int pl_buffer_cnt_now;        /* total number of buffers */
    int pl_buffer_cnt_min;        /* free a newly returned buffer if _now > _min */
    int pl_buffer_cnt_max;        /* no use */
    int pl_buffer_default_pages;  /* num of pages in a new buffer */
};
/* static variables */
static struct clc_pool *_pool = NULL; /* process's buffer pool (singleton) */

/* static prototypes */
static int clcache_initial_anchorcsn(CLC_Buffer *buf, int *flag);
static int clcache_adjust_anchorcsn(CLC_Buffer *buf, int *flag);
static void clcache_refresh_consumer_maxcsns(CLC_Buffer *buf);
static int clcache_refresh_local_maxcsns(CLC_Buffer *buf);
static int clcache_skip_change(CLC_Buffer *buf);
static int clcache_load_buffer_bulk(CLC_Buffer *buf, int flag);
static int clcache_open_cursor(DB_TXN *txn, CLC_Buffer *buf, DBC **cursor);
static int clcache_cursor_get(DBC *cursor, CLC_Buffer *buf, int flag);
static struct csn_seq_ctrl_block *clcache_new_cscb(void);
static void clcache_free_cscb(struct csn_seq_ctrl_block **cscb);
static CLC_Buffer *clcache_new_buffer(ReplicaId consumer_rid);
static void clcache_delete_buffer(CLC_Buffer **buf);
static CLC_Busy_List *clcache_new_busy_list(void);
static void clcache_delete_busy_list(CLC_Busy_List **bl);
static int clcache_enqueue_busy_list(DB *db, CLC_Buffer *buf);
static void csn_dup_or_init_by_csn(CSN **csn1, CSN *csn2);
  138. /*
  139. * Initiates the process buffer pool. This should be done
  140. * once and only once when process starts.
  141. */
  142. int
  143. clcache_init ( DB_ENV **dbenv )
  144. {
  145. if (_pool) {
  146. return 0; /* already initialized */
  147. }
  148. if (NULL == dbenv) {
  149. return -1;
  150. }
  151. _pool = (struct clc_pool*) slapi_ch_calloc ( 1, sizeof ( struct clc_pool ));
  152. _pool->pl_dbenv = dbenv;
  153. _pool->pl_buffer_cnt_min = DEFAULT_CLC_BUFFER_COUNT_MIN;
  154. _pool->pl_buffer_cnt_max = DEFAULT_CLC_BUFFER_COUNT_MAX;
  155. _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_COUNT_MAX;
  156. _pool->pl_lock = slapi_new_rwlock ();
  157. return 0;
  158. }
  159. /*
  160. * This is part of a callback function when changelog configuration
  161. * is read or updated.
  162. */
  163. void
  164. clcache_set_config ()
  165. {
  166. slapi_rwlock_wrlock ( _pool->pl_lock );
  167. _pool->pl_buffer_cnt_max = CL5_DEFAULT_CONFIG_CACHESIZE;
  168. /*
  169. * According to http://www.sleepycat.com/docs/api_c/dbc_get.html,
  170. * data buffer should be a multiple of 1024 bytes in size
  171. * for DB_MULTIPLE_KEY operation.
  172. */
  173. _pool->pl_buffer_default_pages = CL5_DEFAULT_CONFIG_CACHEMEMSIZE / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1;
  174. if ( _pool->pl_buffer_default_pages <= 0 ) { /* this never be true... */
  175. _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_PAGE_COUNT;
  176. }
  177. slapi_rwlock_unlock ( _pool->pl_lock );
  178. }
/*
 * Gets the pointer to a thread dedicated buffer, or allocates
 * a new buffer if there is no buffer allocated yet for this thread.
 *
 * This is called when a cl5replayiterator is created for
 * a replication session.
 *
 * buf          - output; receives the reused or newly allocated buffer
 * db           - changelog db handle the buffer will read from
 * consumer_rid - RID of the consumer for this session
 * consumer_ruv - consumer RUV; borrowed for the session, not copied
 * local_ruv    - local RUV; borrowed for the session, not copied
 * Returns 0, CL5_BAD_DATA (NULL buf), or CL5_MEMORY_ERROR.
 */
int
clcache_get_buffer(CLC_Buffer **buf, DB *db, ReplicaId consumer_rid, const RUV *consumer_ruv, const RUV *local_ruv)
{
    int rc = 0;
    int need_new;

    if (buf == NULL)
        return CL5_BAD_DATA;

    *buf = NULL;

    /* if the pool was re-initialized, the thread private cache will be invalid,
       so we must get a new one */
    need_new = (!_pool || !_pool->pl_busy_lists || !_pool->pl_busy_lists->bl_buffers);

    if ((!need_new) && (NULL != (*buf = (CLC_Buffer *)get_thread_private_cache()))) {
        slapi_log_err(SLAPI_LOG_REPL, get_thread_private_agmtname(),
                      "clcache_get_buffer - found thread private buffer cache %p\n", *buf);
        slapi_log_err(SLAPI_LOG_REPL, get_thread_private_agmtname(),
                      "clcache_get_buffer - _pool is %p _pool->pl_busy_lists is %p _pool->pl_busy_lists->bl_buffers is %p\n",
                      _pool, _pool ? _pool->pl_busy_lists : NULL,
                      (_pool && _pool->pl_busy_lists) ? _pool->pl_busy_lists->bl_buffers : NULL);
        /* Reuse the thread's buffer: reset all per-session state */
        (*buf)->buf_state = CLC_STATE_READY;
        (*buf)->buf_load_cnt = 0;
        (*buf)->buf_record_cnt = 0;
        (*buf)->buf_record_skipped = 0;
        (*buf)->buf_cursor = NULL;
        (*buf)->buf_skipped_new_rid = 0;
        (*buf)->buf_skipped_csn_gt_cons_maxcsn = 0;
        (*buf)->buf_skipped_up_to_date = 0;
        (*buf)->buf_skipped_csn_gt_ruv = 0;
        (*buf)->buf_skipped_csn_covered = 0;
        /* fresh cscb array; the previous one was freed by clcache_return_buffer */
        (*buf)->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
                                                                          sizeof(struct csn_seq_ctrl_block *));
        (*buf)->buf_num_cscbs = 0;
        (*buf)->buf_max_cscbs = MAX_NUM_OF_MASTERS;
    } else {
        /* No usable thread-private buffer: allocate and register a new one */
        *buf = clcache_new_buffer(consumer_rid);
        if (*buf) {
            if (0 == clcache_enqueue_busy_list(db, *buf)) {
                set_thread_private_cache((void *)(*buf));
            } else {
                clcache_delete_buffer(buf);
            }
        }
    }
    if (NULL != *buf) {
        CSN *c_csn = NULL;
        CSN *l_csn = NULL;
        (*buf)->buf_consumer_ruv = consumer_ruv;
        (*buf)->buf_local_ruv = local_ruv;
        (*buf)->buf_load_flag = DB_MULTIPLE_KEY;
        ruv_get_largest_csn_for_replica(consumer_ruv, consumer_rid, &c_csn);
        ruv_get_largest_csn_for_replica(local_ruv, consumer_rid, &l_csn);
        if (l_csn && csn_compare(l_csn, c_csn) > 0) {
            /* the supplier has updates for the consumer RID and
             * these updates are newer than on the consumer
             */
            (*buf)->buf_ignoreConsumerRID = 0;
        } else {
            (*buf)->buf_ignoreConsumerRID = 1;
        }
        csn_free(&c_csn);
        csn_free(&l_csn);
    } else {
        slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
                      "clcache_get_buffer - Can't allocate new buffer\n");
        rc = CL5_MEMORY_ERROR;
    }
    return rc;
}
  255. /*
  256. * Returns a buffer back to the buffer pool.
  257. */
  258. void
  259. clcache_return_buffer ( CLC_Buffer **buf )
  260. {
  261. int i;
  262. slapi_log_err(SLAPI_LOG_REPL, (*buf)->buf_agmt_name,
  263. "clcache_return_buffer - session end: state=%d load=%d sent=%d skipped=%d skipped_new_rid=%d "
  264. "skipped_csn_gt_cons_maxcsn=%d skipped_up_to_date=%d "
  265. "skipped_csn_gt_ruv=%d skipped_csn_covered=%d\n",
  266. (*buf)->buf_state,
  267. (*buf)->buf_load_cnt,
  268. (*buf)->buf_record_cnt - (*buf)->buf_record_skipped,
  269. (*buf)->buf_record_skipped, (*buf)->buf_skipped_new_rid,
  270. (*buf)->buf_skipped_csn_gt_cons_maxcsn,
  271. (*buf)->buf_skipped_up_to_date, (*buf)->buf_skipped_csn_gt_ruv,
  272. (*buf)->buf_skipped_csn_covered);
  273. for ( i = 0; i < (*buf)->buf_num_cscbs; i++ ) {
  274. clcache_free_cscb ( &(*buf)->buf_cscbs[i] );
  275. }
  276. slapi_ch_free((void **)&(*buf)->buf_cscbs);
  277. if ( (*buf)->buf_cursor ) {
  278. (*buf)->buf_cursor->c_close ( (*buf)->buf_cursor );
  279. (*buf)->buf_cursor = NULL;
  280. }
  281. }
  282. /*
  283. * Loads a buffer from DB.
  284. *
  285. * anchorcsn - passed in for the first load of a replication session;
  286. * flag - DB_SET to load in the key CSN record.
  287. * DB_NEXT to load in the records greater than key CSN.
  288. * return - DB error code instead of cl5 one because of the
  289. * historic reason.
  290. */
  291. int
  292. clcache_load_buffer ( CLC_Buffer *buf, CSN **anchorCSN, int *continue_on_miss )
  293. {
  294. int rc = 0;
  295. int flag = DB_NEXT;
  296. if (anchorCSN) *anchorCSN = NULL;
  297. clcache_refresh_local_maxcsns ( buf );
  298. if (buf->buf_load_cnt == 0 ) {
  299. clcache_refresh_consumer_maxcsns ( buf );
  300. rc = clcache_initial_anchorcsn ( buf, &flag );
  301. } else {
  302. rc = clcache_adjust_anchorcsn ( buf, &flag );
  303. }
  304. if ( rc == 0 ) {
  305. buf->buf_state = CLC_STATE_READY;
  306. if (anchorCSN) *anchorCSN = buf->buf_current_csn;
  307. rc = clcache_load_buffer_bulk ( buf, flag );
  308. if (rc == DB_NOTFOUND && continue_on_miss && *continue_on_miss) {
  309. /* make replication going using next best startcsn */
  310. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  311. "clcache_load_buffer - Can't load changelog buffer starting at CSN %s with flag(%s). "
  312. "Trying to use an alterantive start CSN.\n",
  313. (char*)buf->buf_key.data,
  314. flag==DB_NEXT?"DB_NEXT":"DB_SET" );
  315. rc = clcache_load_buffer_bulk ( buf, DB_SET_RANGE );
  316. if (rc == 0) {
  317. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  318. "clcache_load_buffer - Using alternative start iteration csn: %s \n",
  319. (char*)buf->buf_key.data);
  320. }
  321. /* the use of alternative start csns can be limited, record its usage */
  322. (*continue_on_miss)--;
  323. }
  324. /* Reset some flag variables */
  325. if ( rc == 0 ) {
  326. int i;
  327. for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
  328. buf->buf_cscbs[i]->state = CLC_STATE_READY;
  329. }
  330. }
  331. else {
  332. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  333. "clcache_load_buffer - Can't locate CSN %s in the changelog (DB rc=%d). "
  334. "If replication stops, the consumer may need to be reinitialized.\n",
  335. (char*)buf->buf_key.data, rc );
  336. }
  337. } else if (rc == CLC_STATE_DONE) {
  338. rc = DB_NOTFOUND;
  339. }
  340. if ( rc != 0 ) {
  341. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  342. "clcache_load_buffer - rc=%d\n", rc );
  343. }
  344. return rc;
  345. }
  346. static int
  347. clcache_load_buffer_bulk ( CLC_Buffer *buf, int flag )
  348. {
  349. DB_TXN *txn = NULL;
  350. DBC *cursor = NULL;
  351. int rc = 0;
  352. int tries = 0;
  353. int use_flag = flag;
  354. #if 0 /* txn control seems not improving anything so turn it off */
  355. if ( *(_pool->pl_dbenv) ) {
  356. txn_begin( *(_pool->pl_dbenv), NULL, &txn, 0 );
  357. }
  358. #endif
  359. if (NULL == buf) {
  360. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
  361. "clcache_load_buffer_bulk - NULL buf\n" );
  362. return rc;
  363. }
  364. if (NULL == buf->buf_busy_list) {
  365. slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
  366. "%s%sno buf_busy_list\n",
  367. buf->buf_agmt_name?buf->buf_agmt_name:"",
  368. buf->buf_agmt_name?": ":"" );
  369. return rc;
  370. }
  371. PR_Lock ( buf->buf_busy_list->bl_lock );
  372. retry:
  373. if ( 0 == ( rc = clcache_open_cursor ( txn, buf, &cursor )) ) {
  374. if ( use_flag == DB_NEXT ) {
  375. /* For bulk read, position the cursor before read the next block */
  376. rc = cursor->c_get ( cursor,
  377. & buf->buf_key,
  378. & buf->buf_data,
  379. DB_SET );
  380. }
  381. /*
  382. * Continue if the error is no-mem since we don't need to
  383. * load in the key record anyway with DB_SET.
  384. */
  385. if ( 0 == rc || DB_BUFFER_SMALL == rc ) {
  386. rc = clcache_cursor_get ( cursor, buf, use_flag );
  387. }
  388. }
  389. /*
  390. * Don't keep a cursor open across the whole replication session.
  391. * That had caused noticeable DB resource contention.
  392. */
  393. if ( cursor ) {
  394. cursor->c_close ( cursor );
  395. cursor = NULL;
  396. }
  397. if ((rc == DB_LOCK_DEADLOCK) && (tries < MAX_TRIALS)) {
  398. PRIntervalTime interval;
  399. tries++;
  400. slapi_log_err(SLAPI_LOG_TRACE, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
  401. "deadlock number [%d] - retrying\n", tries );
  402. /* back off */
  403. interval = PR_MillisecondsToInterval(slapi_rand() % 100);
  404. DS_Sleep(interval);
  405. use_flag = flag;
  406. goto retry;
  407. }
  408. if ((rc == DB_LOCK_DEADLOCK) && (tries >= MAX_TRIALS)) {
  409. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_load_buffer_bulk - "
  410. "could not load buffer from changelog after %d tries\n", tries );
  411. }
  412. #if 0 /* txn control seems not improving anything so turn it off */
  413. if ( txn ) {
  414. txn->commit ( txn, DB_TXN_NOSYNC );
  415. }
  416. #endif
  417. PR_Unlock ( buf->buf_busy_list->bl_lock );
  418. buf->buf_record_ptr = NULL;
  419. if ( 0 == rc ) {
  420. DB_MULTIPLE_INIT ( buf->buf_record_ptr, &buf->buf_data );
  421. if ( NULL == buf->buf_record_ptr )
  422. rc = DB_NOTFOUND;
  423. else
  424. buf->buf_load_cnt++;
  425. }
  426. return rc;
  427. }
/*
 * Gets the next change from the buffer.
 * *key : output - key of the next change, or NULL if no more change
 * *data: output - data of the next change, or NULL if no more change
 * *csn : output - CSN of the returned change (points into buf), or NULL
 * Returns 0 on success, DB_NOTFOUND when the changelog is exhausted.
 * Changes filtered out by clcache_skip_change() are consumed silently;
 * the loop only returns keys the consumer should actually receive.
 */
int
clcache_get_next_change(CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn)
{
    int skip = 1;
    int rc = 0;

    do {
        *key = *data = NULL;
        *keylen = *datalen = 0;

        if (buf->buf_record_ptr) {
            /* advance within the already-loaded bulk chunk */
            DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
                                 *key, *keylen, *data, *datalen);
        }

        /*
         * We're done with the current buffer. Now load the next chunk.
         */
        if (NULL == *key && CLC_STATE_READY == buf->buf_state) {
            rc = clcache_load_buffer(buf, NULL, NULL);
            if (0 == rc && buf->buf_record_ptr) {
                DB_MULTIPLE_KEY_NEXT(buf->buf_record_ptr, &buf->buf_data,
                                     *key, *keylen, *data, *datalen);
            }
        }

        /* Compare the new change to the local and remote RUVs */
        if (NULL != *key) {
            buf->buf_record_cnt++;
            /* the key is the CSN string of the change */
            csn_init_by_string(buf->buf_current_csn, (char *)*key);
            skip = clcache_skip_change(buf);
            if (skip)
                buf->buf_record_skipped++;
        }
    } while (rc == 0 && *key && skip);

    if (NULL == *key) {
        *key = NULL;
        *csn = NULL;
        rc = DB_NOTFOUND;
    } else {
        *csn = buf->buf_current_csn;
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                      "clcache_get_next_change - load=%d rec=%d csn=%s\n",
                      buf->buf_load_cnt, buf->buf_record_cnt, (char *)*key);
    }

    return rc;
}
  477. static void
  478. clcache_refresh_consumer_maxcsns ( CLC_Buffer *buf )
  479. {
  480. int i;
  481. for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
  482. ruv_get_largest_csn_for_replica (
  483. buf->buf_consumer_ruv,
  484. buf->buf_cscbs[i]->rid,
  485. &buf->buf_cscbs[i]->consumer_maxcsn );
  486. }
  487. }
/*
 * ruv_enumerate_elements() callback: refreshes the control block for
 * the RID carried in rid_data with the local RUV's max/min CSNs,
 * creating (and growing the array for) the control block on first
 * sight of the RID. Marks the block UP_TO_DATE when the consumer's
 * max CSN already covers the local max.
 * Returns 0 on success or when the RID is skipped, -1 on allocation failure.
 */
static int
clcache_refresh_local_maxcsn(const ruv_enum_data *rid_data, void *data)
{
    struct clc_buffer *buf = (struct clc_buffer *)data;
    ReplicaId rid;
    int rc = 0;
    int i;

    rid = csn_get_replicaid(rid_data->csn);

    /* we do not handle updates originated at the consumer if not required
     * and we ignore RID which have been cleaned
     */
    if ((rid == buf->buf_consumer_rid && buf->buf_ignoreConsumerRID) ||
        is_cleaned_rid(rid))
        return rc;

    /* linear scan for the existing control block of this RID */
    for (i = 0; i < buf->buf_num_cscbs; i++) {
        if (buf->buf_cscbs[i]->rid == rid)
            break;
    }
    if (i >= buf->buf_num_cscbs) {
        /* not found: grow the array if full, then append a new block */
        if (i + 1 > buf->buf_max_cscbs) {
            buf->buf_cscbs = (struct csn_seq_ctrl_block **)slapi_ch_realloc((char *)buf->buf_cscbs,
                                                                            (i + 2) * sizeof(struct csn_seq_ctrl_block *));
            buf->buf_max_cscbs = i + 1;
        }
        buf->buf_cscbs[i] = clcache_new_cscb();
        if (buf->buf_cscbs[i] == NULL) {
            return -1;
        }
        buf->buf_cscbs[i]->rid = rid;
        buf->buf_num_cscbs++;
        /* this is the first time we have a local change for the RID
         * we need to check what the consumer knows about it.
         */
        ruv_get_largest_csn_for_replica(
            buf->buf_consumer_ruv,
            buf->buf_cscbs[i]->rid,
            &buf->buf_cscbs[i]->consumer_maxcsn);
    }

    /* keep a copy of the previous local max before overwriting it */
    if (buf->buf_cscbs[i]->local_maxcsn)
        csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->prev_local_maxcsn, buf->buf_cscbs[i]->local_maxcsn);
    csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->local_maxcsn, rid_data->csn);
    csn_dup_or_init_by_csn(&buf->buf_cscbs[i]->local_mincsn, rid_data->min_csn);
    if (buf->buf_cscbs[i]->consumer_maxcsn &&
        csn_compare(buf->buf_cscbs[i]->consumer_maxcsn, rid_data->csn) >= 0) {
        /* No change need to be sent for this RID */
        buf->buf_cscbs[i]->state = CLC_STATE_UP_TO_DATE;
    }

    return rc;
}
  537. static int
  538. clcache_refresh_local_maxcsns ( CLC_Buffer *buf )
  539. {
  540. return ruv_enumerate_elements ( buf->buf_local_ruv, clcache_refresh_local_maxcsn, buf );
  541. }
/*
 * Algorithm:
 *
 * 1. Determine anchorcsn for each RID:
 * 2. Determine anchorcsn for next load:
 *    Anchor-CSN = min { all Next-Anchor-CSN, Buffer-MaxCSN }
 *
 * Picks the starting CSN for the FIRST bulk load of a session: the
 * smallest per-RID anchor across all control blocks. Sets *flag to
 * DB_SET when starting from a RID's min CSN (consumer knows nothing
 * about that RID), otherwise leaves DB_NEXT.
 * Returns buf->buf_state: CLC_STATE_READY when an anchor was found,
 * CLC_STATE_DONE when nothing needs to be sent.
 */
static int
clcache_initial_anchorcsn(CLC_Buffer *buf, int *flag)
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;
    CSN *anchorcsn = NULL;

    if (buf->buf_state == CLC_STATE_READY) {
        for (i = 0; i < buf->buf_num_cscbs; i++) {
            CSN *rid_anchor = NULL;
            int rid_flag = DB_NEXT;
            cscb = buf->buf_cscbs[i];

            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                char prevmax[CSN_STRSIZE];
                char local[CSN_STRSIZE];
                char curr[CSN_STRSIZE];
                char conmaxcsn[CSN_STRSIZE];
                csn_as_string(cscb->prev_local_maxcsn, 0, prevmax);
                csn_as_string(cscb->local_maxcsn, 0, local);
                csn_as_string(buf->buf_current_csn, 0, curr);
                csn_as_string(cscb->consumer_maxcsn, 0, conmaxcsn);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                              "clcache_initial_anchorcsn - "
                              "%s - (cscb %d - state %d) - csnPrevMax (%s) "
                              "csnMax (%s) csnBuf (%s) csnConsumerMax (%s)\n",
                              buf->buf_agmt_name, i, cscb->state, prevmax, local,
                              curr, conmaxcsn);
            }

            if (cscb->consumer_maxcsn == NULL) {
                /* the consumer hasn't seen changes for this RID */
                rid_anchor = cscb->local_mincsn;
                rid_flag = DB_SET;
            } else if (csn_compare(cscb->local_maxcsn, cscb->consumer_maxcsn) > 0) {
                rid_anchor = cscb->consumer_maxcsn;
            }

            /* keep the smallest anchor seen so far across RIDs */
            if (rid_anchor && (anchorcsn == NULL ||
                               (csn_compare(rid_anchor, anchorcsn) < 0))) {
                anchorcsn = rid_anchor;
                *flag = rid_flag;
                hasChange = PR_TRUE;
            }
        }
    }

    if (!hasChange) {
        buf->buf_state = CLC_STATE_DONE;
    } else {
        csn_init_by_csn(buf->buf_current_csn, anchorcsn);
        /* NOTE(review): writes the CSN string into buf_key.data — assumes
         * buf_key.data was allocated at least CSN_STRSIZE bytes by the
         * buffer constructor; confirm in clcache_new_buffer. */
        csn_as_string(buf->buf_current_csn, 0, (char *)buf->buf_key.data);
        slapi_log_err(SLAPI_LOG_REPL, "clcache_initial_anchorcsn",
                      "anchor is now: %s\n", (char *)buf->buf_key.data);
    }

    return buf->buf_state;
}
/*
 * Recomputes the anchor CSN for SUBSEQUENT bulk loads of a session
 * (counterpart of clcache_initial_anchorcsn for buf_load_cnt > 0).
 * For each RID with pending changes it continues from the current
 * buffer position when the local max is unchanged (or the previous
 * max is still ahead of the buffer), otherwise restarts from the
 * consumer's max CSN (or the RID's min CSN with DB_SET when the
 * consumer has seen nothing). The smallest such anchor wins.
 * Returns buf->buf_state: CLC_STATE_READY or CLC_STATE_DONE.
 */
static int
clcache_adjust_anchorcsn(CLC_Buffer *buf, int *flag)
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;
    CSN *anchorcsn = NULL;

    if (buf->buf_state == CLC_STATE_READY) {
        for (i = 0; i < buf->buf_num_cscbs; i++) {
            CSN *rid_anchor = NULL;
            int rid_flag = DB_NEXT;
            cscb = buf->buf_cscbs[i];

            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                char prevmax[CSN_STRSIZE];
                char local[CSN_STRSIZE];
                char curr[CSN_STRSIZE];
                char conmaxcsn[CSN_STRSIZE];
                csn_as_string(cscb->prev_local_maxcsn, 0, prevmax);
                csn_as_string(cscb->local_maxcsn, 0, local);
                csn_as_string(buf->buf_current_csn, 0, curr);
                csn_as_string(cscb->consumer_maxcsn, 0, conmaxcsn);
                slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_adjust_anchorcsn - "
                                                                  "%s - (cscb %d - state %d) - csnPrevMax (%s) "
                                                                  "csnMax (%s) csnBuf (%s) csnConsumerMax (%s)\n",
                              buf->buf_agmt_name, i, cscb->state, prevmax, local,
                              curr, conmaxcsn);
            }

            if (csn_compare(cscb->local_maxcsn, cscb->consumer_maxcsn) > 0) {
                /* We have something to send for this RID */
                if (csn_compare(cscb->local_maxcsn, cscb->prev_local_maxcsn) == 0 ||
                    csn_compare(cscb->prev_local_maxcsn, buf->buf_current_csn) > 0) {
                    /* No new changes or it remains, in the buffer, updates to send */
                    rid_anchor = buf->buf_current_csn;
                } else {
                    /* prev local max csn < csnBuffer AND different from local maxcsn */
                    if (cscb->consumer_maxcsn == NULL) {
                        /* the consumer hasn't seen changes for this RID */
                        rid_anchor = cscb->local_mincsn;
                        rid_flag = DB_SET;
                    } else {
                        rid_anchor = cscb->consumer_maxcsn;
                    }
                }
            }

            /* keep the smallest anchor seen so far across RIDs */
            if (rid_anchor && (anchorcsn == NULL ||
                               (csn_compare(rid_anchor, anchorcsn) < 0))) {
                anchorcsn = rid_anchor;
                *flag = rid_flag;
                hasChange = PR_TRUE;
            }
        }
    }

    if (!hasChange) {
        buf->buf_state = CLC_STATE_DONE;
    } else {
        csn_init_by_csn(buf->buf_current_csn, anchorcsn);
        /* NOTE(review): assumes buf_key.data holds at least CSN_STRSIZE
         * bytes — same assumption as clcache_initial_anchorcsn. */
        csn_as_string(buf->buf_current_csn, 0, (char *)buf->buf_key.data);
        slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name, "clcache_adjust_anchorcsn - "
                                                          "anchor is now: %s\n",
                      (char *)buf->buf_key.data);
    }

    return buf->buf_state;
}
  664. static int
  665. clcache_skip_change ( CLC_Buffer *buf )
  666. {
  667. struct csn_seq_ctrl_block *cscb = NULL;
  668. ReplicaId rid;
  669. int skip = 1;
  670. int i;
  671. char buf_cur_csn_str[CSN_STRSIZE];
  672. do {
  673. rid = csn_get_replicaid ( buf->buf_current_csn );
  674. /*
  675. * Skip CSN that is originated from the consumer,
  676. * unless the CSN is newer than the maxcsn.
  677. * If RID==65535, the CSN is originated from a
  678. * legacy consumer. In this case the supplier
  679. * and the consumer may have the same RID.
  680. */
  681. if (rid == buf->buf_consumer_rid && buf->buf_ignoreConsumerRID){
  682. if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
  683. csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
  684. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  685. "clcache_skip_change - Skipping update because the consumer with Rid: [%d] is ignored\n", rid);
  686. buf->buf_skipped_csn_gt_cons_maxcsn++;
  687. }
  688. break;
  689. }
  690. /* Skip helper entry (ENTRY_COUNT, PURGE_RUV and so on) */
  691. if ( cl5HelperEntry ( NULL, buf->buf_current_csn ) == PR_TRUE ) {
  692. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  693. "clcache_skip_change - Skip helper entry type=%ld\n", csn_get_time( buf->buf_current_csn ));
  694. break;
  695. }
  696. /* Find csn sequence control block for the current rid */
  697. for (i = 0; i < buf->buf_num_cscbs && buf->buf_cscbs[i]->rid != rid; i++);
  698. /* Skip CSN whose RID is unknown to the local RUV snapshot */
  699. if ( i >= buf->buf_num_cscbs ) {
  700. if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
  701. csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
  702. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  703. "clcache_skip_change - Skipping update because the changelog buffer current csn [%s] rid "
  704. "[%d] is not in the list of changelog csn buffers (length %d)\n",
  705. buf_cur_csn_str, rid, buf->buf_num_cscbs);
  706. }
  707. buf->buf_skipped_new_rid++;
  708. break;
  709. }
  710. cscb = buf->buf_cscbs[i];
  711. /* Skip if the consumer is already up-to-date for the RID */
  712. if ( cscb->state == CLC_STATE_UP_TO_DATE ) {
  713. buf->buf_skipped_up_to_date++;
  714. break;
  715. }
  716. /* Skip CSN whose preceedents are not covered by local RUV snapshot */
  717. if ( cscb->state == CLC_STATE_CSN_GT_RUV ) {
  718. buf->buf_skipped_csn_gt_ruv++;
  719. break;
  720. }
  721. /* Skip CSNs already covered by consumer RUV */
  722. if ( cscb->consumer_maxcsn &&
  723. csn_compare ( buf->buf_current_csn, cscb->consumer_maxcsn ) <= 0 ) {
  724. buf->buf_skipped_csn_covered++;
  725. break;
  726. }
  727. /* Send CSNs that are covered by the local RUV snapshot */
  728. if ( csn_compare ( buf->buf_current_csn, cscb->local_maxcsn ) <= 0 ) {
  729. skip = 0;
  730. csn_dup_or_init_by_csn ( &cscb->consumer_maxcsn, buf->buf_current_csn );
  731. break;
  732. }
  733. /*
  734. * Promote the local maxcsn to its next neighbor
  735. * to keep the current session going. Skip if we
  736. * are not sure if current_csn is the neighbor.
  737. */
  738. if ( csn_time_difference(buf->buf_current_csn, cscb->local_maxcsn) == 0 &&
  739. (csn_get_seqnum(buf->buf_current_csn) ==
  740. csn_get_seqnum(cscb->local_maxcsn) + 1) )
  741. {
  742. csn_init_by_csn ( cscb->local_maxcsn, buf->buf_current_csn );
  743. if(cscb->consumer_maxcsn){
  744. csn_init_by_csn ( cscb->consumer_maxcsn, buf->buf_current_csn );
  745. }
  746. skip = 0;
  747. break;
  748. }
  749. /* Skip CSNs not covered by local RUV snapshot */
  750. cscb->state = CLC_STATE_CSN_GT_RUV;
  751. buf->buf_skipped_csn_gt_ruv++;
  752. } while (0);
  753. #ifdef DEBUG
  754. if (skip && cscb) {
  755. char consumer[24] = {'\0'};
  756. char local[24] = {'\0'};
  757. char current[24] = {'\0'};
  758. if ( cscb->consumer_maxcsn )
  759. csn_as_string ( cscb->consumer_maxcsn, PR_FALSE, consumer );
  760. if ( cscb->local_maxcsn )
  761. csn_as_string ( cscb->local_maxcsn, PR_FALSE, local );
  762. csn_as_string ( buf->buf_current_csn, PR_FALSE, current );
  763. slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
  764. "clcache_skip_change - Skip %s consumer=%s local=%s\n", current, consumer, local );
  765. }
  766. #endif
  767. return skip;
  768. }
  769. static struct csn_seq_ctrl_block *
  770. clcache_new_cscb(void)
  771. {
  772. struct csn_seq_ctrl_block *cscb;
  773. cscb = (struct csn_seq_ctrl_block *) slapi_ch_calloc ( 1, sizeof (struct csn_seq_ctrl_block) );
  774. if (cscb == NULL) {
  775. slapi_log_err(SLAPI_LOG_ERR, NULL, "clcache: malloc failure\n" );
  776. }
  777. return cscb;
  778. }
  779. static void
  780. clcache_free_cscb ( struct csn_seq_ctrl_block ** cscb )
  781. {
  782. csn_free ( & (*cscb)->consumer_maxcsn );
  783. csn_free ( & (*cscb)->local_maxcsn );
  784. csn_free ( & (*cscb)->prev_local_maxcsn );
  785. csn_free ( & (*cscb)->local_mincsn );
  786. slapi_ch_free ( (void **) cscb );
  787. }
  788. /*
  789. * Allocate and initialize a new buffer
  790. * It is called when there is a request for a buffer while
  791. * buffer free list is empty.
  792. */
  793. static CLC_Buffer *
  794. clcache_new_buffer ( ReplicaId consumer_rid )
  795. {
  796. CLC_Buffer *buf = NULL;
  797. int welldone = 0;
  798. do {
  799. buf = (CLC_Buffer*) slapi_ch_calloc (1, sizeof(CLC_Buffer));
  800. if ( NULL == buf )
  801. break;
  802. buf->buf_key.flags = DB_DBT_USERMEM;
  803. buf->buf_key.ulen = CSN_STRSIZE + 1;
  804. buf->buf_key.size = CSN_STRSIZE;
  805. buf->buf_key.data = slapi_ch_calloc( 1, buf->buf_key.ulen );
  806. if ( NULL == buf->buf_key.data )
  807. break;
  808. buf->buf_data.flags = DB_DBT_USERMEM;
  809. buf->buf_data.ulen = _pool->pl_buffer_default_pages * DEFAULT_CLC_BUFFER_PAGE_SIZE;
  810. buf->buf_data.data = slapi_ch_malloc( buf->buf_data.ulen );
  811. if ( NULL == buf->buf_data.data )
  812. break;
  813. if ( NULL == ( buf->buf_current_csn = csn_new()) )
  814. break;
  815. buf->buf_state = CLC_STATE_READY;
  816. buf->buf_agmt_name = get_thread_private_agmtname();
  817. buf->buf_consumer_rid = consumer_rid;
  818. buf->buf_num_cscbs = 0;
  819. buf->buf_max_cscbs = MAX_NUM_OF_MASTERS;
  820. buf->buf_cscbs = (struct csn_seq_ctrl_block **) slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
  821. sizeof(struct csn_seq_ctrl_block *));
  822. welldone = 1;
  823. } while (0);
  824. if ( !welldone ) {
  825. clcache_delete_buffer ( &buf );
  826. }
  827. return buf;
  828. }
  829. /*
  830. * Deallocates a buffer.
  831. * It is called when a buffer is returned to the buffer pool
  832. * and the pool size is over the limit.
  833. */
  834. static void
  835. clcache_delete_buffer ( CLC_Buffer **buf )
  836. {
  837. if ( buf && *buf ) {
  838. slapi_ch_free (&( (*buf)->buf_key.data ));
  839. slapi_ch_free (&( (*buf)->buf_data.data ));
  840. csn_free (&( (*buf)->buf_current_csn ));
  841. csn_free (&( (*buf)->buf_missing_csn ));
  842. csn_free (&( (*buf)->buf_prev_missing_csn ));
  843. slapi_ch_free ( (void **) buf );
  844. }
  845. }
  846. static CLC_Busy_List *
  847. clcache_new_busy_list(void)
  848. {
  849. CLC_Busy_List *bl;
  850. int welldone = 0;
  851. do {
  852. if ( NULL == (bl = ( CLC_Busy_List* ) slapi_ch_calloc (1, sizeof(CLC_Busy_List)) ))
  853. break;
  854. if ( NULL == (bl->bl_lock = PR_NewLock ()) )
  855. break;
  856. /*
  857. if ( NULL == (bl->bl_max_csn = csn_new ()) )
  858. break;
  859. */
  860. welldone = 1;
  861. }
  862. while (0);
  863. if ( !welldone ) {
  864. clcache_delete_busy_list ( &bl );
  865. }
  866. return bl;
  867. }
  868. static void
  869. clcache_delete_busy_list ( CLC_Busy_List **bl )
  870. {
  871. if ( bl && *bl ) {
  872. CLC_Buffer *buf = NULL;
  873. if ( (*bl)->bl_lock ) {
  874. PR_Lock ( (*bl)->bl_lock );
  875. }
  876. buf = (*bl)->bl_buffers;
  877. while (buf) {
  878. CLC_Buffer *next = buf->buf_next;
  879. clcache_delete_buffer(&buf);
  880. buf = next;
  881. }
  882. (*bl)->bl_buffers = NULL;
  883. (*bl)->bl_db = NULL;
  884. if ( (*bl)->bl_lock ) {
  885. PR_Unlock ( (*bl)->bl_lock );
  886. PR_DestroyLock ( (*bl)->bl_lock );
  887. (*bl)->bl_lock = NULL;
  888. }
  889. /* csn_free (&( (*bl)->bl_max_csn )); */
  890. slapi_ch_free ( (void **) bl );
  891. }
  892. }
  893. static int
  894. clcache_enqueue_busy_list ( DB *db, CLC_Buffer *buf )
  895. {
  896. CLC_Busy_List *bl;
  897. int rc = 0;
  898. slapi_rwlock_rdlock ( _pool->pl_lock );
  899. for ( bl = _pool->pl_busy_lists; bl && bl->bl_db != db; bl = bl->bl_next );
  900. slapi_rwlock_unlock ( _pool->pl_lock );
  901. if ( NULL == bl ) {
  902. if ( NULL == ( bl = clcache_new_busy_list ()) ) {
  903. rc = CL5_MEMORY_ERROR;
  904. }
  905. else {
  906. slapi_rwlock_wrlock ( _pool->pl_lock );
  907. bl->bl_db = db;
  908. bl->bl_next = _pool->pl_busy_lists;
  909. _pool->pl_busy_lists = bl;
  910. slapi_rwlock_unlock ( _pool->pl_lock );
  911. }
  912. }
  913. if ( NULL != bl ) {
  914. PR_Lock ( bl->bl_lock );
  915. buf->buf_busy_list = bl;
  916. buf->buf_next = bl->bl_buffers;
  917. bl->bl_buffers = buf;
  918. PR_Unlock ( bl->bl_lock );
  919. }
  920. return rc;
  921. }
  922. static int
  923. clcache_open_cursor ( DB_TXN *txn, CLC_Buffer *buf, DBC **cursor )
  924. {
  925. int rc;
  926. rc = buf->buf_busy_list->bl_db->cursor ( buf->buf_busy_list->bl_db, txn, cursor, 0 );
  927. if ( rc != 0 ) {
  928. slapi_log_err(SLAPI_LOG_ERR, get_thread_private_agmtname(),
  929. "clcache: failed to open cursor; db error - %d %s\n",
  930. rc, db_strerror(rc));
  931. }
  932. return rc;
  933. }
/*
 * Fetch the next record from the changelog via the given cursor into
 * buf->buf_key / buf->buf_data, combining buf->buf_load_flag with the
 * caller's positioning flag.  Grows the data DBT and retries once if
 * the record does not fit.  Returns the DB error code (0 on success).
 */
static int
clcache_cursor_get ( DBC *cursor, CLC_Buffer *buf, int flag )
{
    int rc;
    if (buf->buf_data.ulen > WORK_CLC_BUFFER_PAGE_SIZE) {
        /*
         * The buffer size had to be increased,
         * reset it to a smaller working size,
         * if not sufficient it will be increased again
         */
        buf->buf_data.ulen = WORK_CLC_BUFFER_PAGE_SIZE;
    }
    rc = cursor->c_get ( cursor,
        & buf->buf_key,
        & buf->buf_data,
        buf->buf_load_flag | flag );
    if ( DB_BUFFER_SMALL == rc ) {
        /*
         * The record takes more space than the current size of the
         * buffer. Fortunately, buf->buf_data.size has been set by
         * c_get() to the actual data size needed. So we can
         * reallocate the data buffer and try to read again.
         */
        /* Round the needed size up to a whole number of buffer pages. */
        buf->buf_data.ulen = ( buf->buf_data.size / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1 ) * DEFAULT_CLC_BUFFER_PAGE_SIZE;
        buf->buf_data.data = slapi_ch_realloc ( buf->buf_data.data, buf->buf_data.ulen );
        if ( buf->buf_data.data != NULL ) {
            /* Retry the same fetch with the enlarged buffer. */
            rc = cursor->c_get ( cursor,
                &( buf->buf_key ),
                &( buf->buf_data ),
                buf->buf_load_flag | flag );
            slapi_log_err(SLAPI_LOG_REPL, buf->buf_agmt_name,
                "clcache_cursor_get - clcache: (%d | %d) buf key len %d reallocated and retry returns %d\n", buf->buf_load_flag, flag, buf->buf_key.size, rc );
        }
    }
    /* Log the notable failure modes; DB_NOTFOUND etc. are handled by callers. */
    switch ( rc ) {
    case EINVAL:
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
            "clcache_cursor_get - invalid parameter\n" );
        break;
    case DB_BUFFER_SMALL:
        /* Still too small even after the retry (or realloc failed). */
        slapi_log_err(SLAPI_LOG_ERR, buf->buf_agmt_name,
            "clcache_cursor_get - can't allocate %u bytes\n", buf->buf_data.ulen );
        break;
    default:
        break;
    }
    return rc;
}
  982. static void
  983. csn_dup_or_init_by_csn ( CSN **csn1, CSN *csn2 )
  984. {
  985. if ( *csn1 == NULL )
  986. *csn1 = csn_new();
  987. csn_init_by_csn ( *csn1, csn2 );
  988. }
  989. void
  990. clcache_destroy()
  991. {
  992. if (_pool) {
  993. CLC_Busy_List *bl = NULL;
  994. if (_pool->pl_lock) {
  995. slapi_rwlock_wrlock (_pool->pl_lock);
  996. }
  997. bl = _pool->pl_busy_lists;
  998. while (bl) {
  999. CLC_Busy_List *next = bl->bl_next;
  1000. clcache_delete_busy_list(&bl);
  1001. bl = next;
  1002. }
  1003. _pool->pl_busy_lists = NULL;
  1004. _pool->pl_dbenv = NULL;
  1005. if (_pool->pl_lock) {
  1006. slapi_rwlock_unlock(_pool->pl_lock);
  1007. slapi_destroy_rwlock(_pool->pl_lock);
  1008. _pool->pl_lock = NULL;
  1009. }
  1010. slapi_ch_free ( (void **) &_pool );
  1011. }
  1012. }