/* cl5_clcache.c — changelog buffer cache for replication sessions */
  1. /** BEGIN COPYRIGHT BLOCK
  2. * This Program is free software; you can redistribute it and/or modify it under
  3. * the terms of the GNU General Public License as published by the Free Software
  4. * Foundation; version 2 of the License.
  5. *
  6. * This Program is distributed in the hope that it will be useful, but WITHOUT
  7. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  8. * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
  9. *
  10. * You should have received a copy of the GNU General Public License along with
  11. * this Program; if not, write to the Free Software Foundation, Inc., 59 Temple
  12. * Place, Suite 330, Boston, MA 02111-1307 USA.
  13. *
  14. * In addition, as a special exception, Red Hat, Inc. gives You the additional
  15. * right to link the code of this Program with code not covered under the GNU
  16. * General Public License ("Non-GPL Code") and to distribute linked combinations
  17. * including the two, subject to the limitations in this paragraph. Non-GPL Code
  18. * permitted under this exception must only link to the code of this Program
  19. * through those well defined interfaces identified in the file named EXCEPTION
  20. * found in the source code files (the "Approved Interfaces"). The files of
  21. * Non-GPL Code may instantiate templates or use macros or inline functions from
  22. * the Approved Interfaces without causing the resulting work to be covered by
  23. * the GNU General Public License. Only Red Hat, Inc. may make changes or
  24. * additions to the list of Approved Interfaces. You must obey the GNU General
  25. * Public License in all respects for all of the Program code and other code used
  26. * in conjunction with the Program except the Non-GPL Code covered by this
  27. * exception. If you modify this file, you may extend this exception to your
  28. * version of the file, but you are not obligated to do so. If you do not wish to
  29. * provide this exception without modification, you must delete this exception
  30. * statement from your version and license this file solely under the GPL without
  31. * exception.
  32. *
  33. *
  34. * Copyright (C) 2005 Red Hat, Inc.
  35. * All rights reserved.
  36. * END COPYRIGHT BLOCK **/
  37. #ifdef HAVE_CONFIG_H
  38. # include <config.h>
  39. #endif
  40. #include "errno.h" /* ENOMEM, EVAL used by Berkeley DB */
  41. #include "db.h" /* Berkeley DB */
  42. #include "cl5.h" /* changelog5Config */
  43. #include "cl5_clcache.h"
  44. /* newer bdb uses DB_BUFFER_SMALL instead of ENOMEM as the
  45. error return if the given buffer in which to load a
  46. key or value is too small - if it is not defined, define
  47. it here to ENOMEM
  48. */
  49. #ifndef DB_BUFFER_SMALL
  50. #define DB_BUFFER_SMALL ENOMEM
  51. #endif
/*
 * Constants for the buffer pool:
 *
 * DEFAULT_CLC_BUFFER_PAGE_COUNT
 * Little performance boost if it is too small.
 *
 * DEFAULT_CLC_BUFFER_PAGE_SIZE
 * Its value is determined based on the DB requirement that
 * the buffer size should be the multiple of 1024.
 */
#define DEFAULT_CLC_BUFFER_COUNT_MIN 10
#define DEFAULT_CLC_BUFFER_COUNT_MAX 0
#define DEFAULT_CLC_BUFFER_PAGE_COUNT 32
#define DEFAULT_CLC_BUFFER_PAGE_SIZE 1024

/*
 * Iteration states, used both for a whole buffer (buf_state) and for
 * each per-RID csn_seq_ctrl_block (cscb->state).
 */
enum {
    CLC_STATE_READY = 0,         /* ready to iterate */
    CLC_STATE_UP_TO_DATE,        /* remote RUV already covers the CSN */
    CLC_STATE_CSN_GT_RUV,        /* local RUV doesn't cover the CSN */
    CLC_STATE_NEW_RID,           /* unknown RID to local RUVs */
    CLC_STATE_UNSAFE_RUV_CHANGE, /* (RUV1 < maxcsn-in-buffer) && (RUV1 < RUV1') */
    CLC_STATE_DONE,              /* no more change */
    CLC_STATE_ABORTING           /* abort replication session */
};
typedef struct clc_busy_list CLC_Busy_List;

/*
 * Per-RID CSN sequencing state for one replication session.
 * Blocks are created as RIDs are discovered in the local RUV
 * (see clcache_refresh_local_maxcsn).
 */
struct csn_seq_ctrl_block {
    ReplicaId rid;          /* RID this block serves */
    CSN *consumer_maxcsn;   /* Don't send CSN <= this */
    CSN *local_maxcsn;      /* Don't send CSN > this */
    CSN *prev_local_maxcsn; /* local_maxcsn saved before the last RUV refresh;
                             * used to detect unsafe RUV changes */
    int state;              /* CLC_STATE_* */
};
/*
 * Each cl5replayiterator acquires a buffer from the buffer pool
 * at the beginning of a replication session, and returns it back
 * at the end.
 */
struct clc_buffer {
    char *buf_agmt_name;         /* agreement acquired this buffer */
    ReplicaId buf_consumer_rid;  /* help checking threshold csn */
    const RUV *buf_consumer_ruv; /* used to skip change */
    const RUV *buf_local_ruv;    /* used to refresh local_maxcsn */

    /*
     * fields for retrieving data from DB
     */
    int buf_state;               /* CLC_STATE_* for the whole buffer */
    CSN *buf_current_csn;        /* CSN of the change most recently handed out */
    int buf_load_flag;           /* db flag DB_MULTIPLE_KEY, DB_SET, DB_NEXT */
    DBC *buf_cursor;             /* changelog cursor; opened per bulk load */
    DBT buf_key;                 /* current csn string */
    DBT buf_data;                /* data retrieved from db (bulk buffer) */
    void *buf_record_ptr;        /* ptr to the current record in data */
    CSN *buf_missing_csn;        /* used to detect persistent missing of CSN */
    CSN *buf_prev_missing_csn;   /* used to suppress the repeated messages */

    /* fields for control the CSN sequence sent to the consumer */
    struct csn_seq_ctrl_block **buf_cscbs;
    int buf_num_cscbs;           /* number of csn sequence ctrl blocks */
    int buf_max_cscbs;           /* capacity of buf_cscbs (grown on demand) */

    /* fields for debugging stat */
    int buf_load_cnt;            /* number of loads for session */
    int buf_record_cnt;          /* number of changes for session */
    int buf_record_skipped;      /* number of changes skipped */
    int buf_skipped_new_rid;     /* number of changes skipped due to new_rid */
    int buf_skipped_csn_gt_cons_maxcsn; /* number of changes skipped due to csn greater than consumer maxcsn */
    int buf_skipped_up_to_date;  /* number of changes skipped due to consumer being up-to-date for the given rid */
    int buf_skipped_csn_gt_ruv;  /* number of changes skipped due to precedents are not covered by local RUV snapshot */
    int buf_skipped_csn_covered; /* number of changes skipped due to CSNs already covered by consumer RUV */

    /*
     * fields that should be accessed via bl_lock or pl_lock
     */
    CLC_Buffer *buf_next;         /* next buffer in the same list */
    CLC_Busy_List *buf_busy_list; /* which busy list I'm in */
};
/*
 * Each changelog has a busy buffer list.
 * bl_lock serializes access to the buffers and the changelog db handle
 * during bulk loads (see clcache_load_buffer_bulk).
 */
struct clc_busy_list {
    PRLock *bl_lock;
    DB *bl_db;               /* changelog db handle */
    CLC_Buffer *bl_buffers;  /* busy buffers of this list */
    CLC_Busy_List *bl_next;  /* next busy list in the pool */
};
/*
 * Each process has a buffer pool (the file-scope _pool singleton,
 * created by clcache_init and tuned by clcache_set_config).
 */
struct clc_pool {
    Slapi_RWLock *pl_lock;        /* cl writer and agreements */
    DB_ENV **pl_dbenv;            /* pointer to DB_ENV for all the changelog files */
    CLC_Busy_List *pl_busy_lists; /* busy buffer lists, one list per changelog file */
    int pl_buffer_cnt_now;        /* total number of buffers */
    int pl_buffer_cnt_min;        /* free a newly returned buffer if _now > _min */
    int pl_buffer_cnt_max;        /* no use */
    int pl_buffer_default_pages;  /* num of pages in a new buffer */
};
/* static variables */
static struct clc_pool *_pool = NULL; /* process's buffer pool */

/* static prototypes (file-local helpers, defined below) */
static int clcache_adjust_anchorcsn ( CLC_Buffer *buf );
static void clcache_refresh_consumer_maxcsns ( CLC_Buffer *buf );
static int clcache_refresh_local_maxcsns ( CLC_Buffer *buf );
static int clcache_skip_change ( CLC_Buffer *buf );
static int clcache_load_buffer_bulk ( CLC_Buffer *buf, int flag );
static int clcache_open_cursor ( DB_TXN *txn, CLC_Buffer *buf, DBC **cursor );
static int clcache_cursor_get ( DBC *cursor, CLC_Buffer *buf, int flag );
static struct csn_seq_ctrl_block *clcache_new_cscb ();
static void clcache_free_cscb ( struct csn_seq_ctrl_block ** cscb );
static CLC_Buffer *clcache_new_buffer ( ReplicaId consumer_rid );
static void clcache_delete_buffer ( CLC_Buffer **buf );
static CLC_Busy_List *clcache_new_busy_list ();
static void clcache_delete_busy_list ( CLC_Busy_List **bl );
static int clcache_enqueue_busy_list( DB *db, CLC_Buffer *buf );
static void csn_dup_or_init_by_csn ( CSN **csn1, CSN *csn2 );
  163. /*
  164. * Initiates the process buffer pool. This should be done
  165. * once and only once when process starts.
  166. */
  167. int
  168. clcache_init ( DB_ENV **dbenv )
  169. {
  170. if (_pool) {
  171. return 0; /* already initialized */
  172. }
  173. if (NULL == dbenv) {
  174. return -1;
  175. }
  176. _pool = (struct clc_pool*) slapi_ch_calloc ( 1, sizeof ( struct clc_pool ));
  177. _pool->pl_dbenv = dbenv;
  178. _pool->pl_buffer_cnt_min = DEFAULT_CLC_BUFFER_COUNT_MIN;
  179. _pool->pl_buffer_cnt_max = DEFAULT_CLC_BUFFER_COUNT_MAX;
  180. _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_COUNT_MAX;
  181. _pool->pl_lock = slapi_new_rwlock ();
  182. return 0;
  183. }
  184. /*
  185. * This is part of a callback function when changelog configuration
  186. * is read or updated.
  187. */
  188. void
  189. clcache_set_config ()
  190. {
  191. slapi_rwlock_wrlock ( _pool->pl_lock );
  192. _pool->pl_buffer_cnt_max = CL5_DEFAULT_CONFIG_CACHESIZE;
  193. /*
  194. * According to http://www.sleepycat.com/docs/api_c/dbc_get.html,
  195. * data buffer should be a multiple of 1024 bytes in size
  196. * for DB_MULTIPLE_KEY operation.
  197. */
  198. _pool->pl_buffer_default_pages = CL5_DEFAULT_CONFIG_CACHEMEMSIZE / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1;
  199. if ( _pool->pl_buffer_default_pages <= 0 ) { /* this never be true... */
  200. _pool->pl_buffer_default_pages = DEFAULT_CLC_BUFFER_PAGE_COUNT;
  201. }
  202. slapi_rwlock_unlock ( _pool->pl_lock );
  203. }
/*
 * Gets the pointer to a thread dedicated buffer, or allocates
 * a new buffer if there is no buffer allocated yet for this thread.
 *
 * This is called when a cl5replayiterator is created for
 * a replication session.
 *
 * buf          - output; receives the buffer, or NULL on failure
 * db           - changelog db the buffer reads from
 * consumer_rid - consumer's replica ID
 * consumer_ruv - consumer RUV; borrowed for the session, not copied
 * local_ruv    - local RUV; borrowed for the session, not copied
 *
 * Returns 0, CL5_BAD_DATA (NULL buf), or CL5_MEMORY_ERROR.
 */
int
clcache_get_buffer ( CLC_Buffer **buf, DB *db, ReplicaId consumer_rid, const RUV *consumer_ruv, const RUV *local_ruv )
{
    int rc = 0;
    int need_new;
    if ( buf == NULL ) return CL5_BAD_DATA;
    *buf = NULL;
    /* if the pool was re-initialized, the thread private cache will be invalid,
       so we must get a new one */
    need_new = (!_pool || !_pool->pl_busy_lists || !_pool->pl_busy_lists->bl_buffers);
    if ( (!need_new) && (NULL != ( *buf = (CLC_Buffer*) get_thread_private_cache())) ) {
        slapi_log_error ( SLAPI_LOG_REPL, get_thread_private_agmtname(),
            "clcache_get_buffer: found thread private buffer cache %p\n", *buf);
        slapi_log_error ( SLAPI_LOG_REPL, get_thread_private_agmtname(),
            "clcache_get_buffer: _pool is %p _pool->pl_busy_lists is %p _pool->pl_busy_lists->bl_buffers is %p\n",
            _pool, _pool ? _pool->pl_busy_lists : NULL,
            (_pool && _pool->pl_busy_lists) ? _pool->pl_busy_lists->bl_buffers : NULL);
        /* Reset per-session state; the cscb array was freed by
         * clcache_return_buffer at the end of the previous session,
         * so reallocate it here. */
        (*buf)->buf_state = CLC_STATE_READY;
        (*buf)->buf_load_cnt = 0;
        (*buf)->buf_record_cnt = 0;
        (*buf)->buf_record_skipped = 0;
        (*buf)->buf_cursor = NULL;
        (*buf)->buf_skipped_new_rid = 0;
        (*buf)->buf_skipped_csn_gt_cons_maxcsn = 0;
        (*buf)->buf_skipped_up_to_date = 0;
        (*buf)->buf_skipped_csn_gt_ruv = 0;
        (*buf)->buf_skipped_csn_covered = 0;
        (*buf)->buf_cscbs = (struct csn_seq_ctrl_block **) slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
                                sizeof(struct csn_seq_ctrl_block *));
        (*buf)->buf_num_cscbs = 0;
        (*buf)->buf_max_cscbs = MAX_NUM_OF_MASTERS;
    }
    else {
        /* No usable thread-private buffer: allocate a fresh one and
         * register it on the changelog's busy list. */
        *buf = clcache_new_buffer ( consumer_rid );
        if ( *buf ) {
            if ( 0 == clcache_enqueue_busy_list ( db, *buf ) ) {
                set_thread_private_cache ( (void*) (*buf) );
            }
            else {
                clcache_delete_buffer ( buf );
            }
        }
    }
    if ( NULL != *buf ) {
        (*buf)->buf_consumer_ruv = consumer_ruv;
        (*buf)->buf_local_ruv = local_ruv;
    }
    else {
        slapi_log_error ( SLAPI_LOG_FATAL, get_thread_private_agmtname(),
            "clcache_get_buffer: can't allocate new buffer\n" );
        rc = CL5_MEMORY_ERROR;
    }
    return rc;
}
  265. /*
  266. * Returns a buffer back to the buffer pool.
  267. */
  268. void
  269. clcache_return_buffer ( CLC_Buffer **buf )
  270. {
  271. int i;
  272. slapi_log_error ( SLAPI_LOG_REPL, (*buf)->buf_agmt_name,
  273. "session end: state=%d load=%d sent=%d skipped=%d skipped_new_rid=%d "
  274. "skipped_csn_gt_cons_maxcsn=%d skipped_up_to_date=%d "
  275. "skipped_csn_gt_ruv=%d skipped_csn_covered=%d\n",
  276. (*buf)->buf_state,
  277. (*buf)->buf_load_cnt,
  278. (*buf)->buf_record_cnt - (*buf)->buf_record_skipped,
  279. (*buf)->buf_record_skipped, (*buf)->buf_skipped_new_rid,
  280. (*buf)->buf_skipped_csn_gt_cons_maxcsn,
  281. (*buf)->buf_skipped_up_to_date, (*buf)->buf_skipped_csn_gt_ruv,
  282. (*buf)->buf_skipped_csn_covered);
  283. for ( i = 0; i < (*buf)->buf_num_cscbs; i++ ) {
  284. clcache_free_cscb ( &(*buf)->buf_cscbs[i] );
  285. }
  286. slapi_ch_free((void **)&(*buf)->buf_cscbs);
  287. if ( (*buf)->buf_cursor ) {
  288. (*buf)->buf_cursor->c_close ( (*buf)->buf_cursor );
  289. (*buf)->buf_cursor = NULL;
  290. }
  291. }
/*
 * Loads a buffer from DB.
 *
 * anchorcsn - passed in for the first load of a replication session;
 *             NULL on subsequent loads, where the next anchor is
 *             derived from buf_current_csn.
 * flag - DB_SET to load in the key CSN record.
 *        DB_NEXT to load in the records greater than key CSN.
 * return - DB error code instead of cl5 one because of the
 *          historic reason.
 */
int
clcache_load_buffer ( CLC_Buffer *buf, CSN *anchorcsn, int flag )
{
    int rc = 0;

    /* Snapshot local maxcsns before reading so ordering guarantees hold
     * (see the algorithm comment above clcache_adjust_anchorcsn). */
    clcache_refresh_local_maxcsns ( buf );

    /* Set the loading key */
    if ( anchorcsn ) {
        /* First load of the session: anchor at the caller's CSN. */
        clcache_refresh_consumer_maxcsns ( buf );
        buf->buf_load_flag = DB_MULTIPLE_KEY;
        csn_as_string ( anchorcsn, 0, (char*)buf->buf_key.data );
        slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
            "session start: anchorcsn=%s\n", (char*)buf->buf_key.data );
    }
    else if ( csn_get_time(buf->buf_current_csn) == 0 ) {
        /* time == 0 means this csn has never been set */
        rc = DB_NOTFOUND;
    }
    else if ( clcache_adjust_anchorcsn ( buf ) != 0 ) {
        /* No RID has further changes to send */
        rc = DB_NOTFOUND;
    }
    else {
        /* Continue from the last CSN handed out */
        csn_as_string ( buf->buf_current_csn, 0, (char*)buf->buf_key.data );
        slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
            "load next: anchorcsn=%s\n", (char*)buf->buf_key.data );
    }

    if ( rc == 0 ) {
        buf->buf_state = CLC_STATE_READY;
        rc = clcache_load_buffer_bulk ( buf, flag );
        /* Reset some flag variables */
        if ( rc == 0 ) {
            int i;
            for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
                buf->buf_cscbs[i]->state = CLC_STATE_READY;
            }
        }
        else if ( anchorcsn ) {
            /* Report error only when the missing is persistent,
             * i.e. the same anchor CSN failed on two consecutive sessions,
             * and suppress duplicate reports via buf_prev_missing_csn. */
            if ( buf->buf_missing_csn && csn_compare (buf->buf_missing_csn, anchorcsn) == 0 ) {
                if (!buf->buf_prev_missing_csn || csn_compare (buf->buf_prev_missing_csn, anchorcsn)) {
                    slapi_log_error ( SLAPI_LOG_FATAL, buf->buf_agmt_name,
                        "Can't locate CSN %s in the changelog (DB rc=%d). If replication stops, the consumer may need to be reinitialized.\n",
                        (char*)buf->buf_key.data, rc );
                    csn_dup_or_init_by_csn (&buf->buf_prev_missing_csn, anchorcsn);
                }
            }
            else {
                /* First miss for this CSN: remember it, report only if it recurs */
                csn_dup_or_init_by_csn (&buf->buf_missing_csn, anchorcsn);
            }
        }
    }
    if ( rc != 0 ) {
        slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
            "clcache_load_buffer: rc=%d\n", rc );
    }
    return rc;
}
/*
 * Performs one bulk (DB_MULTIPLE_KEY) read from the changelog into
 * buf->buf_data, under the busy list lock.  Retries up to MAX_TRIALS
 * times with a randomized back-off on DB_LOCK_DEADLOCK.
 * On success, initializes buf_record_ptr for DB_MULTIPLE_KEY_NEXT
 * iteration.  Returns a Berkeley DB error code (0 on success).
 */
static int
clcache_load_buffer_bulk ( CLC_Buffer *buf, int flag )
{
    DB_TXN *txn = NULL;
    DBC *cursor = NULL;
    int rc = 0;
    int tries = 0;

#if 0 /* txn control seems not improving anything so turn it off */
    if ( *(_pool->pl_dbenv) ) {
        txn_begin( *(_pool->pl_dbenv), NULL, &txn, 0 );
    }
#endif

    if (NULL == buf) {
        slapi_log_error ( SLAPI_LOG_FATAL, "clcache_load_buffer_bulk",
            "NULL buf\n" );
        return rc;
    }
    if (NULL == buf->buf_busy_list) {
        slapi_log_error ( SLAPI_LOG_FATAL, "clcache_load_buffer_bulk",
            "%s%sno buf_busy_list\n",
            buf->buf_agmt_name?buf->buf_agmt_name:"",
            buf->buf_agmt_name?": ":"" );
        return rc;
    }

    PR_Lock ( buf->buf_busy_list->bl_lock );
retry:
    if ( 0 == ( rc = clcache_open_cursor ( txn, buf, &cursor )) ) {
        if ( flag == DB_NEXT ) {
            /* For bulk read, position the cursor before read the next block */
            rc = cursor->c_get ( cursor,
                                 & buf->buf_key,
                                 & buf->buf_data,
                                 DB_SET );
        }
        /*
         * Continue if the error is no-mem since we don't need to
         * load in the key record anyway with DB_SET.
         */
        if ( 0 == rc || DB_BUFFER_SMALL == rc )
            rc = clcache_cursor_get ( cursor, buf, flag );
    }
    /*
     * Don't keep a cursor open across the whole replication session.
     * That had caused noticeable DB resource contention.
     */
    if ( cursor ) {
        cursor->c_close ( cursor );
        cursor = NULL;
    }
    if ((rc == DB_LOCK_DEADLOCK) && (tries < MAX_TRIALS)) {
        PRIntervalTime interval;
        tries++;
        slapi_log_error ( SLAPI_LOG_TRACE, "clcache_load_buffer_bulk",
            "deadlock number [%d] - retrying\n", tries );
        /* back off for a random 0-99ms before retrying */
        interval = PR_MillisecondsToInterval(slapi_rand() % 100);
        DS_Sleep(interval);
        goto retry;
    }
    if ((rc == DB_LOCK_DEADLOCK) && (tries >= MAX_TRIALS)) {
        slapi_log_error ( SLAPI_LOG_REPL, "clcache_load_buffer_bulk",
            "could not load buffer from changelog after %d tries\n", tries );
    }

#if 0 /* txn control seems not improving anything so turn it off */
    if ( txn ) {
        txn->commit ( txn, DB_TXN_NOSYNC );
    }
#endif
    PR_Unlock ( buf->buf_busy_list->bl_lock );

    buf->buf_record_ptr = NULL;
    if ( 0 == rc ) {
        /* Prime the bulk-retrieval iterator over the freshly loaded data */
        DB_MULTIPLE_INIT ( buf->buf_record_ptr, &buf->buf_data );
        if ( NULL == buf->buf_record_ptr )
            rc = DB_NOTFOUND;
        else
            buf->buf_load_cnt++;
    }
    return rc;
}
/*
 * Gets the next change from the buffer.
 * *key : output - key of the next change, or NULL if no more change
 * *data: output - data of the next change, or NULL if no more change
 * *csn : output - CSN of the next change (points at buf_current_csn,
 *        owned by the buffer), or NULL if no more change
 * return - 0 on success, DB_NOTFOUND when exhausted
 *
 * Skipped changes (per clcache_skip_change) are consumed silently;
 * the loop keeps reading, reloading the buffer from DB as needed,
 * until a sendable change or the end of the changelog is reached.
 */
int
clcache_get_next_change ( CLC_Buffer *buf, void **key, size_t *keylen, void **data, size_t *datalen, CSN **csn )
{
    int skip = 1;
    int rc = 0;
    do {
        *key = *data = NULL;
        *keylen = *datalen = 0;
        if ( buf->buf_record_ptr ) {
            DB_MULTIPLE_KEY_NEXT ( buf->buf_record_ptr, &buf->buf_data,
                                   *key, *keylen, *data, *datalen );
        }
        /*
         * We're done with the current buffer. Now load the next chunk.
         */
        if ( NULL == *key && CLC_STATE_READY == buf->buf_state ) {
            rc = clcache_load_buffer ( buf, NULL, DB_NEXT );
            if ( 0 == rc && buf->buf_record_ptr ) {
                DB_MULTIPLE_KEY_NEXT ( buf->buf_record_ptr, &buf->buf_data,
                                       *key, *keylen, *data, *datalen );
            }
        }
        /* Compare the new change to the local and remote RUVs */
        if ( NULL != *key ) {
            buf->buf_record_cnt++;
            /* key is the CSN string of the change */
            csn_init_by_string ( buf->buf_current_csn, (char*)*key );
            skip = clcache_skip_change ( buf );
            if (skip) buf->buf_record_skipped++;
        }
    }
    while ( rc == 0 && *key && skip );

    if ( NULL == *key ) {
        *key = NULL;
        *csn = NULL;
        rc = DB_NOTFOUND;
    }
    else {
        *csn = buf->buf_current_csn;
        slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
            "load=%d rec=%d csn=%s\n",
            buf->buf_load_cnt, buf->buf_record_cnt, (char*)*key );
    }
    return rc;
}
  485. static void
  486. clcache_refresh_consumer_maxcsns ( CLC_Buffer *buf )
  487. {
  488. int i;
  489. for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
  490. csn_free(&buf->buf_cscbs[i]->consumer_maxcsn);
  491. ruv_get_largest_csn_for_replica (
  492. buf->buf_consumer_ruv,
  493. buf->buf_cscbs[i]->rid,
  494. &buf->buf_cscbs[i]->consumer_maxcsn );
  495. }
  496. }
/*
 * ruv_enumerate_elements callback: update (or create) the control
 * block for the RID of rid_data->csn with the local maxcsn, and mark
 * it UP_TO_DATE if the consumer already covers that CSN.
 * data is the CLC_Buffer being refreshed.  Returns 0, or -1 on
 * allocation failure.
 */
static int
clcache_refresh_local_maxcsn ( const ruv_enum_data *rid_data, void *data )
{
    struct clc_buffer *buf = (struct clc_buffer*) data;
    ReplicaId rid;
    int rc = 0;
    int i;

    rid = csn_get_replicaid ( rid_data->csn );

    /*
     * No need to create cscb for consumer's RID.
     * If RID==65535, the CSN is originated from a
     * legacy consumer. In this case the supplier
     * and the consumer may have the same RID.
     */
    if ( rid == buf->buf_consumer_rid && rid != MAX_REPLICA_ID )
        return rc;

    /* Linear scan for an existing block for this RID */
    for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
        if ( buf->buf_cscbs[i]->rid == rid )
            break;
    }
    if ( i >= buf->buf_num_cscbs ) {
        /* Not found: grow the array if full, then append a new block */
        if( i + 1 > buf->buf_max_cscbs){
            buf->buf_cscbs = (struct csn_seq_ctrl_block **) slapi_ch_realloc((char *)buf->buf_cscbs,
                (i + 2) * sizeof(struct csn_seq_ctrl_block *));
            buf->buf_max_cscbs = i + 1;
        }
        buf->buf_cscbs[i] = clcache_new_cscb();
        if ( buf->buf_cscbs[i] == NULL ) {
            return -1;
        }
        buf->buf_cscbs[i]->rid = rid;
        buf->buf_num_cscbs++;
    }

    csn_dup_or_init_by_csn ( &buf->buf_cscbs[i]->local_maxcsn, rid_data->csn );

    if ( buf->buf_cscbs[i]->consumer_maxcsn &&
         csn_compare (buf->buf_cscbs[i]->consumer_maxcsn, rid_data->csn) >= 0 ) {
        /* No change need to be sent for this RID */
        buf->buf_cscbs[i]->state = CLC_STATE_UP_TO_DATE;
    }
    return rc;
}
  538. static int
  539. clcache_refresh_local_maxcsns ( CLC_Buffer *buf )
  540. {
  541. int i;
  542. for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
  543. csn_dup_or_init_by_csn ( &buf->buf_cscbs[i]->prev_local_maxcsn,
  544. buf->buf_cscbs[i]->local_maxcsn );
  545. }
  546. return ruv_enumerate_elements ( buf->buf_local_ruv, clcache_refresh_local_maxcsn, buf );
  547. }
/*
 * Algorithm:
 *
 * 1. Snapshot local RUVs;
 * 2. Load buffer;
 * 3. Send to the consumer only those CSNs that are covered
 *    by the RUVs snapshot taken in the first step;
 *    All CSNs that are covered by the RUVs snapshot taken in the
 *    first step are guaranteed in consecutive order for the respected
 *    RIDs because of the the CSN pending list control;
 *    A CSN that is not covered by the RUVs snapshot may be out of order
 *    since it is possible that a smaller CSN might not have committed
 *    yet by the time the buffer was loaded.
 * 4. Determine anchorcsn for each RID:
 *
 *    Case| Local vs. Buffer | New Local | Next
 *        | MaxCSN    MaxCSN | MaxCSN    | Anchor-CSN
 *    ----+-------------------+-----------+----------------
 *    1   | Cl >= Cb          | *         | Cb
 *    2   | Cl <  Cb          | Cl        | Cb
 *    3   | Cl <  Cb          | Cl2       | Cl
 *
 * 5. Determine anchorcsn for next load:
 *    Anchor-CSN = min { all Next-Anchor-CSN, Buffer-MaxCSN }
 *
 * Returns buf->buf_state: CLC_STATE_READY if some RID still has
 * changes to send (possibly after rewinding buf_current_csn), or
 * CLC_STATE_DONE otherwise.
 */
static int
clcache_adjust_anchorcsn ( CLC_Buffer *buf )
{
    PRBool hasChange = PR_FALSE;
    struct csn_seq_ctrl_block *cscb;
    int i;

    if ( buf->buf_state == CLC_STATE_READY ) {
        for ( i = 0; i < buf->buf_num_cscbs; i++ ) {
            cscb = buf->buf_cscbs[i];
            if ( cscb->state == CLC_STATE_UP_TO_DATE )
                continue;
            /*
             * Case 3 unsafe ruv change: next buffer load should start
             * from where the maxcsn in the old ruv was. Since each
             * cscb has remembered the maxcsn sent to the consumer,
             * CSNs that may be loaded again could easily be skipped.
             */
            if ( cscb->prev_local_maxcsn &&
                 csn_compare (cscb->prev_local_maxcsn, buf->buf_current_csn) < 0 &&
                 csn_compare (cscb->local_maxcsn, cscb->prev_local_maxcsn) != 0 ) {
                hasChange = PR_TRUE;
                cscb->state = CLC_STATE_READY;
                /* Rewind the anchor to the old maxcsn for this RID */
                csn_init_by_csn ( buf->buf_current_csn, cscb->prev_local_maxcsn );
                csn_as_string ( cscb->prev_local_maxcsn, 0, (char*)buf->buf_key.data );
                /* NOTE(review): cscb->state was just set to CLC_STATE_READY
                 * above, so this ternary always logs "unsafe ruv change";
                 * the "out of sequence csn" branch is unreachable — confirm
                 * whether the check was meant to read the pre-reset state. */
                slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
                    "adjust anchor csn upon %s\n",
                    ( cscb->state == CLC_STATE_CSN_GT_RUV ? "out of sequence csn" : "unsafe ruv change") );
                continue;
            }
            /*
             * check if there are still changes to send for this RID
             * Assume we had compared the local maxcsn and the consumer
             * max csn before this function was called and hence the
             * cscb->state had been set accordingly.
             */
            if ( hasChange == PR_FALSE &&
                 csn_compare (cscb->local_maxcsn, buf->buf_current_csn) > 0 ) {
                hasChange = PR_TRUE;
            }
        }
    }

    if ( !hasChange ) {
        buf->buf_state = CLC_STATE_DONE;
    }
    return buf->buf_state;
}
/*
 * Decide whether the change at buf->buf_current_csn should be skipped
 * (not sent to the consumer).  Returns 1 to skip, 0 to send.
 * Side effects: updates the matching cscb's consumer_maxcsn/
 * local_maxcsn/state and the buffer's per-reason skip counters.
 */
static int
clcache_skip_change ( CLC_Buffer *buf )
{
    struct csn_seq_ctrl_block *cscb = NULL;
    ReplicaId rid;
    int skip = 1;
    int i;
    char buf_cur_csn_str[CSN_STRSIZE];
    char oth_csn_str[CSN_STRSIZE];

    /* do/while(0) used as a structured "break out on first decision" */
    do {
        rid = csn_get_replicaid ( buf->buf_current_csn );
        /*
         * Skip CSN that is originated from the consumer,
         * unless the CSN is newer than the maxcsn.
         * If RID==65535, the CSN is originated from a
         * legacy consumer. In this case the supplier
         * and the consumer may have the same RID.
         */
        if (rid == buf->buf_consumer_rid && rid != MAX_REPLICA_ID){
            CSN *cons_maxcsn = NULL;
            ruv_get_max_csn(buf->buf_consumer_ruv, &cons_maxcsn);
            if ( csn_compare ( buf->buf_current_csn, cons_maxcsn) > 0 ) {
                /*
                 * The consumer must have been "restored" and needs this newer update.
                 */
                skip = 0;
            } else if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
                csn_as_string(cons_maxcsn, 0, oth_csn_str);
                slapi_log_error(SLAPI_LOG_REPL, buf->buf_agmt_name,
                    "Skipping update because the changelog buffer current csn [%s] is "
                    "less than or equal to the consumer max csn [%s]\n",
                    buf_cur_csn_str, oth_csn_str);
                buf->buf_skipped_csn_gt_cons_maxcsn++;
            }
            csn_free(&cons_maxcsn);
            break;
        }

        /* Skip helper entry (ENTRY_COUNT, PURGE_RUV and so on) */
        if ( cl5HelperEntry ( NULL, buf->buf_current_csn ) == PR_TRUE ) {
            slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
                "Skip helper entry type=%ld\n", csn_get_time( buf->buf_current_csn ));
            break;
        }

        /* Find csn sequence control block for the current rid */
        for (i = 0; i < buf->buf_num_cscbs && buf->buf_cscbs[i]->rid != rid; i++);

        /* Skip CSN whose RID is unknown to the local RUV snapshot */
        if ( i >= buf->buf_num_cscbs ) {
            if (slapi_is_loglevel_set(SLAPI_LOG_REPL)) {
                csn_as_string(buf->buf_current_csn, 0, buf_cur_csn_str);
                slapi_log_error(SLAPI_LOG_REPL, buf->buf_agmt_name,
                    "Skipping update because the changelog buffer current csn [%s] rid "
                    "[%d] is not in the list of changelog csn buffers (length %d)\n",
                    buf_cur_csn_str, rid, buf->buf_num_cscbs);
            }
            buf->buf_skipped_new_rid++;
            break;
        }

        cscb = buf->buf_cscbs[i];

        /* Skip if the consumer is already up-to-date for the RID */
        if ( cscb->state == CLC_STATE_UP_TO_DATE ) {
            buf->buf_skipped_up_to_date++;
            break;
        }

        /* Skip CSN whose precedents are not covered by local RUV snapshot */
        if ( cscb->state == CLC_STATE_CSN_GT_RUV ) {
            buf->buf_skipped_csn_gt_ruv++;
            break;
        }

        /* Skip CSNs already covered by consumer RUV */
        if ( cscb->consumer_maxcsn &&
             csn_compare ( buf->buf_current_csn, cscb->consumer_maxcsn ) <= 0 ) {
            buf->buf_skipped_csn_covered++;
            break;
        }

        /* Send CSNs that are covered by the local RUV snapshot */
        if ( csn_compare ( buf->buf_current_csn, cscb->local_maxcsn ) <= 0 ) {
            skip = 0;
            csn_dup_or_init_by_csn ( &cscb->consumer_maxcsn, buf->buf_current_csn );
            break;
        }

        /*
         * Promote the local maxcsn to its next neighbor
         * to keep the current session going. Skip if we
         * are not sure if current_csn is the neighbor.
         */
        if ( csn_time_difference(buf->buf_current_csn, cscb->local_maxcsn) == 0 &&
             (csn_get_seqnum(buf->buf_current_csn) ==
              csn_get_seqnum(cscb->local_maxcsn) + 1) )
        {
            /* Same second, consecutive sequence number: safe to advance */
            csn_init_by_csn ( cscb->local_maxcsn, buf->buf_current_csn );
            if(cscb->consumer_maxcsn){
                csn_init_by_csn ( cscb->consumer_maxcsn, buf->buf_current_csn );
            }
            skip = 0;
            break;
        }

        /* Skip CSNs not covered by local RUV snapshot */
        cscb->state = CLC_STATE_CSN_GT_RUV;
        buf->buf_skipped_csn_gt_ruv++;

    } while (0);

#ifdef DEBUG
    if (skip && cscb) {
        char consumer[24] = {'\0'};
        char local[24] = {'\0'};
        char current[24] = {'\0'};
        if ( cscb->consumer_maxcsn )
            csn_as_string ( cscb->consumer_maxcsn, PR_FALSE, consumer );
        if ( cscb->local_maxcsn )
            csn_as_string ( cscb->local_maxcsn, PR_FALSE, local );
        csn_as_string ( buf->buf_current_csn, PR_FALSE, current );
        slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
            "Skip %s consumer=%s local=%s\n", current, consumer, local );
    }
#endif

    return skip;
}
  736. static struct csn_seq_ctrl_block *
  737. clcache_new_cscb ()
  738. {
  739. struct csn_seq_ctrl_block *cscb;
  740. cscb = (struct csn_seq_ctrl_block *) slapi_ch_calloc ( 1, sizeof (struct csn_seq_ctrl_block) );
  741. if (cscb == NULL) {
  742. slapi_log_error ( SLAPI_LOG_FATAL, NULL, "clcache: malloc failure\n" );
  743. }
  744. return cscb;
  745. }
  746. static void
  747. clcache_free_cscb ( struct csn_seq_ctrl_block ** cscb )
  748. {
  749. csn_free ( & (*cscb)->consumer_maxcsn );
  750. csn_free ( & (*cscb)->local_maxcsn );
  751. csn_free ( & (*cscb)->prev_local_maxcsn );
  752. slapi_ch_free ( (void **) cscb );
  753. }
  754. /*
  755. * Allocate and initialize a new buffer
  756. * It is called when there is a request for a buffer while
  757. * buffer free list is empty.
  758. */
  759. static CLC_Buffer *
  760. clcache_new_buffer ( ReplicaId consumer_rid )
  761. {
  762. CLC_Buffer *buf = NULL;
  763. int welldone = 0;
  764. do {
  765. buf = (CLC_Buffer*) slapi_ch_calloc (1, sizeof(CLC_Buffer));
  766. if ( NULL == buf )
  767. break;
  768. buf->buf_key.flags = DB_DBT_USERMEM;
  769. buf->buf_key.ulen = CSN_STRSIZE + 1;
  770. buf->buf_key.size = CSN_STRSIZE;
  771. buf->buf_key.data = slapi_ch_calloc( 1, buf->buf_key.ulen );
  772. if ( NULL == buf->buf_key.data )
  773. break;
  774. buf->buf_data.flags = DB_DBT_USERMEM;
  775. buf->buf_data.ulen = _pool->pl_buffer_default_pages * DEFAULT_CLC_BUFFER_PAGE_SIZE;
  776. buf->buf_data.data = slapi_ch_malloc( buf->buf_data.ulen );
  777. if ( NULL == buf->buf_data.data )
  778. break;
  779. if ( NULL == ( buf->buf_current_csn = csn_new()) )
  780. break;
  781. buf->buf_state = CLC_STATE_READY;
  782. buf->buf_agmt_name = get_thread_private_agmtname();
  783. buf->buf_consumer_rid = consumer_rid;
  784. buf->buf_num_cscbs = 0;
  785. buf->buf_max_cscbs = MAX_NUM_OF_MASTERS;
  786. buf->buf_cscbs = (struct csn_seq_ctrl_block **) slapi_ch_calloc(MAX_NUM_OF_MASTERS + 1,
  787. sizeof(struct csn_seq_ctrl_block *));
  788. welldone = 1;
  789. } while (0);
  790. if ( !welldone ) {
  791. clcache_delete_buffer ( &buf );
  792. }
  793. return buf;
  794. }
  795. /*
  796. * Deallocates a buffer.
  797. * It is called when a buffer is returned to the buffer pool
  798. * and the pool size is over the limit.
  799. */
  800. static void
  801. clcache_delete_buffer ( CLC_Buffer **buf )
  802. {
  803. if ( buf && *buf ) {
  804. slapi_ch_free (&( (*buf)->buf_key.data ));
  805. slapi_ch_free (&( (*buf)->buf_data.data ));
  806. csn_free (&( (*buf)->buf_current_csn ));
  807. csn_free (&( (*buf)->buf_missing_csn ));
  808. csn_free (&( (*buf)->buf_prev_missing_csn ));
  809. slapi_ch_free ( (void **) buf );
  810. }
  811. }
  812. static CLC_Busy_List *
  813. clcache_new_busy_list ()
  814. {
  815. CLC_Busy_List *bl;
  816. int welldone = 0;
  817. do {
  818. if ( NULL == (bl = ( CLC_Busy_List* ) slapi_ch_calloc (1, sizeof(CLC_Busy_List)) ))
  819. break;
  820. if ( NULL == (bl->bl_lock = PR_NewLock ()) )
  821. break;
  822. /*
  823. if ( NULL == (bl->bl_max_csn = csn_new ()) )
  824. break;
  825. */
  826. welldone = 1;
  827. }
  828. while (0);
  829. if ( !welldone ) {
  830. clcache_delete_busy_list ( &bl );
  831. }
  832. return bl;
  833. }
  834. static void
  835. clcache_delete_busy_list ( CLC_Busy_List **bl )
  836. {
  837. if ( bl && *bl ) {
  838. CLC_Buffer *buf = NULL;
  839. if ( (*bl)->bl_lock ) {
  840. PR_Lock ( (*bl)->bl_lock );
  841. }
  842. buf = (*bl)->bl_buffers;
  843. while (buf) {
  844. CLC_Buffer *next = buf->buf_next;
  845. clcache_delete_buffer(&buf);
  846. buf = next;
  847. }
  848. (*bl)->bl_buffers = NULL;
  849. (*bl)->bl_db = NULL;
  850. if ( (*bl)->bl_lock ) {
  851. PR_Unlock ( (*bl)->bl_lock );
  852. PR_DestroyLock ( (*bl)->bl_lock );
  853. (*bl)->bl_lock = NULL;
  854. }
  855. /* csn_free (&( (*bl)->bl_max_csn )); */
  856. slapi_ch_free ( (void **) bl );
  857. }
  858. }
  859. static int
  860. clcache_enqueue_busy_list ( DB *db, CLC_Buffer *buf )
  861. {
  862. CLC_Busy_List *bl;
  863. int rc = 0;
  864. slapi_rwlock_rdlock ( _pool->pl_lock );
  865. for ( bl = _pool->pl_busy_lists; bl && bl->bl_db != db; bl = bl->bl_next );
  866. slapi_rwlock_unlock ( _pool->pl_lock );
  867. if ( NULL == bl ) {
  868. if ( NULL == ( bl = clcache_new_busy_list ()) ) {
  869. rc = CL5_MEMORY_ERROR;
  870. }
  871. else {
  872. slapi_rwlock_wrlock ( _pool->pl_lock );
  873. bl->bl_db = db;
  874. bl->bl_next = _pool->pl_busy_lists;
  875. _pool->pl_busy_lists = bl;
  876. slapi_rwlock_unlock ( _pool->pl_lock );
  877. }
  878. }
  879. if ( NULL != bl ) {
  880. PR_Lock ( bl->bl_lock );
  881. buf->buf_busy_list = bl;
  882. buf->buf_next = bl->bl_buffers;
  883. bl->bl_buffers = buf;
  884. PR_Unlock ( bl->bl_lock );
  885. }
  886. return rc;
  887. }
  888. static int
  889. clcache_open_cursor ( DB_TXN *txn, CLC_Buffer *buf, DBC **cursor )
  890. {
  891. int rc;
  892. rc = buf->buf_busy_list->bl_db->cursor ( buf->buf_busy_list->bl_db, txn, cursor, 0 );
  893. if ( rc != 0 ) {
  894. slapi_log_error ( SLAPI_LOG_FATAL, get_thread_private_agmtname(),
  895. "clcache: failed to open cursor; db error - %d %s\n",
  896. rc, db_strerror(rc));
  897. }
  898. return rc;
  899. }
  900. static int
  901. clcache_cursor_get ( DBC *cursor, CLC_Buffer *buf, int flag )
  902. {
  903. int rc;
  904. rc = cursor->c_get ( cursor,
  905. & buf->buf_key,
  906. & buf->buf_data,
  907. buf->buf_load_flag | flag );
  908. if ( DB_BUFFER_SMALL == rc ) {
  909. /*
  910. * The record takes more space than the current size of the
  911. * buffer. Fortunately, buf->buf_data.size has been set by
  912. * c_get() to the actual data size needed. So we can
  913. * reallocate the data buffer and try to read again.
  914. */
  915. buf->buf_data.ulen = ( buf->buf_data.size / DEFAULT_CLC_BUFFER_PAGE_SIZE + 1 ) * DEFAULT_CLC_BUFFER_PAGE_SIZE;
  916. buf->buf_data.data = slapi_ch_realloc ( buf->buf_data.data, buf->buf_data.ulen );
  917. if ( buf->buf_data.data != NULL ) {
  918. rc = cursor->c_get ( cursor,
  919. &( buf->buf_key ),
  920. &( buf->buf_data ),
  921. buf->buf_load_flag | flag );
  922. slapi_log_error ( SLAPI_LOG_REPL, buf->buf_agmt_name,
  923. "clcache: (%d | %d) buf key len %d reallocated and retry returns %d\n", buf->buf_load_flag, flag, buf->buf_key.size, rc );
  924. }
  925. }
  926. switch ( rc ) {
  927. case EINVAL:
  928. slapi_log_error ( SLAPI_LOG_FATAL, buf->buf_agmt_name,
  929. "clcache_cursor_get: invalid parameter\n" );
  930. break;
  931. case DB_BUFFER_SMALL:
  932. slapi_log_error ( SLAPI_LOG_FATAL, buf->buf_agmt_name,
  933. "clcache_cursor_get: can't allocate %u bytes\n", buf->buf_data.ulen );
  934. break;
  935. default:
  936. break;
  937. }
  938. return rc;
  939. }
  940. static void
  941. csn_dup_or_init_by_csn ( CSN **csn1, CSN *csn2 )
  942. {
  943. if ( *csn1 == NULL )
  944. *csn1 = csn_new();
  945. csn_init_by_csn ( *csn1, csn2 );
  946. }
  947. void
  948. clcache_destroy()
  949. {
  950. if (_pool) {
  951. CLC_Busy_List *bl = NULL;
  952. if (_pool->pl_lock) {
  953. slapi_rwlock_wrlock (_pool->pl_lock);
  954. }
  955. bl = _pool->pl_busy_lists;
  956. while (bl) {
  957. CLC_Busy_List *next = bl->bl_next;
  958. clcache_delete_busy_list(&bl);
  959. bl = next;
  960. }
  961. _pool->pl_busy_lists = NULL;
  962. _pool->pl_dbenv = NULL;
  963. if (_pool->pl_lock) {
  964. slapi_rwlock_unlock(_pool->pl_lock);
  965. slapi_destroy_rwlock(_pool->pl_lock);
  966. _pool->pl_lock = NULL;
  967. }
  968. slapi_ch_free ( (void **) &_pool );
  969. }
  970. }