Преглед на файлове

Ticket 49476 - refactor ldbm backend to allow replacement of BDB

BACKEND REDESIGN - Phase 1

This patch provides the first phase of the backend redesign. It
splits the configuration of the LDBM layer and the DB-specific layer.

The dblayer_private defines a set of functions to be used by the LDBM
layer and to be implemented by the DB layer.

Currently this is only done for the BDB implementation; the patch automatically
splits the configuration for existing instances.

See also:

http://www.port389.org/docs/389ds/design/backend-redesign.html
Ludwig Krispenz преди 6 години
родител
ревизия
94c74015e5
променени са 45 файла, в които са добавени 16775 реда и са изтрити 7154 реда
  1. 13 7
      Makefile.am
  2. 18 15
      dirsrvtests/tests/suites/config/autotuning_test.py
  3. 6 3
      dirsrvtests/tests/suites/config/config_test.py
  4. 1 693
      ldap/servers/slapd/back-ldbm/ancestorid.c
  5. 19 18
      ldap/servers/slapd/back-ldbm/archive.c
  6. 4 4
      ldap/servers/slapd/back-ldbm/back-ldbm.h
  7. 7 6
      ldap/servers/slapd/back-ldbm/cleanup.c
  8. 2207 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c
  9. 3405 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c
  10. 22 46
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
  11. 293 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c
  12. 6054 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c
  13. 163 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h
  14. 3311 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c
  15. 394 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c
  16. 5 7
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c
  17. 7 8
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c
  18. 233 0
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c
  19. 15 16
      ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c
  20. 0 353
      ldap/servers/slapd/back-ldbm/dbhelp.c
  21. 34 1384
      ldap/servers/slapd/back-ldbm/dblayer.c
  22. 83 91
      ldap/servers/slapd/back-ldbm/dblayer.h
  23. 4 5
      ldap/servers/slapd/back-ldbm/dbsize.c
  24. 2 209
      ldap/servers/slapd/back-ldbm/dbverify.c
  25. 30 4
      ldap/servers/slapd/back-ldbm/idl.c
  26. 0 705
      ldap/servers/slapd/back-ldbm/import-merge.c
  27. 1 969
      ldap/servers/slapd/back-ldbm/import.c
  28. 1 6
      ldap/servers/slapd/back-ldbm/import.h
  29. 0 21
      ldap/servers/slapd/back-ldbm/init.c
  30. 7 3
      ldap/servers/slapd/back-ldbm/instance.c
  31. 176 962
      ldap/servers/slapd/back-ldbm/ldbm_config.c
  32. 7 1
      ldap/servers/slapd/back-ldbm/ldbm_config.h
  33. 1 1
      ldap/servers/slapd/back-ldbm/ldbm_index_config.c
  34. 158 275
      ldap/servers/slapd/back-ldbm/ldbm_instance_config.c
  35. 8 1011
      ldap/servers/slapd/back-ldbm/ldif2ldbm.c
  36. 11 36
      ldap/servers/slapd/back-ldbm/proto-back-ldbm.h
  37. 4 1
      ldap/servers/slapd/back-ldbm/rmdb.c
  38. 34 269
      ldap/servers/slapd/back-ldbm/start.c
  39. 3 3
      ldap/servers/slapd/back-ldbm/vlv_srch.c
  40. 2 4
      ldap/servers/slapd/main.c
  41. 8 1
      ldap/servers/slapd/result.c
  42. 4 2
      ldap/servers/slapd/slapi-plugin.h
  43. 0 15
      ldap/servers/slapd/task.c
  44. 1 0
      src/lib389/lib389/_constants.py
  45. 19 0
      src/lib389/lib389/config.py

+ 13 - 7
Makefile.am

@@ -1405,10 +1405,8 @@ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \
 	ldap/servers/slapd/back-ldbm/cache.c \
 	ldap/servers/slapd/back-ldbm/cleanup.c \
 	ldap/servers/slapd/back-ldbm/close.c \
-	ldap/servers/slapd/back-ldbm/dbhelp.c \
 	ldap/servers/slapd/back-ldbm/dblayer.c \
 	ldap/servers/slapd/back-ldbm/dbsize.c \
-	ldap/servers/slapd/back-ldbm/dbversion.c \
 	ldap/servers/slapd/back-ldbm/dn2entry.c \
 	ldap/servers/slapd/back-ldbm/entrystore.c \
 	ldap/servers/slapd/back-ldbm/filterindex.c \
@@ -1421,8 +1419,6 @@ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \
 	ldap/servers/slapd/back-ldbm/idl_set.c \
 	ldap/servers/slapd/back-ldbm/idl_common.c \
 	ldap/servers/slapd/back-ldbm/import.c \
-	ldap/servers/slapd/back-ldbm/import-merge.c \
-	ldap/servers/slapd/back-ldbm/import-threads.c \
 	ldap/servers/slapd/back-ldbm/index.c \
 	ldap/servers/slapd/back-ldbm/init.c \
 	ldap/servers/slapd/back-ldbm/instance.c \
@@ -1447,7 +1443,6 @@ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \
 	ldap/servers/slapd/back-ldbm/dbverify.c \
 	ldap/servers/slapd/back-ldbm/matchrule.c \
 	ldap/servers/slapd/back-ldbm/misc.c \
-	ldap/servers/slapd/back-ldbm/monitor.c \
 	ldap/servers/slapd/back-ldbm/nextid.c \
 	ldap/servers/slapd/back-ldbm/parents.c \
 	ldap/servers/slapd/back-ldbm/perfctrs.c \
@@ -1456,10 +1451,21 @@ libback_ldbm_la_SOURCES = ldap/servers/slapd/back-ldbm/ancestorid.c \
 	ldap/servers/slapd/back-ldbm/sort.c \
 	ldap/servers/slapd/back-ldbm/start.c \
 	ldap/servers/slapd/back-ldbm/uniqueid2entry.c \
-	ldap/servers/slapd/back-ldbm/upgrade.c \
 	ldap/servers/slapd/back-ldbm/vlv.c \
 	ldap/servers/slapd/back-ldbm/vlv_key.c \
-	ldap/servers/slapd/back-ldbm/vlv_srch.c
+	ldap/servers/slapd/back-ldbm/vlv_srch.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c \
+	ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c
+
 
 libback_ldbm_la_CPPFLAGS = $(AM_CPPFLAGS) $(DSPLUGIN_CPPFLAGS) @db_inc@
 libback_ldbm_la_DEPENDENCIES = libslapd.la

+ 18 - 15
dirsrvtests/tests/suites/config/autotuning_test.py

@@ -11,7 +11,7 @@ from lib389._mapped_object import DSLdapObject
 from lib389.utils import *
 from lib389.topologies import topology_st as topo
 
-from lib389._constants import DN_CONFIG_LDBM, DN_USERROOT_LDBM, DEFAULT_SUFFIX
+from lib389._constants import DN_CONFIG_LDBM, DN_CONFIG_LDBM_BDB, DN_USERROOT_LDBM, DEFAULT_SUFFIX
 
 pytestmark = pytest.mark.tier0
 
@@ -119,15 +119,16 @@ def test_cache_autosize_non_zero(topo, autosize, autosize_split):
     """
 
     config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
+    bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
     userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM)
 
     cachesize = '33333333'
 
-    dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
+    dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
     cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
     dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
-    autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
-    autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
+    autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
+    autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
 
     log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test")
     log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
@@ -164,11 +165,11 @@ def test_cache_autosize_non_zero(topo, autosize, autosize_split):
         config_ldbm.set('nsslapd-dbcachesize ', cachesize)
     topo.standalone.restart()
 
-    dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
+    dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
     cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
     dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
-    autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
-    autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
+    autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
+    autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
 
     log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.")
     log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
@@ -208,16 +209,17 @@ def test_cache_autosize_basic_sane(topo, autosize_split):
     """
 
     config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
+    bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
     userroot_ldbm = DSLdapObject(topo.standalone, DN_USERROOT_LDBM)
     config_ldbm.set('nsslapd-cache-autosize', '0')
 
     # Test with caches with both real values and 0
     for cachesize in ('0', '33333333'):
-        dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
+        dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
         cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
         dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
-        autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
-        autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
+        autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
+        autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
 
         log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize before the test")
         log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
@@ -241,11 +243,11 @@ def test_cache_autosize_basic_sane(topo, autosize_split):
         userroot_ldbm.set('nsslapd-cachememsize', cachesize)
         topo.standalone.restart()
 
-        dbcachesize_val = config_ldbm.get_attr_val('nsslapd-dbcachesize')
+        dbcachesize_val = bdb_config_ldbm.get_attr_val('nsslapd-dbcachesize')
         cachenensize_val = userroot_ldbm.get_attr_val('nsslapd-cachememsize')
         dncachenensize_val = userroot_ldbm.get_attr_val('nsslapd-dncachememsize')
-        autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
-        autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
+        autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
+        autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
 
         log.info("Check nsslapd-dbcachesize and nsslapd-cachememsize in the appropriate range.")
         log.info("nsslapd-dbcachesize == {}".format(dbcachesize_val))
@@ -277,8 +279,9 @@ def test_cache_autosize_invalid_values(topo, invalid_value):
     """
 
     config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM)
-    autosize_val = config_ldbm.get_attr_val('nsslapd-cache-autosize')
-    autosize_split_val = config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
+    bdb_config_ldbm = DSLdapObject(topo.standalone, DN_CONFIG_LDBM_BDB)
+    autosize_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize')
+    autosize_split_val = bdb_config_ldbm.get_attr_val('nsslapd-cache-autosize-split')
 
     log.info("Set nsslapd-cache-autosize-split to {}".format(invalid_value))
     with pytest.raises(ldap.UNWILLING_TO_PERFORM):

+ 6 - 3
dirsrvtests/tests/suites/config/config_test.py

@@ -15,7 +15,7 @@ from lib389.utils import *
 from lib389._constants import DN_CONFIG, DEFAULT_SUFFIX
 from lib389.idm.user import UserAccounts, TEST_USER_PROPERTIES
 from lib389.backend import *
-from lib389.config import LDBMConfig
+from lib389.config import LDBMConfig, BDB_LDBMConfig
 from lib389.cos import CosPointerDefinitions, CosTemplates
 from lib389.backend import Backends
 from lib389.monitor import MonitorLDBM
@@ -144,13 +144,16 @@ def test_config_deadlock_policy(topology_m2):
     default_val = b'9'
 
     ldbmconfig = LDBMConfig(topology_m2.ms["master1"])
+    bdbconfig = BDB_LDBMConfig(topology_m2.ms["master1"])
 
-    deadlock_policy = ldbmconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
+    deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
     assert deadlock_policy == default_val
 
     # Try a range of valid values
-    for val in ('0', '5', '9'):
+    for val in (b'0', b'5', b'9'):
         ldbmconfig.replace('nsslapd-db-deadlock-policy', val)
+        deadlock_policy = bdbconfig.get_attr_val_bytes('nsslapd-db-deadlock-policy')
+        assert deadlock_policy == val
 
     # Try a range of invalid values
     for val in ('-1', '10'):

+ 1 - 693
ldap/servers/slapd/back-ldbm/ancestorid.c

@@ -15,699 +15,7 @@
 #include "back-ldbm.h"
 #include "import.h"
 
-static char *sourcefile = LDBM_ANCESTORID_STR;
-
-/* Start of definitions for a simple cache using a hash table */
-
-typedef struct id2idl
-{
-    ID keyid;
-    IDList *idl;
-    struct id2idl *next;
-} id2idl;
-
-static void id2idl_free(id2idl **ididl);
-static int id2idl_same_key(const void *ididl, const void *k);
-
-typedef Hashtable id2idl_hash;
-
-#define id2idl_new_hash(size) new_hash(size, HASHLOC(id2idl, next), NULL, id2idl_same_key)
-#define id2idl_hash_lookup(ht, key, he) find_hash(ht, key, sizeof(ID), (void **)(he))
-#define id2idl_hash_add(ht, key, he, alt) add_hash(ht, key, sizeof(ID), he, (void **)(alt))
-#define id2idl_hash_remove(ht, key) remove_hash(ht, key, sizeof(ID))
-
-static void id2idl_hash_destroy(id2idl_hash *ht);
-
-/* End of definitions for a simple cache using a hash table */
-
-static int ldbm_parentid(backend *be, DB_TXN *txn, ID id, ID *ppid);
-static int check_cache(id2idl_hash *ht);
-static IDList *idl_union_allids(backend *be, struct attrinfo *ai, IDList *a, IDList *b);
-static int ldbm_ancestorid_default_create_index(backend *be, ImportJob *job);
-static int ldbm_ancestorid_new_idl_create_index(backend *be, ImportJob *job);
-
-static int
-ldbm_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
-{
-    int ret = 0;
-    DB *db = NULL;
-    DBC *dbc = NULL;
-    DBT key = {0};
-    DBT data = {0};
-    struct attrinfo *ai = NULL;
-    IDList *nodes = NULL;
-    ID id;
-    int started_progress_logging = 0;
-    int key_count = 0;
-
-    /* Open the parentid index */
-    ainfo_get(be, LDBM_PARENTID_STR, &ai);
-
-    /* Open the parentid index file */
-    ret = dblayer_get_index_file(be, ai, &db, DBOPEN_CREATE);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_get_nonleaf_ids", sourcefile, 13010, ret);
-        goto out;
-    }
-
-    /* Get a cursor so we can walk through the parentid */
-    ret = db->cursor(db, txn, &dbc, 0);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_get_nonleaf_ids", sourcefile, 13020, ret);
-        goto out;
-    }
-    import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids", "Gathering ancestorid non-leaf IDs...");
-    /* For each key which is an equality key */
-    do {
-        ret = dbc->c_get(dbc, &key, &data, DB_NEXT_NODUP);
-        if ((ret == 0) && (*(char *)key.data == EQ_PREFIX)) {
-            id = (ID)strtoul((char *)key.data + 1, NULL, 10);
-            idl_insert(&nodes, id);
-        }
-        key_count++;
-        if (!(key_count % PROGRESS_INTERVAL)) {
-            import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids",
-                              "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
-                              (key_count * 100 / job->numsubordinates), key_count);
-            started_progress_logging = 1;
-        }
-    } while (ret == 0 && !(job->flags & FLAG_ABORT));
-
-    if (started_progress_logging) {
-        /* finish what we started logging */
-        import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids",
-                          "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
-                          (key_count * 100 / job->numsubordinates), key_count);
-    }
-    import_log_notice(job, SLAPI_LOG_INFO, "ldbm_get_nonleaf_ids",
-                      "Finished gathering ancestorid non-leaf IDs.");
-    /* Check for success */
-    if (ret == DB_NOTFOUND)
-        ret = 0;
-    if (ret != 0)
-        ldbm_nasty("ldbm_get_nonleaf_ids", sourcefile, 13030, ret);
-
-out:
-    /* Close the cursor */
-    if (dbc != NULL) {
-        if (ret == 0) {
-            ret = dbc->c_close(dbc);
-            if (ret != 0)
-                ldbm_nasty("ldbm_get_nonleaf_ids", sourcefile, 13040, ret);
-        } else {
-            (void)dbc->c_close(dbc);
-        }
-    }
-
-    /* Release the parentid file */
-    if (db != NULL) {
-        dblayer_release_index_file(be, ai, db);
-    }
-
-    /* Return the idlist */
-    if (ret == 0) {
-        *idl = nodes;
-        slapi_log_err(SLAPI_LOG_TRACE, "ldbm_get_nonleaf_ids", "Found %lu nodes for ancestorid\n",
-                      (u_long)IDL_NIDS(nodes));
-    } else {
-        idl_free(&nodes);
-        *idl = NULL;
-    }
-
-    return ret;
-}
-
-/*
- * XXX: This function creates ancestorid index, which is a sort of hack.
- *      This function handles idl directly,
- *      which should have been implemented in the idl file(s).
- *      When the idl code would be updated in the future,
- *      this function may also get affected.
- *      (see also bug#: 605535)
- *
- * Construct the ancestorid index. Requirements:
- * - The backend is read only.
- * - The parentid index is accurate.
- * - Non-leaf entries have IDs less than their descendants
- *   (guaranteed after a database import but not after a subtree move)
- *
- */
-int
-ldbm_ancestorid_create_index(backend *be, ImportJob *job)
-{
-    return (idl_get_idl_new()) ? ldbm_ancestorid_new_idl_create_index(be, job) : ldbm_ancestorid_default_create_index(be, job);
-}
-
-/*
- * Create the ancestorid index.  This version is safe to
- * use whichever IDL mode is active.  However, it may be
- * quite a bit slower than ldbm_ancestorid_new_idl_create_index()
- * when the new mode is used, particularly with large databases.
- */
-static int
-ldbm_ancestorid_default_create_index(backend *be, ImportJob *job)
-{
-    int key_count = 0;
-    int ret = 0;
-    DB *db_pid = NULL;
-    DB *db_aid = NULL;
-    DBT key = {0};
-    DB_TXN *txn = NULL;
-    struct attrinfo *ai_pid = NULL;
-    struct attrinfo *ai_aid = NULL;
-    char keybuf[24];
-    IDList *nodes = NULL;
-    IDList *children = NULL, *descendants = NULL;
-    NIDS nids;
-    ID id, parentid;
-    id2idl_hash *ht = NULL;
-    id2idl *ididl;
-    int started_progress_logging = 0;
-
-    /*
-     * We need to iterate depth-first through the non-leaf nodes
-     * in the tree amassing an idlist of descendant ids for each node.
-     * We would prefer to go through the parentid keys just once from
-     * highest id to lowest id but the btree ordering is by string
-     * rather than number. So we go through the parentid keys in btree
-     * order first of all to create an idlist of all the non-leaf nodes.
-     * Then we can use the idlist to iterate through parentid in the
-     * correct order.
-     */
-
-    /* Get the non-leaf node IDs */
-    ret = ldbm_get_nonleaf_ids(be, txn, &nodes, job);
-    if (ret != 0)
-        return ret;
-
-    /* Get the ancestorid index */
-    ainfo_get(be, LDBM_ANCESTORID_STR, &ai_aid);
-
-    /* Prevent any other use of the index */
-    ai_aid->ai_indexmask |= INDEX_OFFLINE;
-
-    /* Open the ancestorid index file */
-    ret = dblayer_get_index_file(be, ai_aid, &db_aid, DBOPEN_CREATE);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_ancestorid_default_create_index", sourcefile, 13050, ret);
-        goto out;
-    }
-
-    /* Maybe nothing to do */
-    if (nodes == NULL || nodes->b_nids == 0) {
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_default_create_index",
-                      "Nothing to do to build ancestorid index\n");
-        goto out;
-    }
-
-    /* Create an ancestorid cache */
-    ht = id2idl_new_hash(nodes->b_nids);
-
-    /* Get the parentid index */
-    ainfo_get(be, LDBM_PARENTID_STR, &ai_pid);
-
-    /* Open the parentid index file */
-    ret = dblayer_get_index_file(be, ai_pid, &db_pid, DBOPEN_CREATE);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_ancestorid_default_create_index", sourcefile, 13060, ret);
-        goto out;
-    }
-
-    /* Initialize key DBT */
-    key.data = keybuf;
-    key.ulen = sizeof(keybuf);
-    key.flags = DB_DBT_USERMEM;
-
-    import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_default_create_index",
-                      "Creating ancestorid index (old idl)...");
-    /* Iterate from highest to lowest ID */
-    nids = nodes->b_nids;
-    do {
-
-        nids--;
-        id = nodes->b_ids[nids];
-
-        /* Get immediate children from parentid index */
-        key.size = PR_snprintf(key.data, key.ulen, "%c%lu",
-                               EQ_PREFIX, (u_long)id);
-        key.size++; /* include the null terminator */
-        ret = NEW_IDL_NO_ALLID;
-        children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
-        if (ret != 0) {
-            ldbm_nasty("ldbm_ancestorid_default_create_index", sourcefile, 13070, ret);
-            break;
-        }
-
-        /* check if we need to abort */
-        if (job->flags & FLAG_ABORT) {
-            import_log_notice(job, SLAPI_LOG_ERR, "ldbm_ancestorid_default_create_index",
-                              "ancestorid creation aborted.");
-            ret = -1;
-            break;
-        }
-
-        key_count++;
-        if (!(key_count % PROGRESS_INTERVAL)) {
-            import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_default_create_index",
-                              "Creating ancestorid index: processed %d%% (ID count %d)",
-                              (key_count * 100 / job->numsubordinates), key_count);
-            started_progress_logging = 1;
-        }
-
-        /* Insert into ancestorid for this node */
-        if (id2idl_hash_lookup(ht, &id, &ididl)) {
-            descendants = idl_union_allids(be, ai_aid, ididl->idl, children);
-            idl_free(&children);
-            if (id2idl_hash_remove(ht, &id) == 0) {
-                slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_default_create_index",
-                              "id2idl_hash_remove() failed\n");
-            } else {
-                id2idl_free(&ididl);
-            }
-        } else {
-            descendants = children;
-        }
-        ret = idl_store_block(be, db_aid, &key, descendants, txn, ai_aid);
-        if (ret != 0)
-            break;
-
-        /* Get parentid for this entry */
-        ret = ldbm_parentid(be, txn, id, &parentid);
-        if (ret != 0) {
-            idl_free(&descendants);
-            break;
-        }
-
-        /* A suffix entry does not have a parent */
-        if (parentid == NOID) {
-            idl_free(&descendants);
-            continue;
-        }
-
-        /* Insert into ancestorid for this node's parent */
-        if (id2idl_hash_lookup(ht, &parentid, &ididl)) {
-            IDList *idl = idl_union_allids(be, ai_aid, ididl->idl, descendants);
-            idl_free(&descendants);
-            idl_free(&(ididl->idl));
-            ididl->idl = idl;
-        } else {
-            ididl = (id2idl *)slapi_ch_calloc(1, sizeof(id2idl));
-            ididl->keyid = parentid;
-            ididl->idl = descendants;
-            if (id2idl_hash_add(ht, &parentid, ididl, NULL) == 0) {
-                slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_default_create_index ",
-                              "id2idl_hash_add failed\n");
-            }
-        }
-
-    } while (nids > 0);
-
-    if (ret != 0) {
-        goto out;
-    }
-
-    /* We're expecting the cache to be empty */
-    ret = check_cache(ht);
-
-out:
-
-    /* Destroy the cache */
-    id2idl_hash_destroy(ht);
-
-    /* Free any leftover idlists */
-    idl_free(&nodes);
-
-    /* Release the parentid file */
-    if (db_pid != NULL) {
-        dblayer_release_index_file(be, ai_pid, db_pid);
-    }
-
-    /* Release the ancestorid file */
-    if (db_aid != NULL) {
-        dblayer_release_index_file(be, ai_aid, db_aid);
-    }
-
-    /* Enable the index */
-    if (ret == 0) {
-        if (started_progress_logging) {
-            /* finish what we started logging */
-            import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_default_create_index",
-                              "Creating ancestorid index: processed %d%% (ID count %d)",
-                              (key_count * 100 / job->numsubordinates), key_count);
-        }
-        import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_default_create_index",
-                          "Created ancestorid index (old idl).");
-        ai_aid->ai_indexmask &= ~INDEX_OFFLINE;
-    }
-
-    return ret;
-}
-
-/*
- * Create the ancestorid index.  This version expects to use
- * idl_new_store_block() and should be used when idl_new != 0.
- * It has lower overhead and can be faster than
- * ldbm_ancestorid_default_create_index(), particularly on
- * large databases.  Cf. bug 469800.
- */
-static int
-ldbm_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
-{
-    int key_count = 0;
-    int ret = 0;
-    DB *db_pid = NULL;
-    DB *db_aid = NULL;
-    DBT key = {0};
-    DB_TXN *txn = NULL;
-    struct attrinfo *ai_pid = NULL;
-    struct attrinfo *ai_aid = NULL;
-    char keybuf[24];
-    IDList *nodes = NULL;
-    IDList *children = NULL;
-    NIDS nids;
-    ID id, parentid;
-    int started_progress_logging = 0;
-
-    /*
-     * We need to iterate depth-first through the non-leaf nodes
-     * in the tree amassing an idlist of descendant ids for each node.
-     * We would prefer to go through the parentid keys just once from
-     * highest id to lowest id but the btree ordering is by string
-     * rather than number. So we go through the parentid keys in btree
-     * order first of all to create an idlist of all the non-leaf nodes.
-     * Then we can use the idlist to iterate through parentid in the
-     * correct order.
-     */
-
-    /* Bail now if we did not get here honestly. */
-    if (!idl_get_idl_new()) {
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_new_idl_create_index",
-                      "Cannot create ancestorid index.  "
-                      "New IDL version called but idl_new is false!\n");
-        return 1;
-    }
-
-    /* Get the non-leaf node IDs */
-    ret = ldbm_get_nonleaf_ids(be, txn, &nodes, job);
-    if (ret != 0)
-        return ret;
-
-    /* Get the ancestorid index */
-    ainfo_get(be, LDBM_ANCESTORID_STR, &ai_aid);
-
-    /* Prevent any other use of the index */
-    ai_aid->ai_indexmask |= INDEX_OFFLINE;
-
-    /* Open the ancestorid index file */
-    ret = dblayer_get_index_file(be, ai_aid, &db_aid, DBOPEN_CREATE);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_ancestorid_new_idl_create_index", sourcefile, 13050, ret);
-        goto out;
-    }
-
-    /* Maybe nothing to do */
-    if (nodes == NULL || nodes->b_nids == 0) {
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_new_idl_create_index",
-                      "Nothing to do to build ancestorid index\n");
-        goto out;
-    }
-
-    /* Get the parentid index */
-    ainfo_get(be, LDBM_PARENTID_STR, &ai_pid);
-
-    /* Open the parentid index file */
-    ret = dblayer_get_index_file(be, ai_pid, &db_pid, DBOPEN_CREATE);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_ancestorid_new_idl_create_index", sourcefile, 13060, ret);
-        goto out;
-    }
-
-    /* Initialize key DBT */
-    key.data = keybuf;
-    key.ulen = sizeof(keybuf);
-    key.flags = DB_DBT_USERMEM;
-
-    import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_new_idl_create_index",
-                      "Creating ancestorid index (new idl)...");
-    /* Iterate from highest to lowest ID */
-    nids = nodes->b_nids;
-    do {
-
-        nids--;
-        id = nodes->b_ids[nids];
-
-        /* Get immediate children from parentid index */
-        key.size = PR_snprintf(key.data, key.ulen, "%c%lu",
-                               EQ_PREFIX, (u_long)id);
-        key.size++; /* include the null terminator */
-        ret = NEW_IDL_NO_ALLID;
-        children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
-        if (ret != 0) {
-            ldbm_nasty("ldbm_ancestorid_new_idl_create_index", sourcefile, 13070, ret);
-            break;
-        }
-
-        /* check if we need to abort */
-        if (job->flags & FLAG_ABORT) {
-            import_log_notice(job, SLAPI_LOG_ERR, "ldbm_ancestorid_new_idl_create_index",
-                              "ancestorid creation aborted.");
-            ret = -1;
-            break;
-        }
-
-        key_count++;
-        if (!(key_count % PROGRESS_INTERVAL)) {
-            import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_new_idl_create_index",
-                              "Creating ancestorid index: progress %d%% (ID count %d)",
-                              (key_count * 100 / job->numsubordinates), key_count);
-            started_progress_logging = 1;
-        }
-
-        /* Instead of maintaining a full accounting of IDs in a hashtable
-         * as is done with ldbm_ancestorid_default_create_index(), perform
-         * incremental updates straight to the DB with idl_new_store_block()
-         * (used by idl_store_block() when idl_get_idl_new() is true).  This
-         * can be a significant performance improvement with large databases,
-         * where  the overhead of maintaining and copying the lists is very
-         * expensive, particularly when the allids threshold is not being
-         * used to provide any cut off.  Cf. bug 469800.
-         * TEL 20081029 */
-
-        /* Insert into ancestorid for this node */
-        ret = idl_store_block(be, db_aid, &key, children, txn, ai_aid);
-        if (ret != 0) {
-            idl_free(&children);
-            break;
-        }
-
-        /* Get parentid(s) for this entry */
-        while (1) {
-            ret = ldbm_parentid(be, txn, id, &parentid);
-            if (ret != 0) {
-                slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_new_idl_create_index",
-                              "Failure: ldbm_parentid on node index [" ID_FMT "] of [" ID_FMT "]\n",
-                              nids, nodes->b_nids);
-                idl_free(&children);
-                goto out;
-            }
-
-            /* A suffix entry does not have a parent */
-            if (parentid == NOID) {
-                idl_free(&children);
-                break;
-            }
-
-            /* Reset the key to the parent id */
-            key.size = PR_snprintf(key.data, key.ulen, "%c%lu",
-                                   EQ_PREFIX, (u_long)parentid);
-            key.size++;
-
-            /* Insert into ancestorid for this node's parent */
-            ret = idl_store_block(be, db_aid, &key, children, txn, ai_aid);
-            if (ret != 0) {
-                idl_free(&children);
-                goto out;
-            }
-            id = parentid;
-        }
-    } while (nids > 0);
-
-    if (ret != 0) {
-        goto out;
-    }
-
-out:
-    if (ret == 0) {
-        if (started_progress_logging) {
-            /* finish what we started logging */
-            import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_new_idl_create_index",
-                              "Creating ancestorid index: processed %d%% (ID count %d)",
-                              (key_count * 100 / job->numsubordinates), key_count);
-        }
-        import_log_notice(job, SLAPI_LOG_INFO, "ldbm_ancestorid_new_idl_create_index",
-                          "Created ancestorid index (new idl).");
-    } else {
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_ancestorid_new_idl_create_index",
-                      "Failed to create ancestorid index\n");
-    }
-
-    /* Free any leftover idlists */
-    idl_free(&nodes);
-
-    /* Release the parentid file */
-    if (db_pid != NULL) {
-        dblayer_release_index_file(be, ai_pid, db_pid);
-    }
-
-    /* Release the ancestorid file */
-    if (db_aid != NULL) {
-        dblayer_release_index_file(be, ai_aid, db_aid);
-    }
-
-    /* Enable the index */
-    if (ret == 0) {
-        ai_aid->ai_indexmask &= ~INDEX_OFFLINE;
-    }
-
-    return ret;
-}
-
-
-/*
- * Get parentid of an id by reading the operational attr from id2entry.
- */
-static int
-ldbm_parentid(backend *be, DB_TXN *txn, ID id, ID *ppid)
-{
-    int ret = 0;
-    DB *db = NULL;
-    DBT key = {0};
-    DBT data = {0};
-    ID stored_id;
-    char *p;
-
-    /* Open the id2entry file */
-    ret = dblayer_get_id2entry(be, &db);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_parentid", sourcefile, 13100, ret);
-        goto out;
-    }
-
-    /* Initialize key and data DBTs */
-    id_internal_to_stored(id, (char *)&stored_id);
-    key.data = (char *)&stored_id;
-    key.size = sizeof(stored_id);
-    key.flags = DB_DBT_USERMEM;
-    data.flags = DB_DBT_MALLOC;
-
-    /* Read id2entry */
-    ret = db->get(db, txn, &key, &data, 0);
-    if (ret != 0) {
-        ldbm_nasty("ldbm_parentid", sourcefile, 13110, ret);
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_parentid",
-                      "Unable to find entry id [" ID_FMT "] (original [" ID_FMT "])"
-                      " in id2entry\n",
-                      stored_id, id);
-        goto out;
-    }
-
-/* Extract the parentid value */
-#define PARENTID_STR "\nparentid:"
-    p = strstr(data.data, PARENTID_STR);
-    if (p == NULL) {
-        *ppid = NOID;
-        goto out;
-    }
-    *ppid = strtoul(p + strlen(PARENTID_STR), NULL, 10);
-
-out:
-    /* Free the entry value */
-    slapi_ch_free(&(data.data));
-
-    /* Release the id2entry file */
-    if (db != NULL) {
-        dblayer_release_id2entry(be, db);
-    }
-    return ret;
-}
-
-static void
-id2idl_free(id2idl **ididl)
-{
-    idl_free(&((*ididl)->idl));
-    slapi_ch_free((void **)ididl);
-}
-
-static int
-id2idl_same_key(const void *ididl, const void *k)
-{
-    return (((id2idl *)ididl)->keyid == *(ID *)k);
-}
-
-static int
-check_cache(id2idl_hash *ht)
-{
-    id2idl *e;
-    u_long i, found = 0;
-    int ret = 0;
-
-    if (ht == NULL)
-        return 0;
-
-    for (i = 0; i < ht->size; i++) {
-        e = (id2idl *)ht->slot[i];
-        while (e) {
-            found++;
-            e = e->next;
-        }
-    }
-
-    if (found > 0) {
-        slapi_log_err(SLAPI_LOG_ERR, "check_cache",
-                      "parentid index is not complete (%lu extra keys in ancestorid cache)\n", found);
-        ret = -1;
-    }
-
-    return ret;
-}
-
-static void
-id2idl_hash_destroy(id2idl_hash *ht)
-{
-    u_long i;
-    id2idl *e, *next;
-
-    if (ht == NULL)
-        return;
-
-    for (i = 0; i < ht->size; i++) {
-        e = (id2idl *)ht->slot[i];
-        while (e) {
-            next = e->next;
-            id2idl_free(&e);
-            e = next;
-        }
-    }
-    slapi_ch_free((void **)&ht);
-}
-
-/*
- * idl_union_allids - return a union b
- * takes attr index allids setting into account
- */
-static IDList *
-idl_union_allids(backend *be, struct attrinfo *ai, IDList *a, IDList *b)
-{
-    if (!idl_get_idl_new()) {
-        if (a != NULL && b != NULL) {
-            if (ALLIDS(a) || ALLIDS(b) ||
-                (IDL_NIDS(a) + IDL_NIDS(b) > idl_get_allidslimit(ai, 0))) {
-                return (idl_allids(be));
-            }
-        }
-    }
-    return idl_union(be, a, b);
-}
+static char *sourcefile = "ancestorid.c";
 
 static int
 ancestorid_addordel(

+ 19 - 18
ldap/servers/slapd/back-ldbm/archive.c

@@ -21,20 +21,15 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb)
     struct ldbminfo *li;
     char *rawdirectory = NULL; /* -a <directory> */
     char *directory = NULL;    /* normalized */
-    char *backendname = NULL;
     int return_value = -1;
     int task_flags = 0;
     int run_from_cmdline = 0;
     Slapi_Task *task;
     int is_old_to_new = 0;
     ldbm_instance *inst = NULL;
-    char *dbversion = NULL;
-    char *dataversion = NULL;
-    int value = 0;
 
     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
     slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawdirectory);
-    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &backendname);
     slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &task);
     slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
     li->li_flags = run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE);
@@ -45,6 +40,7 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb)
     }
 
     directory = rel2abspath(rawdirectory);
+    /* skip check for version and idl upgrade
     return_value = dbversion_read(li, directory, &dbversion, &dataversion);
     if (return_value) {
         if (ENOENT == return_value) {
@@ -55,8 +51,8 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb)
         slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_archive2ldbm",
                       "Unable to read dbversion file in %s\n", directory);
     }
-
-    /* check the current idl format vs backup DB version */
+*/
+    /* check the current idl format vs backup DB version
     if (idl_get_idl_new()) {
         value = lookup_dbversion(dbversion, DBVERSION_TYPE);
         if (value & DBVERSION_OLD_IDL) {
@@ -65,11 +61,16 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb)
     }
     slapi_ch_free_string(&dbversion);
     slapi_ch_free_string(&dataversion);
+*/
 
     /* No ldbm be's exist until we process the config information. */
     if (run_from_cmdline) {
         mapping_tree_init();
-        ldbm_config_load_dse_info(li);
+
+        if (dblayer_setup(li)) {
+            slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "dblayer_setup failed\n");
+            return -1;
+        }
 
         /* initialize a restore file to be able to detect a startup after restore */
         if (dblayer_restore_file_init(li)) {
@@ -77,14 +78,6 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb)
             return -1;
         }
     }
-    if (backendname) {
-        inst = ldbm_instance_find_by_name(li, backendname);
-        if (NULL == inst) {
-            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_archive2ldbm", "Backend \"%s\" does not exist.\n",
-                          backendname);
-            return -1;
-        }
-    }
     if (!run_from_cmdline) {
         Object *inst_obj, *inst_obj2;
 
@@ -162,7 +155,7 @@ ldbm_back_archive2ldbm(Slapi_PBlock *pb)
     }
 
     /* tell the database to restore */
-    return_value = dblayer_restore(li, directory, task, backendname);
+    return_value = dblayer_restore(li, directory, task);
     if (0 != return_value) {
         slapi_log_err(SLAPI_LOG_ERR,
                       "ldbm_back_archive2ldbm", "Failed to read backup file set. "
@@ -286,8 +279,16 @@ ldbm_back_ldbm2archive(Slapi_PBlock *pb)
     /* start the database code up, do not attempt to perform recovery */
     if (run_from_cmdline) {
         /* No ldbm be's exist until we process the config information. */
+
+    /* copied here, need better solution */
+    /* initialize dblayer  */
+        if (dblayer_setup(li)) {
+            slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "dblayer_setup failed\n");
+            goto out;
+        }
+
         mapping_tree_init();
-        ldbm_config_load_dse_info(li);
+
         if (0 != (return_value =
                       dblayer_start(li,
                                     DBLAYER_ARCHIVE_MODE | DBLAYER_NO_DBTHREADS_MODE))) {

+ 4 - 4
ldap/servers/slapd/back-ldbm/back-ldbm.h

@@ -388,7 +388,6 @@ struct cache
 
 /* various modules keep private data inside the attrinfo structure */
 typedef struct dblayer_private     dblayer_private;
-typedef struct dblayer_private_env dblayer_private_env;
 typedef struct idl_private         idl_private;
 typedef struct attrcrypt_private   attrcrypt_private;
 
@@ -569,11 +568,11 @@ struct ldbminfo
                                          * use this as the entry cache size (0 = autosize off) */
     uint64_t li_dncache_autosize_ec;    /* Same as above, but dncache. */
     uint64_t li_import_cachesize;       /* size of the mpool for imports */
-    PRLock *li_dbcache_mutex;
-    PRCondVar *li_dbcache_cv;
     int li_shutdown;                     /* flag to tell any BE threads to end */
     PRLock *li_shutdown_mutex;           /* protect shutdown flag */
     dblayer_private *li_dblayer_private; /* session ptr for databases */
+    void *li_dblayer_config;             /* pointer to specific backend implementation */
+    char *li_backend_implement;          /* low layer backend implementation */
     int li_noparentcheck;                /* check if parent exists on add */
 
     /* the next 3 fields are for the params that don't get changed until
@@ -754,7 +753,7 @@ typedef struct ldbm_instance
     long inst_cache_misses;
 
     char *inst_dataversion;          /* The user data version tag.  Used by replication. */
-    dblayer_private_env *import_env; /* use a different DB_ENV for imports */
+    void *inst_db;                   /* implementation specific instance data */
     int require_index;               /* set to 1 to require an index be used in search */
     struct cache inst_dncache;       /* The dn cache for this instance. */
 } ldbm_instance;
@@ -871,4 +870,5 @@ typedef struct _back_search_result_set
 #define DBT_EQ(L, R) \
     ((L)->dsize == (R)->dsize && !memcmp((L)->dptr, (R)->dptr, (L)->dsize))
 
+typedef int backend_implement_init_fn(struct ldbminfo *li, config_info *config_array);
 #endif /* _back_ldbm_h_ */

+ 7 - 6
ldap/servers/slapd/back-ldbm/cleanup.c

@@ -14,6 +14,7 @@
 /* cleanup.c - cleans up ldbm backend */
 
 #include "back-ldbm.h"
+#include "dblayer.h"
 
 int
 ldbm_back_cleanup(Slapi_PBlock *pb)
@@ -48,18 +49,18 @@ ldbm_back_cleanup(Slapi_PBlock *pb)
      * We check if li is NULL. Because of an issue in how we create backends
      * we share the li and plugin info between many unique backends. This causes
      * be_cleanall to try to trigger this multiple times. But we don't need to!
-     * dblayer_terminate is sufficent to be called once for each instance of
+     * the backend cleanup is sufficient to be called once for each instance of
      * ldbminfo. This protects us from heap use after frees while still cleaning
      * up. Ultimately, it's a flaw in how ldbm can have many backends, but for
      * "one" plugin.
      */
     if (li != NULL) {
 
-        dblayer_terminate(li);
-
-        /* JCM I tried adding this to tidy up memory on shutdown. */
-        /* JCM But, the result was very messy. */
-        objset_delete(&(li->li_instance_set));
+        /* call the backend specific cleanup function */
+        dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
+        if (priv) {
+            priv->dblayer_cleanup_fn(li);
+        }
 
         ldbm_config_destroy(li);
 

+ 2207 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_config.c

@@ -0,0 +1,2207 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+/* bdb_config.c - Handles configuration information that is specific to a BDB backend instance. */
+
+#include "bdb_layer.h"
+
+/* Forward declarations */
+static int parse_bdb_config_entry(struct ldbminfo *li, Slapi_Entry *e, config_info *config_array);
+static void split_bdb_config_entry(struct ldbminfo *li, Slapi_Entry *ldbm_conf_e,Slapi_Entry *bdb_conf_e, config_info *config_array, Slapi_Mods *smods);
+
+/* Forward callback declarations */
+int bdb_config_search_entry_callback(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
+int bdb_config_modify_entry_callback(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
+
+/*
+ * Log the linked libdb version at TRACE level and record the library
+ * generation in the bdb config.
+ *
+ * priv - the bdb_config to stamp with the detected library version
+ * Returns 0 always (kept as int for signature symmetry with callers).
+ */
+static int
+_bdb_log_version(bdb_config *priv)
+{
+    /* db_version() fills both out-params; initialize both so a partial
+     * fill can never log stack garbage. */
+    int major = 0, minor = 0;
+    char *string = NULL;
+    int ret = 0;
+
+    string = db_version(&major, &minor, NULL);
+    priv->bdb_lib_version = DBLAYER_LIB_VERSION_POST_24;
+    /* Fixed: log tag previously said "_dblayer_check_version", a leftover
+     * from the pre-split dblayer code this was copied from. */
+    slapi_log_err(SLAPI_LOG_TRACE, "_bdb_log_version", "version check: %s (%d.%d)\n", string, major, minor);
+    return ret;
+}
+
+/*
+ * Backend-implementation init entry point for BDB (matches the
+ * backend_implement_init_fn typedef declared in back-ldbm.h).
+ * Allocates the bdb-specific config, attaches it to li->li_dblayer_config,
+ * and wires every dblayer_private function pointer to its bdb_* counterpart
+ * so the generic LDBM layer can drive this implementation.
+ *
+ * NOTE(review): the config_array parameter is not referenced in this body —
+ * presumably reserved for the dispatcher's signature; confirm with callers.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+int bdb_init(struct ldbminfo *li, config_info *config_array)
+{
+    bdb_config *conf = (bdb_config *)slapi_ch_calloc(1, sizeof(bdb_config));
+    if (NULL == conf) {
+        /* Memory allocation failed */
+        return -1;
+    }
+
+    li->li_dblayer_config = conf;
+    bdb_config_setup_default(li);
+    _bdb_log_version(conf);
+
+    /* write DBVERSION file if one does not exist
+    char *home_dir = bdb_get_home_dir(li, NULL);
+    if (!bdb_version_exists(li, home_dir)) {
+        bdb_version_write(li, home_dir, NULL, DBVERSION_ALL);
+    }
+    */
+
+    /* Populate the implementation-neutral dispatch table; after this the
+     * LDBM layer only ever calls through these pointers. */
+    dblayer_private *priv = li->li_dblayer_private;
+    priv->dblayer_start_fn = &bdb_start;
+    priv->dblayer_close_fn = &bdb_close; 
+    priv->dblayer_instance_start_fn = &bdb_instance_start;
+    priv->dblayer_backup_fn = &bdb_backup; 
+    priv->dblayer_verify_fn = &bdb_verify; 
+    priv->dblayer_db_size_fn = &bdb_db_size; 
+    priv->dblayer_ldif2db_fn = &bdb_ldif2db; 
+    priv->dblayer_db2ldif_fn = &bdb_db2ldif; 
+    priv->dblayer_db2index_fn = &bdb_db2index; 
+    priv->dblayer_cleanup_fn = &bdb_cleanup; 
+    priv->dblayer_upgradedn_fn = &bdb_upgradednformat; 
+    priv->dblayer_upgradedb_fn = &bdb_upgradedb; 
+    priv->dblayer_restore_fn = &bdb_restore;
+    priv->dblayer_txn_begin_fn = &bdb_txn_begin; 
+    priv->dblayer_txn_commit_fn = &bdb_txn_commit; 
+    priv->dblayer_txn_abort_fn = &bdb_txn_abort; 
+    priv->dblayer_get_info_fn = &bdb_get_info;
+    priv->dblayer_set_info_fn = &bdb_set_info;
+    priv->dblayer_back_ctrl_fn = &bdb_back_ctrl; 
+    priv->dblayer_get_db_fn = &bdb_get_db; 
+    priv->dblayer_rm_db_file_fn = &bdb_rm_db_file;
+    priv->dblayer_delete_db_fn = &bdb_delete_db;
+    priv->dblayer_import_fn = &bdb_import_main;
+    priv->dblayer_load_dse_fn = &bdb_config_load_dse_info;
+    priv->dblayer_config_get_fn = &bdb_public_config_get;
+    priv->dblayer_config_set_fn = &bdb_public_config_set;
+    priv->instance_config_set_fn = &bdb_instance_config_set;
+    priv->instance_add_config_fn = &bdb_instance_add_instance_entry_callback;
+    priv->instance_postadd_config_fn = &bdb_instance_postadd_instance_entry_callback;
+    priv->instance_del_config_fn = &bdb_instance_delete_instance_entry_callback;
+    priv->instance_postdel_config_fn = &bdb_instance_post_delete_instance_entry_callback;
+    priv->instance_cleanup_fn = &bdb_instance_cleanup;
+    priv->instance_create_fn = &bdb_instance_create;
+    priv->instance_search_callback_fn = &bdb_instance_search_callback;
+    priv->dblayer_auto_tune_fn = &bdb_start_autotune;
+    return 0;
+}
+
+/* Used to add an array of entries, like the one above and
+ * bdb_instance_skeleton_entries in bdb_instance_config.c, to the dse.
+ * Each element of `entries` is a printf-style template expanded with
+ * string1/string2/string3; the array is terminated by an empty string.
+ * LDBM_INSTANCE_CONFIG_DONT_WRITE in `flags` suppresses writing dse.ldif.
+ * Per-entry failures are logged but do not stop the loop.
+ * Returns 0 on success.
+ */
+int
+bdb_config_add_dse_entries(struct ldbminfo *li, char **entries, char *string1, char *string2, char *string3, int flags)
+{
+    int x;
+    Slapi_Entry *e;
+    Slapi_PBlock *util_pb = NULL;
+    int rc;
+    int result;
+    char entry_string[512];
+    int dont_write_file = 0;
+    char ebuf[BUFSIZ];
+
+    if (flags & LDBM_INSTANCE_CONFIG_DONT_WRITE) {
+        dont_write_file = 1;
+    }
+
+    for (x = 0; strlen(entries[x]) > 0; x++) {
+        util_pb = slapi_pblock_new();
+        PR_snprintf(entry_string, 512, entries[x], string1, string2, string3);
+        e = slapi_str2entry(entry_string, 0);
+        /* NOTE(review): slapi_str2entry's result is used unchecked here —
+         * a malformed template would make e NULL; confirm templates are
+         * always well-formed or add a NULL guard. */
+        PL_strncpyz(ebuf, slapi_entry_get_dn_const(e), sizeof(ebuf)); /* for logging */
+        slapi_add_entry_internal_set_pb(util_pb, e, NULL, li->li_identity, 0);
+        slapi_pblock_set(util_pb, SLAPI_DSE_DONT_WRITE_WHEN_ADDING,
+                         &dont_write_file);
+        rc = slapi_add_internal_pb(util_pb);
+        slapi_pblock_get(util_pb, SLAPI_PLUGIN_INTOP_RESULT, &result);
+        if (!rc && (result == LDAP_SUCCESS)) {
+            slapi_log_err(SLAPI_LOG_CONFIG, "bdb_config_add_dse_entries", "Added database config entry [%s]\n", ebuf);
+        } else if (result == LDAP_ALREADY_EXISTS) {
+            slapi_log_err(SLAPI_LOG_TRACE, "bdb_config_add_dse_entries", "Database config entry [%s] already exists - skipping\n", ebuf);
+        } else {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_add_dse_entries",
+                          "Unable to add config entry [%s] to the DSE: %d %d\n",
+                          ebuf, result, rc);
+        }
+        slapi_pblock_destroy(util_pb);
+    }
+
+    return 0;
+}
+
+/* used to add a single entry, special case of above:
+ * wraps `entry` in a one-element template array ("%s" + terminator)
+ * and delegates to bdb_config_add_dse_entries. Returns 0. */
+int
+bdb_config_add_dse_entry(struct ldbminfo *li, char *entry, int flags)
+{
+    char *entries[] = {"%s", ""};
+
+    return bdb_config_add_dse_entries(li, entries, entry, NULL, NULL, flags);
+}
+
+
+/*------------------------------------------------------------------------
+ * Get and set functions for bdb variables
+ *----------------------------------------------------------------------*/
+
+/* Config getter for nsslapd-db-locks: returns the pending ("new") lock
+ * count packed into a void* (integer-valued getters in this table return
+ * the value itself, not a pointer to it). */
+static void *
+bdb_config_db_lock_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_new_dblock);
+}
+
+
+/*
+ * Config setter for nsslapd-db-locks (max BDB lock count).
+ * value carries the integer packed in a void*; rejects values below
+ * BDB_LOCK_NB_MIN. When applied at runtime only the pending value
+ * (li_new_dblock) changes - the live value takes effect after restart.
+ * Returns LDAP_SUCCESS or LDAP_UNWILLING_TO_PERFORM.
+ */
+static int
+bdb_config_db_lock_set(void *arg, void *value, char *errorbuf, int phase, int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    uint64_t val = (uint64_t)((uintptr_t)value);
+
+    if (val < BDB_LOCK_NB_MIN) {
+        /* Fixed: val is uint64_t, so it must be formatted with PRIu64;
+         * the previous "%d" passed a 64-bit vararg to an int specifier
+         * (undefined behavior, garbage in the client error text). */
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: Invalid value for %s (%" PRIu64 "). Must be greater than %d\n",
+                              CONFIG_DB_LOCK, val, BDB_LOCK_NB_MIN);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_lock_set", "Invalid value for %s (%" PRIu64 ")\n",
+                      CONFIG_DB_LOCK, val);
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+    if (apply) {
+        if (CONFIG_PHASE_RUNNING == phase) {
+            /* Live server: stage only; the env must be recreated to resize. */
+            li->li_new_dblock = val;
+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_db_lock_set",
+                          "New db max lock count will not take effect until the server is restarted\n");
+        } else {
+            li->li_new_dblock = val;
+            li->li_dblock = val;
+        }
+    }
+
+    return retval;
+}
+
+/* Config getter for nsslapd-dbcachesize: returns the pending ("new")
+ * cache size packed into a void*. */
+static void *
+bdb_config_dbcachesize_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_new_dbcachesize);
+}
+
+/*
+ * Config setter for nsslapd-dbcachesize.
+ * Clamps tiny non-zero values up to DBDEFMINSIZ, sanity-checks only the
+ * *growth* (delta) against available memory, refuses runtime changes when
+ * autosizing owns the value, and otherwise stages (or, at startup, also
+ * applies) the new size. 0 is accepted and means "autosize on next start".
+ * Returns LDAP_SUCCESS or LDAP_UNWILLING_TO_PERFORM.
+ */
+static int
+bdb_config_dbcachesize_set(void *arg, void *value, char *errorbuf, int phase, int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    uint64_t val = (size_t)value;
+    uint64_t delta = (size_t)value;
+
+    /* There is an error here. We check the new val against our current mem-alloc
+     * Issue is that we already are using system pages, so while our value *might*
+     * be valid, we may reject it here due to the current procs page usage.
+     *
+     * So how do we solve this? If we are setting a SMALLER value than we
+     * currently have ALLOW it, because we already passed the cache sanity.
+     * If we are setting a LARGER value, we check the delta of the two, and make
+     * sure that it is sane.
+     */
+
+/* Stop the user configuring a stupidly small cache */
+/* min: 8KB (page size) * def thrd cnts (threadnumber==20). */
+#define DBDEFMINSIZ 500000
+    /* We allow a value of 0, because the autotuning in start.c will
+     * register that, and trigger the recalculation of the dbcachesize as
+     * needed on the next start up.
+     */
+    if (val < DBDEFMINSIZ && val > 0) {
+        slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_dbcachesize_set", "cache too small, increasing to %dK bytes\n",
+                      DBDEFMINSIZ / 1000);
+        val = DBDEFMINSIZ;
+    } else if (val > li->li_dbcachesize) {
+        /* Growing: only the additional memory needs to pass the sanity check. */
+        delta = val - li->li_dbcachesize;
+
+        util_cachesize_result sane;
+        slapi_pal_meminfo *mi = spal_meminfo_get();
+        sane = util_is_cachesize_sane(mi, &delta);
+        spal_meminfo_destroy(mi);
+
+        if (sane != UTIL_CACHESIZE_VALID) {
+            slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: nsslapd-dbcachesize value is too large.");
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_dbcachesize_set",
+                          "nsslapd-dbcachesize value is too large.\n");
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+    }
+
+    /* Reject manual runtime changes while autosizing is active, even when
+     * apply is not set, so the admin gets the error at the check phase. */
+    if (CONFIG_PHASE_RUNNING == phase) {
+        if (val > 0 && li->li_cache_autosize) {
+            /* We are auto-tuning the cache, so this change would be overwritten - return an error */
+            slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                                  "Error: \"nsslapd-dbcachesize\" can not be updated while \"nsslapd-cache-autosize\" is set "
+                                  "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\".");
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_dbcachesize_set",
+                          "\"nsslapd-dbcachesize\" can not be set while \"nsslapd-cache-autosize\" is set "
+                          "in \"cn=config,cn=ldbm database,cn=plugins,cn=config\".\n");
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+    }
+
+    if (apply) {
+        if (CONFIG_PHASE_RUNNING == phase) {
+            /* Live server: stage only; picked up on the next restart. */
+            li->li_new_dbcachesize = val;
+            if (val == 0) {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_dbcachesize_set", "cache size reset to 0, will be autosized on next startup.\n");
+            } else {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_dbcachesize_set", "New db cache size will not take affect until the server is restarted\n");
+            }
+        } else {
+            li->li_new_dbcachesize = val;
+            li->li_dbcachesize = val;
+        }
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_maxpassbeforemerge_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_maxpassbeforemerge));
+}
+
+static int
+bdb_config_maxpassbeforemerge_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (val < 0) {
+        slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_maxpassbeforemerge_set",
+                      "maxpassbeforemerge will not take negative value - setting to 100\n");
+        val = 100;
+    }
+
+    if (apply) {
+        li->li_maxpassbeforemerge = val;
+    }
+
+    return retval;
+}
+
+
+static void *
+bdb_config_dbncache_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_new_dbncache));
+}
+
+static int
+bdb_config_dbncache_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase, int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    size_t val = (size_t)((uintptr_t)value);
+
+    if (apply) {
+
+        if (CONFIG_PHASE_RUNNING == phase) {
+            li->li_new_dbncache = val;
+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_config_dbncache_set",
+                          "New nsslapd-dbncache will not take affect until the server is restarted\n");
+        } else {
+            li->li_new_dbncache = val;
+            li->li_dbncache = val;
+        }
+    }
+
+    return retval;
+}
+
+/* Config getter for nsslapd-db-logdirectory.
+ * Returns a freshly allocated copy (table-string getters must return
+ * alloced memory; the caller frees it). Falls back to the database home
+ * directory when no explicit log directory has been configured. */
+void *
+bdb_config_db_logdirectory_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    /* Remember get functions of type string need to return
+     * alloced memory. */
+    /* if bdb_log_directory is set to a string different from ""
+     * then it has been set, return this variable
+     * otherwise it is set to default, use the instance home directory
+     */
+    if (strlen(BDB_CONFIG(li)->bdb_log_directory) > 0)
+        return (void *)slapi_ch_strdup(BDB_CONFIG(li)->bdb_log_directory);
+    else
+        return (void *)slapi_ch_strdup(li->li_new_directory);
+}
+
+/* Does not return a copy of the string - used by disk space monitoring feature */
+/* Same resolution as above, but returns the internal pointer borrowed
+ * from the config - the caller must NOT free it. */
+void *
+bdb_config_db_logdirectory_get_ext(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    if (strlen(BDB_CONFIG(li)->bdb_log_directory) > 0)
+        return (void *)BDB_CONFIG(li)->bdb_log_directory;
+    else
+        return (void *)li->li_new_directory;
+}
+
+static int
+bdb_config_db_logdirectory_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    char *val = (char *)value;
+
+    if (apply) {
+        slapi_ch_free((void **)&(BDB_CONFIG(li)->bdb_log_directory));
+        BDB_CONFIG(li)->bdb_log_directory = slapi_ch_strdup(val);
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_durable_transactions_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_durable_transactions));
+}
+
+static int
+bdb_config_db_durable_transactions_set(void *arg, void *value, char *errorbuf __attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_durable_transactions = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_lockdown_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_lockdown));
+}
+
+static int
+bdb_config_db_lockdown_set(
+    void *arg,
+    void *value,
+    char *errorbuf __attribute__((unused)),
+    int phase __attribute__((unused)),
+    int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_lockdown = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_circular_logging_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_circular_logging));
+}
+
+static int
+bdb_config_db_circular_logging_set(void *arg,
+                                    void *value,
+                                    char *errorbuf __attribute__((unused)),
+                                    int phase __attribute__((unused)),
+                                    int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_circular_logging = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_transaction_logging_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)BDB_CONFIG(li)->bdb_enable_transactions);
+}
+
+static int
+bdb_config_db_transaction_logging_set(void *arg,
+                                       void *value,
+                                       char *errorbuf __attribute__((unused)),
+                                       int phase __attribute__((unused)),
+                                       int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_enable_transactions = val;
+    }
+
+    return retval;
+}
+
+
+static void *
+bdb_config_db_transaction_wait_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_txn_wait));
+}
+
+static int
+bdb_config_db_transaction_wait_set(void *arg,
+                                    void *value,
+                                    char *errorbuf __attribute__((unused)),
+                                    int phase __attribute__((unused)),
+                                    int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_txn_wait = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_logbuf_size_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_logbuf_size));
+}
+
+static int
+bdb_config_db_logbuf_size_set(void *arg,
+                               void *value,
+                               char *errorbuf __attribute__((unused)),
+                               int phase __attribute__((unused)),
+                               int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    uint64_t val = (uint64_t)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_logbuf_size = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_checkpoint_interval_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_checkpoint_interval));
+}
+
+static int
+bdb_config_db_checkpoint_interval_set(void *arg,
+                                       void *value,
+                                       char *errorbuf __attribute__((unused)),
+                                       int phase __attribute__((unused)),
+                                       int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_checkpoint_interval = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_compactdb_interval_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_compactdb_interval));
+}
+
+static int
+bdb_config_db_compactdb_interval_set(void *arg,
+                                      void *value,
+                                      char *errorbuf __attribute__((unused)),
+                                      int phase __attribute__((unused)),
+                                      int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_compactdb_interval = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_page_size_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_page_size));
+}
+
+static int
+bdb_config_db_page_size_set(void *arg,
+                             void *value,
+                             char *errorbuf __attribute__((unused)),
+                             int phase __attribute__((unused)),
+                             int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    uint32_t val = (uint32_t)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_page_size = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_index_page_size_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_index_page_size));
+}
+
+static int
+bdb_config_db_index_page_size_set(void *arg,
+                                   void *value,
+                                   char *errorbuf __attribute__((unused)),
+                                   int phase __attribute__((unused)),
+                                   int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    uint32_t val = (uint32_t)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_index_page_size = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_old_idl_maxids_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_old_idl_maxids);
+}
+
+/* Config setter for nsslapd-db-old-idl-maxids: accepts values >= 0.
+ * NOTE(review): the range check runs only inside the apply branch, so a
+ * validate-only pass (apply == 0) silently accepts a negative value -
+ * confirm whether validation should happen unconditionally.
+ * Returns LDAP_SUCCESS or LDAP_UNWILLING_TO_PERFORM. */
+static int
+bdb_config_db_old_idl_maxids_set(void *arg,
+                                  void *value,
+                                  char *errorbuf,
+                                  int phase __attribute__((unused)),
+                                  int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        if (val >= 0) {
+            li->li_old_idl_maxids = val;
+        } else {
+            slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                                  "Error: Invalid value for %s (%d). Value must be equal or greater than zero.",
+                                  CONFIG_DB_OLD_IDL_MAXIDS, val);
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_logfile_size_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_logfile_size));
+}
+
+static int
+bdb_config_db_logfile_size_set(void *arg,
+                                void *value,
+                                char *errorbuf __attribute__((unused)),
+                                int phase __attribute__((unused)),
+                                int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    uint64_t val = (uint64_t)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_logfile_size = val;
+    }
+
+    return retval;
+}
+
+static void *
+bdb_config_db_spin_count_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_spin_count));
+}
+
+static int
+bdb_config_db_spin_count_set(void *arg,
+                              void *value,
+                              char *errorbuf __attribute__((unused)),
+                              int phase __attribute__((unused)),
+                              int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_spin_count = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the BDB memp_trickle percentage as an int-in-void*. */
+static void *
+bdb_config_db_trickle_percentage_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_trickle_percentage));
+}
+
+/* dse config setter: store the BDB memp_trickle percentage.
+ * Rejects values outside 0..100 with LDAP_UNWILLING_TO_PERFORM before any
+ * state is touched; applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_trickle_percentage_set(void *arg,
+                                      void *value,
+                                      char *errorbuf,
+                                      int phase __attribute__((unused)),
+                                      int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (val < 0 || val > 100) {
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: Invalid value for %s (%d). Must be between 0 and 100\n",
+                              CONFIG_DB_TRICKLE_PERCENTAGE, val);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_trickle_percentage_set",
+                      "Invalid value for %s (%d). Must be between 0 and 100\n",
+                      CONFIG_DB_TRICKLE_PERCENTAGE, val);
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_trickle_percentage = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the BDB debug on/off flag. */
+static void *
+bdb_config_db_debug_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_debug));
+}
+
+/* dse config setter: store the BDB debug flag; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_debug_set(void *arg,
+                         void *value,
+                         char *errorbuf __attribute__((unused)),
+                         int phase __attribute__((unused)),
+                         int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_debug = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the BDB verbose-debug on/off flag. */
+static void *
+bdb_config_db_verbose_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_debug_verbose));
+}
+
+/* dse config setter: store the BDB verbose-debug flag; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_verbose_set(void *arg,
+                           void *value,
+                           char *errorbuf __attribute__((unused)),
+                           int phase __attribute__((unused)),
+                           int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_debug_verbose = val;
+    }
+
+    return retval;
+}
+/* dse config getter: return the BDB named-regions on/off flag. */
+static void *
+bdb_config_db_named_regions_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_named_regions));
+}
+
+/* dse config setter: store the BDB named-regions flag; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_named_regions_set(void *arg,
+                                 void *value,
+                                 char *errorbuf __attribute__((unused)),
+                                 int phase __attribute__((unused)),
+                                 int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_named_regions = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the BDB private-memory on/off flag. */
+static void *
+bdb_config_db_private_mem_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_private_mem));
+}
+
+/* dse config setter: store the BDB private-memory flag; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_private_mem_set(void *arg,
+                               void *value,
+                               char *errorbuf __attribute__((unused)),
+                               int phase __attribute__((unused)),
+                               int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_private_mem = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the online-import-encrypt flag.
+ * NOTE: unlike most settings in this file, this one lives on the generic
+ * ldbminfo (li->li_online_import_encrypt), not in the BDB-private config. */
+static void *
+bdb_config_db_online_import_encrypt_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_online_import_encrypt);
+}
+
+/* dse config setter: store the online-import-encrypt flag on the generic
+ * ldbminfo; no validation, applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_online_import_encrypt_set(void *arg,
+                                         void *value,
+                                         char *errorbuf __attribute__((unused)),
+                                         int phase __attribute__((unused)),
+                                         int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        li->li_online_import_encrypt = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the private-import-memory on/off flag. */
+static void *
+bdb_config_db_private_import_mem_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_private_import_mem));
+}
+
+/* dse config setter: store the private-import-memory flag; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_private_import_mem_set(void *arg,
+                                      void *value,
+                                      char *errorbuf __attribute__((unused)),
+                                      int phase __attribute__((unused)),
+                                      int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_private_import_mem = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the BDB shared-memory key.
+ * The key is cast directly into the void* (registered as CONFIG_TYPE_LONG). */
+static void *
+bdb_config_db_shm_key_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)BDB_CONFIG(li)->bdb_shm_key;
+}
+
+/* dse config setter: store the BDB shared-memory key; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_shm_key_set(
+    void *arg,
+    void *value,
+    char *errorbuf __attribute__((unused)),
+    int phase __attribute__((unused)),
+    int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_shm_key = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the raw BDB cache config value. */
+static void *
+bdb_config_db_cache_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_cache_config));
+}
+
+/* dse config setter: store the BDB cache config value.
+ * Only an INCREASE over the current value is sanity-checked (see the
+ * in-line comment below for why); a decrease is always accepted.
+ * Returns LDAP_UNWILLING_TO_PERFORM when the increase is not sane. */
+static int
+bdb_config_db_cache_set(void *arg,
+                         void *value,
+                         char *errorbuf,
+                         int phase __attribute__((unused)),
+                         int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = ((uintptr_t)value);
+    uint64_t delta = 0;
+
+    /* There is an error here. We check the new val against our current mem-alloc
+     * Issue is that we already are using system pages, so while our value *might*
+     * be valid, we may reject it here due to the current procs page usage.
+     *
+     * So how do we solve this? If we are setting a SMALLER value than we
+     * currently have ALLOW it, because we already passed the cache sanity.
+     * If we are setting a LARGER value, we check the delta of the two, and make
+     * sure that it is sane.
+     */
+
+    if (val > BDB_CONFIG(li)->bdb_cache_config) {
+        delta = val - BDB_CONFIG(li)->bdb_cache_config;
+        util_cachesize_result sane;
+
+        /* Check only the additional memory against the system's headroom. */
+        slapi_pal_meminfo *mi = spal_meminfo_get();
+        sane = util_is_cachesize_sane(mi, &delta);
+        spal_meminfo_destroy(mi);
+
+        if (sane != UTIL_CACHESIZE_VALID) {
+            slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: db cachesize value is too large");
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_cache_set", "db cachesize value is too large.\n");
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+    }
+    if (apply) {
+        BDB_CONFIG(li)->bdb_cache_config = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the debug-checkpointing on/off flag. */
+static void *
+bdb_config_db_debug_checkpointing_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_debug_checkpointing));
+}
+
+/* dse config setter: store the debug-checkpointing flag; no validation,
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_db_debug_checkpointing_set(void *arg,
+                                       void *value,
+                                       char *errorbuf __attribute__((unused)),
+                                       int phase __attribute__((unused)),
+                                       int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_debug_checkpointing = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return a COPY of the BDB home directory path;
+ * the caller owns (and must free) the returned string. */
+static void *
+bdb_config_db_home_directory_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    /* Remember get functions of type string need to return
+     * alloced memory. */
+    return (void *)slapi_ch_strdup(BDB_CONFIG(li)->bdb_dbhome_directory);
+}
+
+/* dse config setter: replace the BDB home directory path.  Frees the old
+ * string and stores a private duplicate of 'value' when 'apply' is set. */
+static int
+bdb_config_db_home_directory_set(void *arg,
+                                  void *value,
+                                  char *errorbuf __attribute__((unused)),
+                                  int phase __attribute__((unused)),
+                                  int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    char *val = (char *)value;
+
+    if (apply) {
+        slapi_ch_free((void **)&(BDB_CONFIG(li)->bdb_dbhome_directory));
+        BDB_CONFIG(li)->bdb_dbhome_directory = slapi_ch_strdup(val);
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the import-cache autosize percentage
+ * (stored on the generic ldbminfo, not the BDB-private config). */
+static void *
+bdb_config_import_cache_autosize_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_import_cache_autosize));
+}
+
+/* dse config setter: store the import-cache autosize percentage.
+ * No range check here (the registered default is -1, i.e. "auto"). */
+static int
+bdb_config_import_cache_autosize_set(void *arg,
+                                      void *value,
+                                      char *errorbuf __attribute__((unused)),
+                                      int phase __attribute__((unused)),
+                                      int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    if (apply)
+        li->li_import_cache_autosize = (int)((uintptr_t)value);
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: return the cache autosize percentage. */
+static void *
+bdb_config_cache_autosize_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_cache_autosize));
+}
+
+/* dse config setter: store the cache autosize percentage.
+ * Rejects values outside 0..100 with LDAP_UNWILLING_TO_PERFORM. */
+static int
+bdb_config_cache_autosize_set(void *arg,
+                               void *value,
+                               char *errorbuf,
+                               int phase __attribute__((unused)),
+                               int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    int val = (int)((uintptr_t)value);
+    if (val < 0 || val > 100) {
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                              "Error: Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n",
+                              CONFIG_CACHE_AUTOSIZE, val);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_cache_autosize_set",
+                      "Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n",
+                      CONFIG_CACHE_AUTOSIZE, val);
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+    if (apply) {
+        li->li_cache_autosize = val;
+    }
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: return the cache autosize split percentage. */
+static void *
+bdb_config_cache_autosize_split_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_cache_autosize_split));
+}
+
+/* dse config setter: store the cache autosize split percentage.
+ * Rejects values outside 0..100 with LDAP_UNWILLING_TO_PERFORM. */
+static int
+bdb_config_cache_autosize_split_set(void *arg,
+                                     void *value,
+                                     char *errorbuf,
+                                     int phase __attribute__((unused)),
+                                     int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    int val = (int)((uintptr_t)value);
+    if (val < 0 || val > 100) {
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                              "Error: Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n",
+                              CONFIG_CACHE_AUTOSIZE_SPLIT, val);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_cache_autosize_split_set",
+                      "Invalid value for %s (%d). The value must be between \"0\" and \"100\"\n",
+                      CONFIG_CACHE_AUTOSIZE_SPLIT, val);
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+    if (apply) {
+        li->li_cache_autosize_split = val;
+    }
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: return the import cache size in bytes. */
+static void *
+bdb_config_import_cachesize_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_import_cachesize);
+}
+
+/* dse config setter: store the import cache size.  As with
+ * bdb_config_db_cache_set, only an INCREASE over the current value is
+ * sanity-checked against available memory; a decrease is always accepted. */
+static int
+bdb_config_import_cachesize_set(void *arg,
+                                 void *value,
+                                 char *errorbuf,
+                                 int phase __attribute__((unused)),
+                                 int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    uint64_t val = (uint64_t)((uintptr_t)value);
+    uint64_t delta;
+    /* There is an error here. We check the new val against our current mem-alloc
+     * Issue is that we already are using system pages, so while our value *might*
+     * be valid, we may reject it here due to the current procs page usage.
+     *
+     * So how do we solve this? If we are setting a SMALLER value than we
+     * currently have ALLOW it, because we already passed the cache sanity.
+     * If we are setting a LARGER value, we check the delta of the two, and make
+     * sure that it is sane.
+     */
+    if (apply) {
+        if (val > li->li_import_cachesize) {
+            delta = val - li->li_import_cachesize;
+
+            util_cachesize_result sane;
+            slapi_pal_meminfo *mi = spal_meminfo_get();
+            sane = util_is_cachesize_sane(mi, &delta);
+            spal_meminfo_destroy(mi);
+
+            if (sane != UTIL_CACHESIZE_VALID) {
+                slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: import cachesize value is too large.");
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_config_import_cachesize_set",
+                              "Import cachesize value is too large.\n");
+                return LDAP_UNWILLING_TO_PERFORM;
+            }
+        }
+        li->li_import_cachesize = val;
+    }
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: return the import index buffer size.
+ * This setting is process-global (held by the import module),
+ * so the per-backend 'arg' is unused. */
+static void *
+bdb_config_index_buffer_size_get(void *arg __attribute__((unused)))
+{
+    return (void *)import_get_index_buffer_size();
+}
+
+/* dse config setter: store the process-global import index buffer size. */
+static int
+bdb_config_index_buffer_size_set(void *arg __attribute__((unused)),
+                                  void *value,
+                                  char *errorbuf __attribute__((unused)),
+                                  int phase __attribute__((unused)),
+                                  int apply)
+{
+    if (apply) {
+        import_configure_index_buffer_size((size_t)value);
+    }
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: return the serial-lock ("fat lock") on/off flag. */
+static void *
+bdb_config_serial_lock_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_fat_lock);
+}
+
+/* dse config setter: store the serial-lock flag on the generic ldbminfo;
+ * applied only when 'apply' is non-zero. */
+static int
+bdb_config_serial_lock_set(void *arg,
+                            void *value,
+                            char *errorbuf __attribute__((unused)),
+                            int phase __attribute__((unused)),
+                            int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    if (apply) {
+        li->li_fat_lock = (int)((uintptr_t)value);
+    }
+
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: return the legacy-errorcode on/off flag. */
+static void *
+bdb_config_legacy_errcode_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)li->li_legacy_errcode);
+}
+
+/* dse config setter: store the legacy-errorcode flag on the generic
+ * ldbminfo; applied only when 'apply' is non-zero. */
+static int
+bdb_config_legacy_errcode_set(void *arg,
+                               void *value,
+                               char *errorbuf __attribute__((unused)),
+                               int phase __attribute__((unused)),
+                               int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    if (apply) {
+        li->li_legacy_errcode = (int)((uintptr_t)value);
+    }
+
+    return LDAP_SUCCESS;
+}
+
+/* dse config setter: parse the bypass-filter-test mode string.
+ *   "on"     -> bypass enabled, no verification
+ *   "verify" -> bypass enabled, results double-checked
+ *   anything else -> bypass disabled
+ * Unrecognized values are silently treated as "off" (no error returned). */
+static int
+bdb_config_set_bypass_filter_test(void *arg,
+                                   void *value,
+                                   char *errorbuf __attribute__((unused)),
+                                   int phase __attribute__((unused)),
+                                   int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    if (apply) {
+        char *myvalue = (char *)value;
+
+        if (0 == strcasecmp(myvalue, "on")) {
+            li->li_filter_bypass = 1;
+            li->li_filter_bypass_check = 0;
+        } else if (0 == strcasecmp(myvalue, "verify")) {
+            li->li_filter_bypass = 1;
+            li->li_filter_bypass_check = 1;
+        } else {
+            li->li_filter_bypass = 0;
+            li->li_filter_bypass_check = 0;
+        }
+    }
+    return LDAP_SUCCESS;
+}
+
+/* dse config getter: render the bypass-filter-test mode back to its
+ * string form ("on"/"verify"/"off").  Returns allocated memory that the
+ * caller must free (string-typed getters must return alloced memory). */
+static void *
+bdb_config_get_bypass_filter_test(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    char *retstr = NULL;
+
+    if (li->li_filter_bypass) {
+        if (li->li_filter_bypass_check) {
+            /* meaningful only if is bypass filter test called */
+            retstr = slapi_ch_strdup("verify");
+        } else {
+            retstr = slapi_ch_strdup("on");
+        }
+    } else {
+        retstr = slapi_ch_strdup("off");
+    }
+    return (void *)retstr;
+}
+
+/* dse config getter: return the maximum number of concurrent BDB
+ * transactions (txn_max). */
+static void *
+bdb_config_db_tx_max_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_tx_max));
+}
+
+/* dse config setter: store txn_max; no validation, applied only when
+ * 'apply' is non-zero. */
+static int
+bdb_config_db_tx_max_set(
+    void *arg,
+    void *value,
+    char *errorbuf __attribute__((unused)),
+    int phase __attribute__((unused)),
+    int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_tx_max = val;
+    }
+
+    return retval;
+}
+
+/* dse config getter: return the BDB deadlock-detection policy. */
+static void *
+bdb_config_db_deadlock_policy_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(BDB_CONFIG(li)->bdb_deadlock_policy));
+}
+
+/* dse config setter: store the BDB deadlock-detection policy.
+ * Rejects values above DB_LOCK_YOUNGEST (val is unsigned, so negatives
+ * also fail the upper-bound check) and warns — but still accepts — when
+ * the value is DB_LOCK_NORUN, which disables deadlock detection. */
+static int
+bdb_config_db_deadlock_policy_set(void *arg,
+                                   void *value,
+                                   char *errorbuf,
+                                   int phase __attribute__((unused)),
+                                   int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    u_int32_t val = (u_int32_t)((uintptr_t)value);
+
+    if (val > DB_LOCK_YOUNGEST) {
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                              "Error: Invalid value for %s (%d). Must be between %d and %d inclusive\n",
+                              CONFIG_DB_DEADLOCK_POLICY, val, DB_LOCK_DEFAULT, DB_LOCK_YOUNGEST);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_db_deadlock_policy_set",
+                      "Invalid value for deadlock policy (%d). Must be between %d and %d inclusive\n",
+                      val, DB_LOCK_DEFAULT, DB_LOCK_YOUNGEST);
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+    if (val == DB_LOCK_NORUN) {
+        slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
+                              "Warning: Setting value for %s to (%d) will disable deadlock detection\n",
+                              CONFIG_DB_DEADLOCK_POLICY, val);
+        slapi_log_err(SLAPI_LOG_WARNING, "bdb_config_db_deadlock_policy_set",
+                      "Setting value for %s to (%d) will disable deadlock detection\n",
+                      CONFIG_DB_DEADLOCK_POLICY, val);
+    }
+
+    if (apply) {
+        BDB_CONFIG(li)->bdb_deadlock_policy = val;
+    }
+
+    return retval;
+}
+
+
+/*------------------------------------------------------------------------
+ * Configuration array for bdb variables
+ *
+ * Maps each config attribute name to its value type, compiled-in default,
+ * get/set callbacks, and flags (CONFIG_FLAG_ALWAYS_SHOW /
+ * CONFIG_FLAG_ALLOW_RUNNING_CHANGE).  The array is NULL-terminated and is
+ * walked by bdb_config_setup_default() and the dse parse/search/modify
+ * callbacks.  Each attribute must appear exactly once; a duplicate
+ * CONFIG_DB_LOCK entry has been removed (it registered the identical
+ * callbacks a second time).
+ *----------------------------------------------------------------------*/
+static config_info bdb_config_param[] = {
+    {CONFIG_DB_LOCK, CONFIG_TYPE_INT, "10000", &bdb_config_db_lock_get, &bdb_config_db_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DBCACHESIZE, CONFIG_TYPE_UINT64, DEFAULT_CACHE_SIZE_STR, &bdb_config_dbcachesize_get, &bdb_config_dbcachesize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DBNCACHE, CONFIG_TYPE_INT, "0", &bdb_config_dbncache_get, &bdb_config_dbncache_set, CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_MAXPASSBEFOREMERGE, CONFIG_TYPE_INT, "100", &bdb_config_maxpassbeforemerge_get, &bdb_config_maxpassbeforemerge_set, 0},
+    {CONFIG_DB_LOGDIRECTORY, CONFIG_TYPE_STRING, "", &bdb_config_db_logdirectory_get, &bdb_config_db_logdirectory_set, CONFIG_FLAG_ALWAYS_SHOW},
+    {CONFIG_DB_DURABLE_TRANSACTIONS, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_durable_transactions_get, &bdb_config_db_durable_transactions_set, CONFIG_FLAG_ALWAYS_SHOW},
+    {CONFIG_DB_CIRCULAR_LOGGING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_circular_logging_get, &bdb_config_db_circular_logging_set, 0},
+    {CONFIG_DB_TRANSACTION_LOGGING, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_transaction_logging_get, &bdb_config_db_transaction_logging_set, 0},
+    {CONFIG_DB_TRANSACTION_WAIT, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_transaction_wait_get, &bdb_config_db_transaction_wait_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_CHECKPOINT_INTERVAL, CONFIG_TYPE_INT, "60", &bdb_config_db_checkpoint_interval_get, &bdb_config_db_checkpoint_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_COMPACTDB_INTERVAL, CONFIG_TYPE_INT, "2592000" /*30days*/, &bdb_config_db_compactdb_interval_get, &bdb_config_db_compactdb_interval_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_TRANSACTION_BATCH, CONFIG_TYPE_INT, "0", &bdb_get_batch_transactions, &bdb_set_batch_transactions, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_TRANSACTION_BATCH_MIN_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_min_sleep, &bdb_set_batch_txn_min_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_TRANSACTION_BATCH_MAX_SLEEP, CONFIG_TYPE_INT, "50", &bdb_get_batch_txn_max_sleep, &bdb_set_batch_txn_max_sleep, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_LOGBUF_SIZE, CONFIG_TYPE_SIZE_T, "0", &bdb_config_db_logbuf_size_get, &bdb_config_db_logbuf_size_set, CONFIG_FLAG_ALWAYS_SHOW},
+    {CONFIG_DB_PAGE_SIZE, CONFIG_TYPE_SIZE_T, "0", &bdb_config_db_page_size_get, &bdb_config_db_page_size_set, 0},
+    {CONFIG_DB_INDEX_PAGE_SIZE, CONFIG_TYPE_SIZE_T, "0", &bdb_config_db_index_page_size_get, &bdb_config_db_index_page_size_set, 0},
+    {CONFIG_DB_OLD_IDL_MAXIDS, CONFIG_TYPE_INT, "0", &bdb_config_db_old_idl_maxids_get, &bdb_config_db_old_idl_maxids_set, 0},
+    {CONFIG_DB_LOGFILE_SIZE, CONFIG_TYPE_UINT64, "0", &bdb_config_db_logfile_size_get, &bdb_config_db_logfile_size_set, 0},
+    {CONFIG_DB_TRICKLE_PERCENTAGE, CONFIG_TYPE_INT, "5", &bdb_config_db_trickle_percentage_get, &bdb_config_db_trickle_percentage_set, 0},
+    {CONFIG_DB_SPIN_COUNT, CONFIG_TYPE_INT, "0", &bdb_config_db_spin_count_get, &bdb_config_db_spin_count_set, 0},
+    {CONFIG_DB_DEBUG, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_debug_get, &bdb_config_db_debug_set, 0},
+    {CONFIG_DB_VERBOSE, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_verbose_get, &bdb_config_db_verbose_set, 0},
+    {CONFIG_DB_NAMED_REGIONS, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_named_regions_get, &bdb_config_db_named_regions_set, 0},
+    {CONFIG_DB_PRIVATE_MEM, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_private_mem_get, &bdb_config_db_private_mem_set, 0},
+    {CONFIG_DB_PRIVATE_IMPORT_MEM, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_private_import_mem_get, &bdb_config_db_private_import_mem_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    /* NOTE: "CONDIF_" is a historical typo in the macro name itself (defined
+     * in the header); do not "fix" it here or the build breaks. */
+    {CONDIF_DB_ONLINE_IMPORT_ENCRYPT, CONFIG_TYPE_ONOFF, "on", &bdb_config_db_online_import_encrypt_get, &bdb_config_db_online_import_encrypt_set, 0},
+    {CONFIG_DB_SHM_KEY, CONFIG_TYPE_LONG, "389389", &bdb_config_db_shm_key_get, &bdb_config_db_shm_key_set, 0},
+    {CONFIG_DB_CACHE, CONFIG_TYPE_INT, "0", &bdb_config_db_cache_get, &bdb_config_db_cache_set, 0},
+    {CONFIG_DB_DEBUG_CHECKPOINTING, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_debug_checkpointing_get, &bdb_config_db_debug_checkpointing_set, 0},
+    {CONFIG_DB_HOME_DIRECTORY, CONFIG_TYPE_STRING, "", &bdb_config_db_home_directory_get, &bdb_config_db_home_directory_set, 0},
+    {CONFIG_IMPORT_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "-1", &bdb_config_import_cache_autosize_get, &bdb_config_import_cache_autosize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "10", &bdb_config_cache_autosize_get, &bdb_config_cache_autosize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_CACHE_AUTOSIZE_SPLIT, CONFIG_TYPE_INT, "25", &bdb_config_cache_autosize_split_get, &bdb_config_cache_autosize_split_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_IMPORT_CACHESIZE, CONFIG_TYPE_UINT64, "16777216", &bdb_config_import_cachesize_get, &bdb_config_import_cachesize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_BYPASS_FILTER_TEST, CONFIG_TYPE_STRING, "on", &bdb_config_get_bypass_filter_test, &bdb_config_set_bypass_filter_test, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_DB_LOCKDOWN, CONFIG_TYPE_ONOFF, "off", &bdb_config_db_lockdown_get, &bdb_config_db_lockdown_set, 0},
+    {CONFIG_INDEX_BUFFER_SIZE, CONFIG_TYPE_INT, "0", &bdb_config_index_buffer_size_get, &bdb_config_index_buffer_size_set, 0},
+    {CONFIG_DB_TX_MAX, CONFIG_TYPE_INT, "200", &bdb_config_db_tx_max_get, &bdb_config_db_tx_max_set, 0},
+    {CONFIG_SERIAL_LOCK, CONFIG_TYPE_ONOFF, "on", &bdb_config_serial_lock_get, &bdb_config_serial_lock_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_USE_LEGACY_ERRORCODE, CONFIG_TYPE_ONOFF, "off", &bdb_config_legacy_errcode_get, &bdb_config_legacy_errcode_set, 0},
+    {CONFIG_DB_DEADLOCK_POLICY, CONFIG_TYPE_INT, STRINGIFYDEFINE(DB_LOCK_YOUNGEST), &bdb_config_db_deadlock_policy_get, &bdb_config_db_deadlock_policy_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {NULL, 0, NULL, NULL, NULL, 0}};
+
+/* Initialize every bdb config parameter to its compiled-in default by
+ * invoking each entry's set callback with a NULL value ("use default")
+ * during the initialization phase.  Per-entry errors, if any, land in
+ * err_buf and are not propagated. */
+void
+bdb_config_setup_default(struct ldbminfo *li)
+{
+    config_info *config;
+    char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
+
+    for (config = bdb_config_param; config->config_name != NULL; config++) {
+        bdb_config_set((void *)li, config->config_name, bdb_config_param, NULL /* use default */, err_buf, CONFIG_PHASE_INITIALIZATION, 1 /* apply */, LDAP_MOD_REPLACE);
+    }
+}
+
+/* One-time upgrade helper: split the legacy combined ldbm config entry
+ * (cn=config,cn=<plugin>,cn=plugins,cn=config) into the new BDB-specific
+ * entry (cn=bdb,cn=config,...).  BDB-owned attributes are copied into the
+ * newly added bdb entry and then deleted from the old entry.
+ * Returns 0 (LDAP_SUCCESS) on success, non-zero on failure.
+ *
+ * Fix vs. original: the slapi_log_err() calls in this function tagged
+ * their messages with "bdb_config_load_dse_info", misattributing errors
+ * to the caller; they now use this function's own name. */
+static int
+bdb_config_upgrade_dse_info(struct ldbminfo *li)
+{
+    Slapi_PBlock *search_pb;
+    Slapi_PBlock *add_pb;
+    Slapi_Entry *bdb_config = NULL;
+    Slapi_Entry **entries = NULL;
+    char *bdb_config_dn = NULL;
+    char *config_dn = NULL;
+    int rval = 0;
+    Slapi_Mods smods;
+
+    slapi_log_err(SLAPI_LOG_ERR, "bdb_config_upgrade_dse_info", "create config entry from old config\n");
+
+    /* first get the existing ldbm config entry, if it fails
+     * nothing can be done
+     */
+
+    config_dn = slapi_create_dn_string("cn=config,cn=%s,cn=plugins,cn=config",
+                                li->li_plugin->plg_name);
+
+    search_pb = slapi_pblock_new();
+    if (!search_pb) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_upgrade_dse_info", "Out of memory\n");
+        rval = 1;
+        goto bail;
+    }
+
+    slapi_search_internal_set_pb(search_pb, config_dn, LDAP_SCOPE_BASE,
+                                 "objectclass=*", NULL, 0, NULL, NULL, li->li_identity, 0);
+    slapi_search_internal_pb(search_pb);
+    slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rval);
+    if (rval == LDAP_SUCCESS) {
+        /* Need to parse the configuration information for the ldbm
+         * plugin that is held in the DSE. */
+        slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES,
+                         &entries);
+        if (NULL == entries || entries[0] == NULL) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_upgrade_dse_info", "Error accessing the ldbm config DSE 2\n");
+            rval = 1;
+            goto bail;
+        }
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_upgrade_dse_info",
+                      "Error accessing the ldbm config DSE 1\n");
+        rval = 1;
+        goto bail;
+    }
+
+
+    /* next create a new specific bdb config entry,
+     * look for attributes in the general config entry which
+     * have to go to the bdb entry.
+     * - add them to cn=bdb,cn=config,cn=ldbm database
+     * - remove them from cn=config,cn=ldbm database
+     */
+    bdb_config = slapi_entry_alloc();
+    bdb_config_dn = slapi_create_dn_string("cn=bdb,cn=config,cn=%s,cn=plugins,cn=config",
+                                li->li_plugin->plg_name);
+    /* slapi_entry_init takes ownership of bdb_config_dn */
+    slapi_entry_init(bdb_config, bdb_config_dn, NULL);
+
+    slapi_entry_add_string(bdb_config, SLAPI_ATTR_OBJECTCLASS, "extensibleobject");
+
+    /* smods collects the delete-mods to strip the moved attributes
+     * from the old combined entry */
+    slapi_mods_init(&smods, 1);
+    split_bdb_config_entry(li, entries[0], bdb_config, bdb_config_param, &smods);
+    add_pb = slapi_pblock_new();
+    slapi_pblock_init(add_pb);
+
+    slapi_add_entry_internal_set_pb(add_pb,
+                                    bdb_config,
+                                    NULL,
+                                    li->li_identity,
+                                    0);
+    slapi_add_internal_pb(add_pb);
+    slapi_pblock_get(add_pb, SLAPI_PLUGIN_INTOP_RESULT, &rval);
+
+    if (rval != LDAP_SUCCESS) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_upgrade_dse_info", "failed to add bdb config_entry, err= %d\n", rval);
+    } else {
+        /* the new bdb config entry was successfully added
+         * now strip the attrs from the general config entry
+         */
+        Slapi_PBlock *mod_pb = slapi_pblock_new();
+        slapi_modify_internal_set_pb(mod_pb, config_dn,
+                                    slapi_mods_get_ldapmods_byref(&smods),
+                                    NULL, NULL, li->li_identity, 0);
+        slapi_modify_internal_pb(mod_pb);
+        slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &rval);
+        if (rval != LDAP_SUCCESS) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_upgrade_dse_info", "failed to modify  config_entry, err= %d\n", rval);
+        }
+        slapi_pblock_destroy(mod_pb);
+    }
+    slapi_pblock_destroy(add_pb);
+    slapi_mods_done(&smods);
+    slapi_free_search_results_internal(search_pb);
+
+bail:
+    slapi_ch_free_string(&config_dn);
+    if (search_pb) {
+        slapi_pblock_destroy(search_pb);
+    }
+    return rval;
+}
+/* Reads in any config information held in the dse for the bdb
+ * implementation of the ldbm plugin.
+ * Creates dse entries used to configure the ldbm plugin and dblayer
+ * if they don't already exist.  Registers dse callback functions to
+ * maintain those dse entries.  Returns 0 on success.
+ */
+int
+bdb_config_load_dse_info(struct ldbminfo *li)
+{
+    Slapi_PBlock *search_pb = NULL; /* NULL so the bail path can test it safely */
+    Slapi_Entry **entries = NULL;
+    char *dn = NULL;
+    int rval = 0;
+
+    /* We try to read the entry
+     * cn=bdb, cn=config, cn=ldbm database, cn=plugins, cn=config.  If the entry is
+     * there, then we process the config information it stores.
+     */
+    dn = slapi_create_dn_string("cn=bdb,cn=config,cn=%s,cn=plugins,cn=config",
+                                li->li_plugin->plg_name);
+    if (NULL == dn) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_config_load_dse_info",
+                      "failed create config dn for %s\n",
+                      li->li_plugin->plg_name);
+        rval = 1;
+        goto bail;
+    }
+
+    search_pb = slapi_pblock_new();
+    if (!search_pb) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_load_dse_info", "Out of memory\n");
+        rval = 1;
+        goto bail;
+    }
+
+retry:
+    slapi_search_internal_set_pb(search_pb, dn, LDAP_SCOPE_BASE,
+                                 "objectclass=*", NULL, 0, NULL, NULL, li->li_identity, 0);
+    slapi_search_internal_pb(search_pb);
+    slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rval);
+
+    if (rval == LDAP_SUCCESS) {
+        /* Need to parse the configuration information for the bdb config
+         * entry that is held in the DSE. */
+        slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES,
+                         &entries);
+        if (NULL == entries || entries[0] == NULL) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_load_dse_info", "Error accessing the bdb config DSE entry\n");
+            rval = 1;
+            goto bail;
+        }
+        if (0 != parse_bdb_config_entry(li, entries[0], bdb_config_param)) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_load_dse_info", "Error parsing the bdb config DSE entry\n");
+            rval = 1;
+            goto bail;
+        }
+    } else if (rval == LDAP_NO_SUCH_OBJECT) {
+        /* The specific bdb entry does not exist,
+         * create it from the old config dse entry */
+        if (bdb_config_upgrade_dse_info(li)) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_load_dse_info",
+                          "Error accessing the bdb config DSE entry 1\n");
+            rval = 1;
+            goto bail;
+        } else {
+            /* reuse the pblock for a second search after the upgrade */
+            slapi_free_search_results_internal(search_pb);
+            slapi_pblock_init(search_pb);
+            goto retry;
+        }
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_load_dse_info",
+                      "Error accessing the bdb config DSE entry 2\n");
+        rval = 1;
+        goto bail;
+    }
+
+    slapi_free_search_results_internal(search_pb);
+    slapi_pblock_destroy(search_pb);
+    search_pb = NULL; /* prevent a double free at bail */
+
+    /* setup the dse callback functions for the ldbm backend config entry */
+    slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn,
+                                   LDAP_SCOPE_BASE, "(objectclass=*)", bdb_config_search_entry_callback,
+                                   (void *)li);
+    slapi_config_register_callback(SLAPI_OPERATION_MODIFY, DSE_FLAG_PREOP, dn,
+                                   LDAP_SCOPE_BASE, "(objectclass=*)", bdb_config_modify_entry_callback,
+                                   (void *)li);
+    slapi_config_register_callback(DSE_OPERATION_WRITE, DSE_FLAG_PREOP, dn,
+                                   LDAP_SCOPE_BASE, "(objectclass=*)", bdb_config_search_entry_callback,
+                                   (void *)li);
+    slapi_ch_free_string(&dn);
+
+    /* setup the dse callback functions for the ldbm backend monitor entry */
+    dn = slapi_create_dn_string("cn=monitor,cn=%s,cn=plugins,cn=config",
+                                li->li_plugin->plg_name);
+    if (NULL == dn) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_config_load_dse_info",
+                      "failed to create monitor dn for %s\n",
+                      li->li_plugin->plg_name);
+        rval = 1;
+        goto bail;
+    }
+
+    /* NOTE (LK): still needs to investigate and clarify the monitoring split between db layers.
+     * Now still using ldbm functions 
+     */
+    slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn,
+                                   LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_monitor_search,
+                                   (void *)li);
+    slapi_ch_free_string(&dn);
+
+    /* And the ldbm backend database monitor entry */
+    dn = slapi_create_dn_string("cn=database,cn=monitor,cn=%s,cn=plugins,cn=config",
+                                li->li_plugin->plg_name);
+    if (NULL == dn) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_config_load_dse_info",
+                      "failed create monitor database dn for %s\n",
+                      li->li_plugin->plg_name);
+        rval = 1;
+        goto bail;
+    }
+    slapi_config_register_callback(SLAPI_OPERATION_SEARCH, DSE_FLAG_PREOP, dn,
+                                   LDAP_SCOPE_BASE, "(objectclass=*)", ldbm_back_dbmonitor_search,
+                                   (void *)li);
+
+bail:
+    /* On early exits the search pblock (and any attached results) may still
+     * be live; releasing it here fixes the leak the per-branch bails had. */
+    if (search_pb) {
+        slapi_free_search_results_internal(search_pb);
+        slapi_pblock_destroy(search_pb);
+    }
+    slapi_ch_free_string(&dn);
+    return rval;
+}
+
+
+/* Utility used when building config entries: fetch the current value of a
+ * single config_info item and render it, formatted per its type, into buf
+ * (a char[BUFSIZ]).  A NULL config yields an empty string. */
+void
+bdb_config_get(void *arg, config_info *config, char *buf)
+{
+    void *raw;
+
+    if (NULL == config) {
+        *buf = '\0';
+        return;
+    }
+
+    raw = config->config_get_fn(arg);
+    config_info_print_val(raw, config->config_type, buf);
+
+    /* string getters hand back a copy that we own and must release */
+    if (CONFIG_TYPE_STRING == config->config_type) {
+        slapi_ch_free((void **)&raw);
+    }
+}
+
+/*
+ * DSE search callback: populate the bdb config entry with the current
+ * value of every visible config parameter.
+ *
+ * Returns:
+ *   SLAPI_DSE_CALLBACK_ERROR on failure
+ *   SLAPI_DSE_CALLBACK_OK on success
+ */
+int
+bdb_config_search_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
+                                  Slapi_Entry *e,
+                                  Slapi_Entry *entryAfter __attribute__((unused)),
+                                  int *returncode,
+                                  char *returntext,
+                                  void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    config_info *ci;
+    char valbuf[BUFSIZ];
+    struct berval bv;
+    struct berval *bvals[2] = {&bv, NULL};
+
+    returntext[0] = '\0';
+
+    /* serialize against concurrent config modifications */
+    PR_Lock(li->li_config_mutex);
+
+    for (ci = bdb_config_param; ci->config_name != NULL; ci++) {
+        /* hidden unless flagged always-show or explicitly set */
+        if (!(ci->config_flags & (CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_PREVIOUSLY_SET))) {
+            continue;
+        }
+
+        bdb_config_get((void *)li, ci, valbuf);
+
+        bv.bv_val = valbuf;
+        bv.bv_len = strlen(valbuf);
+        slapi_entry_attr_replace(e, ci->config_name, bvals);
+    }
+
+    PR_Unlock(li->li_config_mutex);
+
+    *returncode = LDAP_SUCCESS;
+    return SLAPI_DSE_CALLBACK_OK;
+}
+
+
+/* Return 1 if attr_name is an operational/bookkeeping attribute that lives
+ * in the config entries but is not itself a config parameter; 0 otherwise. */
+int
+bdb_config_ignored_attr(char *attr_name)
+{
+    static const char *const skip[] = {
+        "objectclass",
+        "cn",
+        "creatorsname",
+        "createtimestamp",
+    };
+    size_t i;
+
+    for (i = 0; i < sizeof(skip) / sizeof(skip[0]); i++) {
+        if (0 == strcasecmp(skip[i], attr_name)) {
+            return 1;
+        }
+    }
+    if (0 == strcasecmp(LDBM_NUMSUBORDINATES_STR, attr_name)) {
+        return 1;
+    }
+    return slapi_attr_is_last_mod(attr_name) ? 1 : 0;
+}
+
+/*
+ * Set a single bdb config attribute.
+ *
+ * arg          - really a struct ldbminfo * (kept opaque so the table of
+ *                config_set_fn callbacks stays generic)
+ * attr_name    - name of the attribute being set
+ * config_array - table of known config_info entries to search
+ * bval         - new value; NULL means "reset to the default"
+ * err_buf      - SLAPI_DSE_RETURNTEXT_SIZE buffer for an error message
+ * phase        - CONFIG_PHASE_*; some attributes refuse changes at RUNNING
+ * apply_mod    - 0 = validate only, 1 = actually apply the change
+ * mod_op       - LDAP_MOD_* operation (add/delete/replace semantics)
+ *
+ * Returns LDAP_SUCCESS on success.  Unknown attributes are ignored (an
+ * explanation is left in err_buf but LDAP_SUCCESS is still returned).
+ */
+int
+bdb_config_set(void *arg, char *attr_name, config_info *config_array, struct berval *bval, char *err_buf, int phase, int apply_mod, int mod_op)
+{
+    config_info *config;
+    int use_default;
+    int int_val;
+    long long_val;
+    size_t sz_val;
+    PRInt64 llval;
+    int maxint = (int)(((unsigned int)~0) >> 1);
+    int minint = ~maxint; /* two's-complement INT_MIN */
+    PRInt64 llmaxint;
+    PRInt64 llminint;
+    int err = 0;
+    char *str_val;
+    int retval = 0;
+
+    LL_I2L(llmaxint, maxint);
+    LL_I2L(llminint, minint);
+
+    config = config_info_get(config_array, attr_name);
+    if (NULL == config) {
+        slapi_log_err(SLAPI_LOG_CONFIG, "bdb_config_set", "Unknown config attribute %s\n", attr_name);
+        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown config attribute %s\n", attr_name);
+        return LDAP_SUCCESS; /* Ignore unknown attributes */
+    }
+
+    /* Some config attrs can't be changed while the server is running. */
+    if (phase == CONFIG_PHASE_RUNNING &&
+        !(config->config_flags & CONFIG_FLAG_ALLOW_RUNNING_CHANGE)) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "%s can't be modified while the server is running.\n", attr_name);
+        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "%s can't be modified while the server is running.\n", attr_name);
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+
+    /* If the config phase is initialization or if bval is NULL or if we are deleting
+       the value, we will use the default value for the attribute. */
+    if ((CONFIG_PHASE_INITIALIZATION == phase) || (NULL == bval) || SLAPI_IS_MOD_DELETE(mod_op)) {
+        if (CONFIG_FLAG_SKIP_DEFAULT_SETTING & config->config_flags) {
+            return LDAP_SUCCESS; /* Skipping the default config setting */
+        }
+        use_default = 1;
+    } else {
+        use_default = 0;
+
+        /* cannot use mod add on a single valued attribute if the attribute was
+           previously set to a non-default value */
+        if (SLAPI_IS_MOD_ADD(mod_op) && apply_mod &&
+            (config->config_flags & CONFIG_FLAG_PREVIOUSLY_SET)) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "cannot add a value to single valued attribute %s.\n", attr_name);
+            return LDAP_OBJECT_CLASS_VIOLATION;
+        }
+    }
+
+    /* if delete, and a specific value was provided to delete, the existing value must
+       match that value, or return LDAP_NO_SUCH_ATTRIBUTE */
+    if (SLAPI_IS_MOD_DELETE(mod_op) && bval && bval->bv_len && bval->bv_val) {
+        char buf[BUFSIZ];
+        bdb_config_get(arg, config, buf);
+        if (PL_strncmp(buf, bval->bv_val, bval->bv_len)) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE,
+                                  "value [%s] for attribute %s does not match existing value [%s].\n", bval->bv_val, attr_name, buf);
+            return LDAP_NO_SUCH_ATTRIBUTE;
+        }
+    }
+
+    /* Parse the value per the declared type, bounds-check it, then hand it
+     * to the per-attribute setter.  The setter sees validate-only vs apply
+     * via apply_mod. */
+    switch (config->config_type) {
+    case CONFIG_TYPE_INT:
+        if (use_default) {
+            str_val = config->config_default_value;
+        } else {
+            str_val = bval->bv_val;
+        }
+        /* get the value as a 64 bit value */
+        llval = db_atoi(str_val, &err);
+        /* check for parsing error (e.g. not a number) */
+        if (err) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is not a number\n", str_val, attr_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is not a number\n", str_val, attr_name);
+            return LDAP_UNWILLING_TO_PERFORM;
+            /* check for overflow */
+        } else if (LL_CMP(llval, >, llmaxint)) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is greater than the maximum %d\n",
+                                  str_val, attr_name, maxint);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is greater than the maximum %d\n",
+                          str_val, attr_name, maxint);
+            return LDAP_UNWILLING_TO_PERFORM;
+            /* check for underflow */
+        } else if (LL_CMP(llval, <, llminint)) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is less than the minimum %d\n",
+                                  str_val, attr_name, minint);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is less than the minimum %d\n",
+                          str_val, attr_name, minint);
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+        /* convert 64 bit value to 32 bit value */
+        LL_L2I(int_val, llval);
+        retval = config->config_set_fn(arg, (void *)((uintptr_t)int_val), err_buf, phase, apply_mod);
+        break;
+    case CONFIG_TYPE_INT_OCTAL:
+        /* octal literal, e.g. a mode value; no explicit range check here */
+        if (use_default) {
+            int_val = (int)strtol(config->config_default_value, NULL, 8);
+        } else {
+            int_val = (int)strtol((char *)bval->bv_val, NULL, 8);
+        }
+        retval = config->config_set_fn(arg, (void *)((uintptr_t)int_val), err_buf, phase, apply_mod);
+        break;
+    case CONFIG_TYPE_LONG:
+        /* NOTE(review): long values are still range-checked against *int*
+         * limits (llmaxint/llminint), mirroring the original ldbm code. */
+        if (use_default) {
+            str_val = config->config_default_value;
+        } else {
+            str_val = bval->bv_val;
+        }
+        /* get the value as a 64 bit value */
+        llval = db_atoi(str_val, &err);
+        /* check for parsing error (e.g. not a number) */
+        if (err) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is not a number\n",
+                                  str_val, attr_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is not a number\n",
+                          str_val, attr_name);
+            return LDAP_UNWILLING_TO_PERFORM;
+            /* check for overflow */
+        } else if (LL_CMP(llval, >, llmaxint)) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is greater than the maximum %d\n",
+                                  str_val, attr_name, maxint);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is greater than the maximum %d\n",
+                          str_val, attr_name, maxint);
+            return LDAP_UNWILLING_TO_PERFORM;
+            /* check for underflow */
+        } else if (LL_CMP(llval, <, llminint)) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is less than the minimum %d\n",
+                                  str_val, attr_name, minint);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is less than the minimum %d\n",
+                          str_val, attr_name, minint);
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+        /* convert 64 bit value to 32 bit value */
+        LL_L2I(long_val, llval);
+        retval = config->config_set_fn(arg, (void *)long_val, err_buf, phase, apply_mod);
+        break;
+    case CONFIG_TYPE_SIZE_T:
+        if (use_default) {
+            str_val = config->config_default_value;
+        } else {
+            str_val = bval->bv_val;
+        }
+
+        /* get the value as a size_t value */
+        sz_val = db_strtoul(str_val, &err);
+
+        /* check for parsing error (e.g. not a number) */
+        if (err == EINVAL) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is not a number\n",
+                                  str_val, attr_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is not a number\n",
+                          str_val, attr_name);
+            return LDAP_UNWILLING_TO_PERFORM;
+            /* check for overflow */
+        } else if (err == ERANGE) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is outside the range of representable values\n",
+                                  str_val, attr_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is outside the range of representable values\n",
+                          str_val, attr_name);
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+        retval = config->config_set_fn(arg, (void *)sz_val, err_buf, phase, apply_mod);
+        break;
+
+
+    case CONFIG_TYPE_UINT64:
+        if (use_default) {
+            str_val = config->config_default_value;
+        } else {
+            str_val = bval->bv_val;
+        }
+        /* get the value as a size_t value */
+        sz_val = db_strtoull(str_val, &err);
+
+        /* check for parsing error (e.g. not a number) */
+        if (err == EINVAL) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is not a number\n",
+                                  str_val, attr_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is not a number\n",
+                          str_val, attr_name);
+            return LDAP_UNWILLING_TO_PERFORM;
+        /* check for overflow */
+        } else if (err == ERANGE) {
+            slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: value %s for attr %s is outside the range of representable values\n",
+                                  str_val, attr_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_config_set", "Value %s for attr %s is outside the range of representable values\n",
+                          str_val, attr_name);
+            return LDAP_UNWILLING_TO_PERFORM;
+        }
+        retval = config->config_set_fn(arg, (void *)sz_val, err_buf, phase, apply_mod);
+        break;
+    case CONFIG_TYPE_STRING:
+        if (use_default) {
+            retval = config->config_set_fn(arg, config->config_default_value, err_buf, phase, apply_mod);
+        } else {
+            retval = config->config_set_fn(arg, bval->bv_val, err_buf, phase, apply_mod);
+        }
+        break;
+    case CONFIG_TYPE_ONOFF:
+        /* anything other than (case-insensitive) "on" counts as off */
+        if (use_default) {
+            int_val = !strcasecmp(config->config_default_value, "on");
+        } else {
+            int_val = !strcasecmp((char *)bval->bv_val, "on");
+        }
+        retval = config->config_set_fn(arg, (void *)((uintptr_t)int_val), err_buf, phase, apply_mod);
+        break;
+    }
+
+    /* operation was successful and we applied the value? */
+    if (!retval && apply_mod) {
+        /* Since we are setting the value for the config attribute, we
+         * need to turn on the CONFIG_FLAG_PREVIOUSLY_SET flag to make
+         * sure this attribute is shown. */
+        if (use_default) {
+            /* attr deleted or we are using the default value */
+            config->config_flags &= ~CONFIG_FLAG_PREVIOUSLY_SET;
+        } else {
+            /* attr set explicitly */
+            config->config_flags |= CONFIG_FLAG_PREVIOUSLY_SET;
+        }
+    }
+
+    return retval;
+}
+
+/* Move bdb-specific attributes out of the generic ldbm config entry.
+ *
+ * For every attribute of ldbm_conf_e that is known to config_array, the
+ * first value is copied into bdb_conf_e and a MOD_DELETE (all values) is
+ * queued on smods so the attribute is later removed from the old entry.
+ * NOTE(review): only the first value is migrated — assumes bdb config
+ * attributes are single-valued; verify against the config schema.
+ *
+ * li is unused today but kept in the signature for symmetry with the
+ * other config helpers (marked to silence -Wunused-parameter, matching
+ * the style used elsewhere in this file).
+ */
+static void
+split_bdb_config_entry(struct ldbminfo *li __attribute__((unused)), Slapi_Entry *ldbm_conf_e, Slapi_Entry *bdb_conf_e, config_info *config_array, Slapi_Mods *smods)
+{
+    Slapi_Attr *attr = NULL;
+
+    for (slapi_entry_first_attr(ldbm_conf_e, &attr); attr; slapi_entry_next_attr(ldbm_conf_e, attr, &attr)) {
+        char *attr_name = NULL;
+        Slapi_Value *sval = NULL;
+
+        slapi_attr_get_type(attr, &attr_name);
+
+        /* There are some attributes that we don't care about, like objectclass. */
+        if (bdb_config_ignored_attr(attr_name)) {
+            continue;
+        }
+        if (NULL == config_info_get(config_array, attr_name)) {
+            /* this attr is not bdb specific */
+            continue;
+        }
+        slapi_attr_first_value(attr, &sval);
+        slapi_entry_add_string(bdb_conf_e, attr_name, slapi_value_get_string(sval));
+        slapi_mods_add(smods, LDAP_MOD_DELETE, attr_name, 0, NULL);
+    }
+}
+
+/* Apply every config attribute found in the bdb config DSE entry e.
+ *
+ * Each non-ignored attribute is fed to bdb_config_set() with
+ * CONFIG_PHASE_STARTUP and apply=1.  Only the first value of each
+ * attribute is considered (config attributes are treated as
+ * single-valued here).
+ *
+ * Returns 0 on success, 1 as soon as any attribute fails to apply.
+ */
+static int
+parse_bdb_config_entry(struct ldbminfo *li, Slapi_Entry *e, config_info *config_array)
+{
+    Slapi_Attr *attr = NULL;
+
+    for (slapi_entry_first_attr(e, &attr); attr; slapi_entry_next_attr(e, attr, &attr)) {
+        char *attr_name = NULL;
+        Slapi_Value *sval = NULL;
+        struct berval *bval;
+        char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
+
+        slapi_attr_get_type(attr, &attr_name);
+
+        /* There are some attributes that we don't care about, like objectclass. */
+        if (bdb_config_ignored_attr(attr_name)) {
+            continue;
+        }
+        slapi_attr_first_value(attr, &sval);
+        bval = (struct berval *)slapi_value_get_berval(sval);
+
+        if (bdb_config_set(li, attr_name, config_array, bval, err_buf, CONFIG_PHASE_STARTUP, 1 /* apply */, LDAP_MOD_REPLACE) != LDAP_SUCCESS) {
+            slapi_log_err(SLAPI_LOG_ERR, "parse_bdb_config_entry", "Error with config attribute %s : %s\n", attr_name, err_buf);
+            return 1;
+        }
+    }
+    return 0;
+}
+
+/* helper for deleting mods (we do not want to be applied) from the mods array.
+ * Frees the berval list and the type string first; the LDAPMod container
+ * itself must be freed last since it owns the other two pointers. */
+static void
+mod_free(LDAPMod *mod)
+{
+    ber_bvecfree(mod->mod_bvalues);
+    slapi_ch_free((void **)&(mod->mod_type));
+    slapi_ch_free((void **)&mod);
+}
+
+/*
+ * DSE modify callback for the bdb config entry.
+ *
+ * Two passes over the mods: pass 0 validates every mod without applying
+ * (so a bad mod rejects the whole operation atomically), pass 1 applies
+ * them.  Mods for non-config attributes (modifiersname etc.) are compacted
+ * to the front of the mods array and handed back to the DSE code for
+ * re-application; consumed config mods are freed and NULLed out.
+ *
+ * Returns:
+ *   SLAPI_DSE_CALLBACK_ERROR on failure
+ *   SLAPI_DSE_CALLBACK_OK on success
+ */
+int
+bdb_config_modify_entry_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *e, int *returncode, char *returntext, void *arg)
+{
+    int i;
+    char *attr_name;
+    LDAPMod **mods;
+    int rc = LDAP_SUCCESS;
+    int apply_mod = 0;
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int reapply_mods = 0;
+    int idx = 0; /* write cursor used to compact the surviving mods */
+
+    /* This lock is probably way too conservative, but we don't expect much
+     * contention for it. */
+    PR_Lock(li->li_config_mutex);
+
+    slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
+
+    returntext[0] = '\0';
+
+    /*
+     * First pass: set apply mods to 0 so only input validation will be done;
+     * 2nd pass: set apply mods to 1 to apply changes to internal storage
+     */
+    for (apply_mod = 0; apply_mod <= 1 && LDAP_SUCCESS == rc; apply_mod++) {
+        for (i = 0; mods && mods[i] && LDAP_SUCCESS == rc; i++) {
+            attr_name = mods[i]->mod_type;
+
+            /* There are some attributes that we don't care about, like modifiersname. */
+            if (bdb_config_ignored_attr(attr_name)) {
+                if (apply_mod) {
+                    Slapi_Attr *origattr = NULL;
+                    Slapi_ValueSet *origvalues = NULL;
+                    mods[idx++] = mods[i];
+                    /* we also need to restore the entryAfter e to its original
+                       state, because the dse code will attempt to reapply
+                       the mods again */
+                    slapi_entry_attr_find(entryBefore, attr_name, &origattr);
+                    if (NULL != origattr) {
+                        slapi_attr_get_valueset(origattr, &origvalues);
+                        if (NULL != origvalues) {
+                            slapi_entry_add_valueset(e, attr_name, origvalues);
+                            slapi_valueset_free(origvalues);
+                        }
+                    }
+                    reapply_mods = 1; /* there is at least one mod we removed */
+                }
+                continue;
+            }
+
+            /* when deleting a value, and this is the last or only value, set
+               the config param to its default value
+               when adding a value, if the value is set to its default value, replace
+               it with the new value - otherwise, if it is single valued, reject the
+               operation with TYPE_OR_VALUE_EXISTS */
+            /* This assumes there is only one bval for this mod. */
+            rc = bdb_config_set((void *)li, attr_name, bdb_config_param,
+                                 (mods[i]->mod_bvalues == NULL) ? NULL
+                                                                : mods[i]->mod_bvalues[0],
+                                 returntext,
+                                 ((li->li_flags & LI_FORCE_MOD_CONFIG) ? CONFIG_PHASE_INTERNAL : CONFIG_PHASE_RUNNING),
+                                 apply_mod, mods[i]->mod_op);
+            if (apply_mod) {
+                /* consumed: stop the DSE code from applying it a second time */
+                mod_free(mods[i]);
+                mods[i] = NULL;
+            }
+        }
+    }
+
+    PR_Unlock(li->li_config_mutex);
+
+    if (reapply_mods) {
+        mods[idx] = NULL; /* terminate the compacted array */
+        slapi_pblock_set(pb, SLAPI_DSE_REAPPLY_MODS, &reapply_mods);
+    }
+
+    *returncode = rc;
+    if (LDAP_SUCCESS == rc) {
+        return SLAPI_DSE_CALLBACK_OK;
+    } else {
+        return SLAPI_DSE_CALLBACK_ERROR;
+    }
+}
+
+
+/* This function is used to set config attributes. It can be used as a
+ * shortcut to doing an internal modify operation on the config DSE.
+ *
+ * Applies immediately (apply=1) with CONFIG_PHASE_INTERNAL / MOD_REPLACE.
+ * WARNING: a failure terminates the server via exit(1) — this is only
+ * intended for internally generated, known-good values; always returns
+ * LDAP_SUCCESS otherwise.
+ */
+int
+bdb_config_internal_set(struct ldbminfo *li, char *attrname, char *value)
+{
+    char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
+    struct berval bval;
+
+    bval.bv_val = value;
+    bval.bv_len = strlen(value);
+
+    if (bdb_config_set((void *)li, attrname, bdb_config_param, &bval,
+                        err_buf, CONFIG_PHASE_INTERNAL, 1 /* apply */,
+                        LDAP_MOD_REPLACE) != LDAP_SUCCESS) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_config_internal_set", "Error setting instance config attr %s to %s: %s\n",
+                      attrname, value, err_buf);
+        exit(1);
+    }
+    return LDAP_SUCCESS;
+}
+
+/* Public accessor: render the current value of attrname into value
+ * (caller supplies a buffer); unknown attributes yield an empty string. */
+void
+bdb_public_config_get(struct ldbminfo *li, char *attrname, char *value)
+{
+    config_info *ci = config_info_get(bdb_config_param, attrname);
+
+    if (ci) {
+        bdb_config_get(li, ci, value);
+    } else {
+        slapi_log_err(SLAPI_LOG_CONFIG, "bdb_public_config_get", "Unknown config attribute %s\n", attrname);
+        value[0] = '\0';
+    }
+}
+/* Public mutator: set attrname to value with the caller-chosen apply
+ * semantics, mod operation and config phase.
+ *
+ * Returns LDAP_SUCCESS on success, -1 if value is NULL, or the LDAP
+ * error from bdb_config_set() otherwise.
+ */
+int
+bdb_public_config_set(struct ldbminfo *li, char *attrname, int apply_mod, int mod_op, int phase, char *value)
+{
+    char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
+    struct berval bval;
+    int rc = LDAP_SUCCESS;
+
+    if (!value) {
+        /* fixed: log tag previously read "bdb_public_internal_set" (copy-paste) */
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_public_config_set", "Error: no value for config attr: %s\n",
+                      attrname);
+        return -1;
+    }
+    bval.bv_val = value;
+    bval.bv_len = strlen(value);
+
+    rc = bdb_config_set((void *)li, attrname, bdb_config_param, &bval,
+                        err_buf, phase, apply_mod,
+                        mod_op);
+    if (rc != LDAP_SUCCESS) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_public_config_set", "Error setting instance config attr %s to %s: %s\n",
+                      attrname, value, err_buf);
+    }
+    return rc;
+}
+
+/* Configure debug output on a BDB environment from the bdb_config flags:
+ * verbose deadlock/recovery/waits-for tracing and the error callback. */
+void
+bdb_set_env_debugging(DB_ENV *pEnv, bdb_config *conf)
+{
+    pEnv->set_errpfx(pEnv, "ns-slapd");
+    if (conf->bdb_debug_verbose) {
+        static const int verbs[] = {DB_VERB_DEADLOCK, DB_VERB_RECOVERY, DB_VERB_WAITSFOR};
+        size_t i;
+        for (i = 0; i < sizeof(verbs) / sizeof(verbs[0]); i++) {
+            pEnv->set_verbose(pEnv, verbs[i], 1); /* 1 means on */
+        }
+    }
+    if (conf->bdb_debug) {
+        pEnv->set_errcall(pEnv, dblayer_log_print);
+    }
+}

+ 3405 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_import.c

@@ -0,0 +1,3405 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+/*
+ * the "new" ("deluxe") backend import code
+ *
+ * please make sure you use 4-space indentation on this file.
+ */
+
+#include "bdb_layer.h"
+#include "../vlv_srch.h"
+#include "../import.h"
+
+#define ERR_IMPORT_ABORTED -23
+#define NEED_DN_NORM -24
+#define NEED_DN_NORM_SP -25
+#define NEED_DN_NORM_BT -26
+
+static char *sourcefile = "bdb_import.c";
+
+static int bdb_ancestorid_create_index(backend *be, ImportJob *job);
+static int bdb_ancestorid_default_create_index(backend *be, ImportJob *job);
+static int bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job);
+
+/* Start of definitions for a simple cache using a hash table.
+ * Maps a parent ID to the IDList of its descendants while building the
+ * ancestorid index during import. */
+
+typedef struct id2idl
+{
+    ID keyid;            /* hash key: the ancestor/parent entry ID */
+    IDList *idl;         /* accumulated ID list for that key */
+    struct id2idl *next; /* hash-bucket chain link (see HASHLOC below) */
+} id2idl;
+
+static void id2idl_free(id2idl **ididl);
+static int id2idl_same_key(const void *ididl, const void *k);
+
+typedef Hashtable id2idl_hash;
+
+/* thin wrappers over the generic Hashtable, keyed by a raw ID value */
+#define id2idl_new_hash(size) new_hash(size, HASHLOC(id2idl, next), NULL, id2idl_same_key)
+#define id2idl_hash_lookup(ht, key, he) find_hash(ht, key, sizeof(ID), (void **)(he))
+#define id2idl_hash_add(ht, key, he, alt) add_hash(ht, key, sizeof(ID), he, (void **)(alt))
+#define id2idl_hash_remove(ht, key) remove_hash(ht, key, sizeof(ID))
+
+static void id2idl_hash_destroy(id2idl_hash *ht);
+/* End of definitions for a simple cache using a hash table */
+
+static int bdb_parentid(backend *be, DB_TXN *txn, ID id, ID *ppid);
+static int check_cache(id2idl_hash *ht);
+static IDList *idl_union_allids(backend *be, struct attrinfo *ai, IDList *a, IDList *b);
+
+/********** routines to manipulate the entry fifo **********/
+
+/* this is pretty bogus -- could be a HUGE amount of memory */
+/* Not anymore with the Import Queue Adaptative Algorithm (Regulation) */
+#define MAX_FIFO_SIZE 8000
+
+/* Size the import entry fifo from the instance entry-cache limits and
+ * allocate its item array.  Returns 0 on success, -1 on allocation failure. */
+static int
+import_fifo_init(ImportJob *job)
+{
+    ldbm_instance *inst = job->inst;
+
+    /* Work out how big the entry fifo can be */
+    if (inst->inst_cache.c_maxentries > 0) {
+        job->fifo.size = inst->inst_cache.c_maxentries;
+    } else {
+        job->fifo.size = inst->inst_cache.c_maxsize / 1024; /* guess */
+    }
+
+    /* byte limit that should be respected to avoid memory starvation;
+     * rather than cachesize * .8, we set it to cachesize for clarity */
+    job->fifo.bsize = inst->inst_cache.c_maxsize;
+    job->fifo.c_bsize = 0;
+
+    /* clamp: more than MAX_FIFO_SIZE slots is wasteful, and fewer than
+     * about 100 destroys the point of the optimization */
+    if (job->fifo.size > MAX_FIFO_SIZE) {
+        job->fifo.size = MAX_FIFO_SIZE;
+    } else if (job->fifo.size < 100) {
+        job->fifo.size = 100;
+    }
+
+    /* The item array keeps a ref'ed pointer to the last <cachesize>
+     * processed entries */
+    PR_ASSERT(NULL == job->fifo.item);
+    job->fifo.item = (FifoItem *)slapi_ch_calloc(job->fifo.size,
+                                                 sizeof(FifoItem));
+    if (NULL == job->fifo.item) {
+        return -1; /* memory allocation error */
+    }
+    return 0;
+}
+
+/*
+ * import_fifo_validate_capacity_or_expand
+ *
+ * Check whether the fifo's byte budget can accommodate an entry of
+ * entrysize bytes; if not, try to grow the budget (to 4x the entry size,
+ * subject to the system memory sanity check).
+ *
+ * \param job The ImportJob queue
+ * \param entrysize The size to check for
+ *
+ * \return int: 0 if the fifo can (now) hold the entry, 1 if it cannot
+ * and the budget could not be expanded.
+ */
+int
+import_fifo_validate_capacity_or_expand(ImportJob *job, size_t entrysize)
+{
+    /* We shoot for four times as much to start with. */
+    uint64_t request = entrysize * 4;
+    slapi_pal_meminfo *mi;
+    util_cachesize_result sane;
+
+    if (entrysize <= job->fifo.bsize) {
+        return 0; /* current budget already covers this entry */
+    }
+
+    /* Check the amount of memory on the system */
+    mi = spal_meminfo_get();
+    sane = util_is_cachesize_sane(mi, &request);
+    spal_meminfo_destroy(mi);
+
+    if (sane == UTIL_CACHESIZE_REDUCED && entrysize <= request) {
+        /* The (reduced) cachesize still exceeds entrysize? It'll do ... */
+        job->fifo.bsize = request;
+        return 0;
+    }
+    if (sane != UTIL_CACHESIZE_VALID) {
+        return 1; /* can't allocate */
+    }
+    /* Our request was okay, go ahead .... */
+    job->fifo.bsize = request;
+    return 0;
+}
+
+/* Look up the fifo slot for entry ID id (slot index is id % fifo.size).
+ *
+ * Returns NULL if the fifo is not allocated, or — for worker threads —
+ * if the slot holds an entry flagged bad (logging the bad entry once,
+ * unless running the upgradednformat-v1 path).  Workers additionally
+ * assert the cached entry is still referenced.
+ */
+FifoItem *
+import_fifo_fetch(ImportJob *job, ID id, int worker)
+{
+    int idx = id % job->fifo.size;
+    FifoItem *fi;
+
+    if (job->fifo.item) {
+        fi = &(job->fifo.item[idx]);
+    } else {
+        return NULL;
+    }
+    if (fi->entry) {
+        if (worker) {
+            if (fi->bad) {
+                if (fi->bad == FIFOITEM_BAD) {
+                    /* flip to BAD_PRINTED so the warning is logged only once */
+                    fi->bad = FIFOITEM_BAD_PRINTED;
+                    if (!(job->flags & FLAG_UPGRADEDNFORMAT_V1)) {
+                        import_log_notice(job, SLAPI_LOG_WARNING, "import_fifo_fetch",
+                                          "Bad entry: ID %d", id);
+                    }
+                }
+                return NULL;
+            }
+            PR_ASSERT(fi->entry->ep_refcnt > 0);
+        }
+    }
+    return fi;
+}
+
+/* Release every entry still parked in the fifo, then the item array itself. */
+static void
+import_fifo_destroy(ImportJob *job)
+{
+    size_t slot;
+
+    for (slot = 0; slot < job->fifo.size; slot++) {
+        struct backentry *ep = job->fifo.item[slot].entry;
+
+        backentry_free(&ep);
+        job->fifo.item[slot].entry = NULL;
+        job->fifo.item[slot].filename = NULL;
+    }
+    slapi_ch_free((void **)&job->fifo.item);
+    job->fifo.item = NULL;
+}
+
+
+/********** logging stuff **********/
+
+#define LOG_BUFFER 512
+
+/* this changes the 'nsTaskStatus' value, which is transient (anything logged
+ * here wipes out any previous status)
+ */
+static void
+import_log_status_start(ImportJob *job)
+{
+    /* Lazily allocate the status buffer, then reset it to empty. */
+    if (NULL == job->task_status) {
+        job->task_status = (char *)slapi_ch_malloc(10 * LOG_BUFFER);
+        if (NULL == job->task_status) {
+            return; /* out of memory? */
+        }
+    }
+    job->task_status[0] = 0;
+}
+
+/*
+ * Append one formatted line to the job's transient status buffer.
+ * Lines are separated by '\n'; output is silently dropped once the
+ * (10 * LOG_BUFFER) byte buffer is (nearly) full.
+ *
+ * Fix: the original wrote the formatted text at the offset measured
+ * *before* the '\n' separator was appended, so the separator was
+ * immediately overwritten and all lines ran together.  `len` is now
+ * bumped past the separator before formatting.
+ */
+static void
+import_log_status_add_line(ImportJob *job, char *format, ...)
+{
+    va_list ap;
+    int len = 0;
+
+    if (!job->task_status)
+        return;
+    len = strlen(job->task_status);
+    if (len + 5 > (10 * LOG_BUFFER))
+        return; /* no room */
+
+    if (job->task_status[0]) {
+        strcat(job->task_status, "\n");
+        len++; /* keep the separator: format past it, not over it */
+    }
+
+    va_start(ap, format);
+    PR_vsnprintf(job->task_status + len, (10 * LOG_BUFFER) - len, format, ap);
+    va_end(ap);
+}
+
+/* Push the accumulated status text to the task entry, if there is one. */
+static void
+import_log_status_done(ImportJob *job)
+{
+    if (job->task != NULL) {
+        slapi_task_log_status(job->task, "%s", job->task_status);
+    }
+}
+
+/* Task destructor: wait for the import thread to stop, then free the job. */
+static void
+import_task_destroy(Slapi_Task *task)
+{
+    ImportJob *job = (ImportJob *)slapi_task_get_data(task);
+
+    if (NULL == job) {
+        return;
+    }
+
+    /* The import thread owns the job while the task is running; poll
+     * until it has finished before tearing the job down. */
+    while (task->task_state == SLAPI_TASK_RUNNING) {
+        DS_Sleep(PR_SecondsToInterval(1));
+    }
+    if (job->task_status != NULL) {
+        slapi_ch_free((void **)&job->task_status);
+        job->task_status = NULL;
+    }
+    FREE(job);
+    slapi_task_set_data(task, NULL);
+}
+
+/* Task abort callback: signal the import job to stop and wait for it. */
+static void
+import_task_abort(Slapi_Task *task)
+{
+    ImportJob *job;
+
+    /* don't log anything from here, because we're still holding the
+     * DSE lock for modify...
+     */
+
+    if (slapi_task_get_state(task) == SLAPI_TASK_FINISHED) {
+        /* too late */
+        /* NOTE(review): this branch is empty, so an already-finished task
+         * still falls through to import_abort_all() below.  An early
+         * return was presumably intended here -- confirm before changing. */
+    }
+
+    /*
+     * Race condition.
+     * If the import thread happens to finish right now we're in trouble
+     * because it will free the job.
+     */
+
+    job = (ImportJob *)slapi_task_get_data(task);
+
+    /* Ask all import threads to stop, then poll every 100ms until the
+     * task reaches the FINISHED state. */
+    import_abort_all(job, 0);
+    while (slapi_task_get_state(task) != SLAPI_TASK_FINISHED)
+        DS_Sleep(PR_MillisecondsToInterval(100));
+}
+
+
+/********** helper functions for importing **********/
+
+/*
+ * Get parentid of an id by reading the operational attr from id2entry.
+ */
+/*
+ * Look up entry `id` in id2entry and return its parentid attribute in
+ * *ppid (NOID when the entry has no parentid, i.e. it is a suffix).
+ * Returns 0 on success or a BDB/ldbm error code.
+ */
+static int
+bdb_parentid(backend *be, DB_TXN *txn, ID id, ID *ppid)
+{
+    int ret = 0;
+    DB *db = NULL;
+    DBT key = {0};
+    DBT data = {0};
+    ID stored_id;
+    char *p;
+
+    /* Open the id2entry file */
+    ret = dblayer_get_id2entry(be, &db);
+    if (ret != 0) {
+        ldbm_nasty("bdb_parentid", sourcefile, 13100, ret);
+        goto out;
+    }
+
+    /* Initialize key and data DBTs.  The key uses our own buffer
+     * (DB_DBT_USERMEM); the value is allocated by BDB (DB_DBT_MALLOC)
+     * and freed at `out`. */
+    id_internal_to_stored(id, (char *)&stored_id);
+    key.data = (char *)&stored_id;
+    key.size = sizeof(stored_id);
+    key.flags = DB_DBT_USERMEM;
+    data.flags = DB_DBT_MALLOC;
+
+    /* Read id2entry */
+    ret = db->get(db, txn, &key, &data, 0);
+    if (ret != 0) {
+        ldbm_nasty("bdb_parentid", sourcefile, 13110, ret);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_parentid",
+                      "Unable to find entry id [" ID_FMT "] (original [" ID_FMT "])"
+                      " in id2entry\n",
+                      stored_id, id);
+        goto out;
+    }
+
+/* Extract the parentid value by scanning the stored entry text.
+ * NOTE(review): strstr() assumes the stored value is NUL-terminated --
+ * confirm id2entry values always are. */
+#define PARENTID_STR "\nparentid:"
+    p = strstr(data.data, PARENTID_STR);
+    if (p == NULL) {
+        /* No parentid attribute: a suffix entry. */
+        *ppid = NOID;
+        goto out;
+    }
+    *ppid = strtoul(p + strlen(PARENTID_STR), NULL, 10);
+
+out:
+    /* Free the entry value */
+    slapi_ch_free(&(data.data));
+
+    /* Release the id2entry file */
+    if (db != NULL) {
+        dblayer_release_id2entry(be, db);
+    }
+    return ret;
+}
+
+/* Free one ancestorid-cache entry: its idlist, then the wrapper. */
+static void
+id2idl_free(id2idl **ididl)
+{
+    id2idl *e = *ididl;
+
+    idl_free(&e->idl);
+    slapi_ch_free((void **)ididl);
+}
+
+/* Hash-table comparator: true when the cache entry carries key id *k. */
+static int
+id2idl_same_key(const void *ididl, const void *k)
+{
+    ID want = *(ID *)k;
+
+    return ((id2idl *)ididl)->keyid == want;
+}
+
+/*
+ * Verify the ancestorid cache is empty.  A complete parentid index
+ * leaves nothing behind; any leftover key means the index is not
+ * complete.  Returns 0 when empty, -1 otherwise.
+ */
+static int
+check_cache(id2idl_hash *ht)
+{
+    u_long slot;
+    u_long leftovers = 0;
+
+    if (NULL == ht)
+        return 0;
+
+    /* Count every entry remaining in any bucket chain. */
+    for (slot = 0; slot < ht->size; slot++) {
+        id2idl *e;
+
+        for (e = (id2idl *)ht->slot[slot]; e != NULL; e = e->next) {
+            leftovers++;
+        }
+    }
+
+    if (leftovers == 0) {
+        return 0;
+    }
+    slapi_log_err(SLAPI_LOG_ERR, "check_cache",
+                  "parentid index is not complete (%lu extra keys in ancestorid cache)\n", leftovers);
+    return -1;
+}
+
+/* Free every entry in every bucket chain, then the hash table itself. */
+static void
+id2idl_hash_destroy(id2idl_hash *ht)
+{
+    u_long slot;
+
+    if (NULL == ht)
+        return;
+
+    for (slot = 0; slot < ht->size; slot++) {
+        id2idl *cur = (id2idl *)ht->slot[slot];
+
+        while (cur != NULL) {
+            id2idl *nxt = cur->next;
+
+            id2idl_free(&cur);
+            cur = nxt;
+        }
+    }
+    slapi_ch_free((void **)&ht);
+}
+
+/*
+ * idl_union_allids - return a union b
+ * takes attr index allids setting into account
+ */
+static IDList *
+idl_union_allids(backend *be, struct attrinfo *ai, IDList *a, IDList *b)
+{
+    /* Under the old IDL scheme a union can spill over the allids
+     * threshold, in which case the result collapses to ALLIDS. */
+    if (!idl_get_idl_new() && a != NULL && b != NULL) {
+        if (ALLIDS(a) || ALLIDS(b) ||
+            (IDL_NIDS(a) + IDL_NIDS(b) > idl_get_allidslimit(ai, 0))) {
+            return idl_allids(be);
+        }
+    }
+    return idl_union(be, a, b);
+}
+/*
+ * Walk the parentid index and collect the IDs of all non-leaf entries
+ * (every equality key in parentid names an entry with children) into
+ * *idl.  Returns 0 on success or a BDB/ldbm error code; on failure
+ * *idl is set to NULL.
+ *
+ * Fixes: the progress message divided by job->numsubordinates without
+ * a zero guard (division by zero when the subordinate count is not
+ * known), and key_count was also bumped on the final, failed cursor
+ * read, inflating the reported count by one.
+ */
+static int
+bdb_get_nonleaf_ids(backend *be, DB_TXN *txn, IDList **idl, ImportJob *job)
+{
+    int ret = 0;
+    DB *db = NULL;
+    DBC *dbc = NULL;
+    DBT key = {0};
+    DBT data = {0};
+    struct attrinfo *ai = NULL;
+    IDList *nodes = NULL;
+    ID id;
+    int started_progress_logging = 0;
+    int key_count = 0;
+
+    /* Open the parentid index */
+    ainfo_get(be, LDBM_PARENTID_STR, &ai);
+
+    /* Open the parentid index file */
+    ret = dblayer_get_index_file(be, ai, &db, DBOPEN_CREATE);
+    if (ret != 0) {
+        ldbm_nasty("bdb_get_nonleaf_ids", sourcefile, 13010, ret);
+        goto out;
+    }
+
+    /* Get a cursor so we can walk through the parentid */
+    ret = db->cursor(db, txn, &dbc, 0);
+    if (ret != 0) {
+        ldbm_nasty("bdb_get_nonleaf_ids", sourcefile, 13020, ret);
+        goto out;
+    }
+    import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids", "Gathering ancestorid non-leaf IDs...");
+    /* For each key which is an equality key */
+    do {
+        ret = dbc->c_get(dbc, &key, &data, DB_NEXT_NODUP);
+        if (ret == 0) {
+            if (*(char *)key.data == EQ_PREFIX) {
+                id = (ID)strtoul((char *)key.data + 1, NULL, 10);
+                idl_insert(&nodes, id);
+            }
+            key_count++;
+            /* Guard numsubordinates against division by zero: skip the
+             * percentage message when the total is unknown. */
+            if (!(key_count % PROGRESS_INTERVAL) && job->numsubordinates) {
+                import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+                                  "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+                                  (key_count * 100 / job->numsubordinates), key_count);
+                started_progress_logging = 1;
+            }
+        }
+    } while (ret == 0 && !(job->flags & FLAG_ABORT));
+
+    if (started_progress_logging) {
+        /* finish what we started logging (numsubordinates is known to be
+         * non-zero here, since the flag is only set above when it is) */
+        import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+                          "Gathering ancestorid non-leaf IDs: processed %d%% (ID count %d)",
+                          (key_count * 100 / job->numsubordinates), key_count);
+    }
+    import_log_notice(job, SLAPI_LOG_INFO, "bdb_get_nonleaf_ids",
+                      "Finished gathering ancestorid non-leaf IDs.");
+    /* Check for success: running off the end of the index is normal */
+    if (ret == DB_NOTFOUND)
+        ret = 0;
+    if (ret != 0)
+        ldbm_nasty("bdb_get_nonleaf_ids", sourcefile, 13030, ret);
+
+out:
+    /* Close the cursor */
+    if (dbc != NULL) {
+        if (ret == 0) {
+            ret = dbc->c_close(dbc);
+            if (ret != 0)
+                ldbm_nasty("bdb_get_nonleaf_ids", sourcefile, 13040, ret);
+        } else {
+            /* Already failed; don't let close mask the original error */
+            (void)dbc->c_close(dbc);
+        }
+    }
+
+    /* Release the parentid file */
+    if (db != NULL) {
+        dblayer_release_index_file(be, ai, db);
+    }
+
+    /* Return the idlist */
+    if (ret == 0) {
+        *idl = nodes;
+        slapi_log_err(SLAPI_LOG_TRACE, "bdb_get_nonleaf_ids", "Found %lu nodes for ancestorid\n",
+                      (u_long)IDL_NIDS(nodes));
+    } else {
+        idl_free(&nodes);
+        *idl = NULL;
+    }
+
+    return ret;
+}
+/*
+ * XXX: This function creates ancestorid index, which is a sort of hack.
+ *      This function handles idl directly,
+ *      which should have been implemented in the idl file(s).
+ *      When the idl code would be updated in the future,
+ *      this function may also get affected.
+ *      (see also bug#: 605535)
+ *
+ * Construct the ancestorid index. Requirements:
+ * - The backend is read only.
+ * - The parentid index is accurate.
+ * - Non-leaf entries have IDs less than their descendants
+ *   (guaranteed after a database import but not after a subtree move)
+ *
+ */
+static int
+bdb_ancestorid_create_index(backend *be, ImportJob *job)
+{
+    /* Dispatch to the builder matching the active IDL scheme. */
+    if (idl_get_idl_new()) {
+        return bdb_ancestorid_new_idl_create_index(be, job);
+    }
+    return bdb_ancestorid_default_create_index(be, job);
+}
+
+/*
+ * Create the ancestorid index.  This version is safe to
+ * use whichever IDL mode is active.  However, it may be
+ * quite a bit slower than bdb_ancestorid_new_idl_create_index()
+ * when the new mode is used, particularly with large databases.
+ */
+static int
+bdb_ancestorid_default_create_index(backend *be, ImportJob *job)
+{
+    int key_count = 0;
+    int ret = 0;
+    DB *db_pid = NULL;
+    DB *db_aid = NULL;
+    DBT key = {0};
+    DB_TXN *txn = NULL;
+    struct attrinfo *ai_pid = NULL;
+    struct attrinfo *ai_aid = NULL;
+    char keybuf[24];
+    IDList *nodes = NULL;
+    IDList *children = NULL, *descendants = NULL;
+    NIDS nids;
+    ID id, parentid;
+    id2idl_hash *ht = NULL;
+    id2idl *ididl;
+    int started_progress_logging = 0;
+
+    /*
+     * We need to iterate depth-first through the non-leaf nodes
+     * in the tree amassing an idlist of descendant ids for each node.
+     * We would prefer to go through the parentid keys just once from
+     * highest id to lowest id but the btree ordering is by string
+     * rather than number. So we go through the parentid keys in btree
+     * order first of all to create an idlist of all the non-leaf nodes.
+     * Then we can use the idlist to iterate through parentid in the
+     * correct order.
+     */
+
+    /* Get the non-leaf node IDs */
+    ret = bdb_get_nonleaf_ids(be, txn, &nodes, job);
+    if (ret != 0)
+        return ret;
+
+    /* Get the ancestorid index */
+    ainfo_get(be, LDBM_ANCESTORID_STR, &ai_aid);
+
+    /* Prevent any other use of the index */
+    ai_aid->ai_indexmask |= INDEX_OFFLINE;
+
+    /* Open the ancestorid index file */
+    ret = dblayer_get_index_file(be, ai_aid, &db_aid, DBOPEN_CREATE);
+    if (ret != 0) {
+        ldbm_nasty("bdb_ancestorid_default_create_index", sourcefile, 13050, ret);
+        goto out;
+    }
+
+    /* Maybe nothing to do */
+    if (nodes == NULL || nodes->b_nids == 0) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_default_create_index",
+                      "Nothing to do to build ancestorid index\n");
+        goto out;
+    }
+
+    /* Create an ancestorid cache, sized from the number of non-leaf nodes */
+    ht = id2idl_new_hash(nodes->b_nids);
+
+    /* Get the parentid index */
+    ainfo_get(be, LDBM_PARENTID_STR, &ai_pid);
+
+    /* Open the parentid index file */
+    ret = dblayer_get_index_file(be, ai_pid, &db_pid, DBOPEN_CREATE);
+    if (ret != 0) {
+        ldbm_nasty("bdb_ancestorid_default_create_index", sourcefile, 13060, ret);
+        goto out;
+    }
+
+    /* Initialize key DBT; keybuf is our own buffer (DB_DBT_USERMEM) and
+     * is re-filled for every lookup below */
+    key.data = keybuf;
+    key.ulen = sizeof(keybuf);
+    key.flags = DB_DBT_USERMEM;
+
+    import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+                      "Creating ancestorid index (old idl)...");
+    /* Iterate from highest to lowest ID */
+    nids = nodes->b_nids;
+    do {
+
+        nids--;
+        id = nodes->b_ids[nids];
+
+        /* Get immediate children from parentid index */
+        key.size = PR_snprintf(key.data, key.ulen, "%c%lu",
+                               EQ_PREFIX, (u_long)id);
+        key.size++; /* include the null terminator */
+        ret = NEW_IDL_NO_ALLID;
+        children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
+        if (ret != 0) {
+            ldbm_nasty("bdb_ancestorid_default_create_index", sourcefile, 13070, ret);
+            break;
+        }
+
+        /* check if we need to abort */
+        if (job->flags & FLAG_ABORT) {
+            import_log_notice(job, SLAPI_LOG_ERR, "bdb_ancestorid_default_create_index",
+                              "ancestorid creation aborted.");
+            ret = -1;
+            break;
+        }
+
+        key_count++;
+        /* NOTE(review): divides by job->numsubordinates; if that count can
+         * be zero while non-leaf nodes exist this is a division by zero --
+         * confirm callers always set it before indexing. */
+        if (!(key_count % PROGRESS_INTERVAL)) {
+            import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+                              "Creating ancestorid index: processed %d%% (ID count %d)",
+                              (key_count * 100 / job->numsubordinates), key_count);
+            started_progress_logging = 1;
+        }
+
+        /* Insert into ancestorid for this node: merge any descendant list
+         * already cached for this id with its immediate children */
+        if (id2idl_hash_lookup(ht, &id, &ididl)) {
+            descendants = idl_union_allids(be, ai_aid, ididl->idl, children);
+            idl_free(&children);
+            if (id2idl_hash_remove(ht, &id) == 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_default_create_index",
+                              "id2idl_hash_remove() failed\n");
+            } else {
+                id2idl_free(&ididl);
+            }
+        } else {
+            descendants = children; /* ownership transfers to descendants */
+        }
+        ret = idl_store_block(be, db_aid, &key, descendants, txn, ai_aid);
+        if (ret != 0)
+            break;
+
+        /* Get parentid for this entry */
+        ret = bdb_parentid(be, txn, id, &parentid);
+        if (ret != 0) {
+            idl_free(&descendants);
+            break;
+        }
+
+        /* A suffix entry does not have a parent */
+        if (parentid == NOID) {
+            idl_free(&descendants);
+            continue;
+        }
+
+        /* Insert into ancestorid for this node's parent: accumulate the
+         * descendants into the parent's cache entry for processing when
+         * the loop reaches the (lower) parent id */
+        if (id2idl_hash_lookup(ht, &parentid, &ididl)) {
+            IDList *idl = idl_union_allids(be, ai_aid, ididl->idl, descendants);
+            idl_free(&descendants);
+            idl_free(&(ididl->idl));
+            ididl->idl = idl;
+        } else {
+            ididl = (id2idl *)slapi_ch_calloc(1, sizeof(id2idl));
+            ididl->keyid = parentid;
+            ididl->idl = descendants; /* cache entry takes ownership */
+            if (id2idl_hash_add(ht, &parentid, ididl, NULL) == 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_default_create_index ",
+                              "id2idl_hash_add failed\n");
+            }
+        }
+
+    } while (nids > 0);
+
+    if (ret != 0) {
+        goto out;
+    }
+
+    /* We're expecting the cache to be empty */
+    ret = check_cache(ht);
+
+out:
+
+    /* Destroy the cache */
+    id2idl_hash_destroy(ht);
+
+    /* Free any leftover idlists */
+    idl_free(&nodes);
+
+    /* Release the parentid file */
+    if (db_pid != NULL) {
+        dblayer_release_index_file(be, ai_pid, db_pid);
+    }
+
+    /* Release the ancestorid file */
+    if (db_aid != NULL) {
+        dblayer_release_index_file(be, ai_aid, db_aid);
+    }
+
+    /* Enable the index */
+    if (ret == 0) {
+        if (started_progress_logging) {
+            /* finish what we started logging */
+            import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+                              "Creating ancestorid index: processed %d%% (ID count %d)",
+                              (key_count * 100 / job->numsubordinates), key_count);
+        }
+        import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_default_create_index",
+                          "Created ancestorid index (old idl).");
+        ai_aid->ai_indexmask &= ~INDEX_OFFLINE;
+    }
+
+    return ret;
+}
+
+/*
+ * Create the ancestorid index.  This version expects to use
+ * idl_new_store_block() and should be used when idl_new != 0.
+ * It has lower overhead and can be faster than
+ * bdb_ancestorid_default_create_index(), particularly on
+ * large databases.  Cf. bug 469800.
+ */
+static int
+bdb_ancestorid_new_idl_create_index(backend *be, ImportJob *job)
+{
+    int key_count = 0;
+    int ret = 0;
+    DB *db_pid = NULL;
+    DB *db_aid = NULL;
+    DBT key = {0};
+    DB_TXN *txn = NULL;
+    struct attrinfo *ai_pid = NULL;
+    struct attrinfo *ai_aid = NULL;
+    char keybuf[24];
+    IDList *nodes = NULL;
+    IDList *children = NULL;
+    NIDS nids;
+    ID id, parentid;
+    int started_progress_logging = 0;
+
+    /*
+     * We need to iterate depth-first through the non-leaf nodes
+     * in the tree amassing an idlist of descendant ids for each node.
+     * We would prefer to go through the parentid keys just once from
+     * highest id to lowest id but the btree ordering is by string
+     * rather than number. So we go through the parentid keys in btree
+     * order first of all to create an idlist of all the non-leaf nodes.
+     * Then we can use the idlist to iterate through parentid in the
+     * correct order.
+     */
+
+    /* Bail now if we did not get here honestly. */
+    if (!idl_get_idl_new()) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
+                      "Cannot create ancestorid index.  "
+                      "New IDL version called but idl_new is false!\n");
+        return 1;
+    }
+
+    /* Get the non-leaf node IDs */
+    ret = bdb_get_nonleaf_ids(be, txn, &nodes, job);
+    if (ret != 0)
+        return ret;
+
+    /* Get the ancestorid index */
+    ainfo_get(be, LDBM_ANCESTORID_STR, &ai_aid);
+
+    /* Prevent any other use of the index */
+    ai_aid->ai_indexmask |= INDEX_OFFLINE;
+
+    /* Open the ancestorid index file */
+    ret = dblayer_get_index_file(be, ai_aid, &db_aid, DBOPEN_CREATE);
+    if (ret != 0) {
+        ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13050, ret);
+        goto out;
+    }
+
+    /* Maybe nothing to do */
+    if (nodes == NULL || nodes->b_nids == 0) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
+                      "Nothing to do to build ancestorid index\n");
+        goto out;
+    }
+
+    /* Get the parentid index */
+    ainfo_get(be, LDBM_PARENTID_STR, &ai_pid);
+
+    /* Open the parentid index file */
+    ret = dblayer_get_index_file(be, ai_pid, &db_pid, DBOPEN_CREATE);
+    if (ret != 0) {
+        ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13060, ret);
+        goto out;
+    }
+
+    /* Initialize key DBT; keybuf is our own buffer (DB_DBT_USERMEM) and
+     * is re-filled for every lookup below */
+    key.data = keybuf;
+    key.ulen = sizeof(keybuf);
+    key.flags = DB_DBT_USERMEM;
+
+    import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+                      "Creating ancestorid index (new idl)...");
+    /* Iterate from highest to lowest ID */
+    nids = nodes->b_nids;
+    do {
+
+        nids--;
+        id = nodes->b_ids[nids];
+
+        /* Get immediate children from parentid index */
+        key.size = PR_snprintf(key.data, key.ulen, "%c%lu",
+                               EQ_PREFIX, (u_long)id);
+        key.size++; /* include the null terminator */
+        ret = NEW_IDL_NO_ALLID;
+        children = idl_fetch(be, db_pid, &key, txn, ai_pid, &ret);
+        if (ret != 0) {
+            ldbm_nasty("bdb_ancestorid_new_idl_create_index", sourcefile, 13070, ret);
+            break;
+        }
+
+        /* check if we need to abort */
+        if (job->flags & FLAG_ABORT) {
+            import_log_notice(job, SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
+                              "ancestorid creation aborted.");
+            ret = -1;
+            break;
+        }
+
+        key_count++;
+        /* NOTE(review): divides by job->numsubordinates; if that count can
+         * be zero while non-leaf nodes exist this is a division by zero --
+         * confirm callers always set it before indexing. */
+        if (!(key_count % PROGRESS_INTERVAL)) {
+            import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+                              "Creating ancestorid index: progress %d%% (ID count %d)",
+                              (key_count * 100 / job->numsubordinates), key_count);
+            started_progress_logging = 1;
+        }
+
+        /* Instead of maintaining a full accounting of IDs in a hashtable
+         * as is done with bdb_ancestorid_default_create_index(), perform
+         * incremental updates straight to the DB with idl_new_store_block()
+         * (used by idl_store_block() when idl_get_idl_new() is true).  This
+         * can be a significant performance improvement with large databases,
+         * where  the overhead of maintaining and copying the lists is very
+         * expensive, particularly when the allids threshold is not being
+         * used to provide any cut off.  Cf. bug 469800.
+         * TEL 20081029 */
+
+        /* Insert into ancestorid for this node */
+        ret = idl_store_block(be, db_aid, &key, children, txn, ai_aid);
+        if (ret != 0) {
+            idl_free(&children);
+            break;
+        }
+
+        /* Get parentid(s) for this entry: climb the tree, merging the
+         * children block into the ancestorid entry of every ancestor */
+        while (1) {
+            ret = bdb_parentid(be, txn, id, &parentid);
+            if (ret != 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
+                              "Failure: bdb_parentid on node index [" ID_FMT "] of [" ID_FMT "]\n",
+                              nids, nodes->b_nids);
+                idl_free(&children);
+                goto out;
+            }
+
+            /* A suffix entry does not have a parent */
+            if (parentid == NOID) {
+                idl_free(&children);
+                break;
+            }
+
+            /* Reset the key to the parent id */
+            key.size = PR_snprintf(key.data, key.ulen, "%c%lu",
+                                   EQ_PREFIX, (u_long)parentid);
+            key.size++; /* include the null terminator */
+
+            /* Insert into ancestorid for this node's parent */
+            ret = idl_store_block(be, db_aid, &key, children, txn, ai_aid);
+            if (ret != 0) {
+                idl_free(&children);
+                goto out;
+            }
+            id = parentid;
+        }
+    } while (nids > 0);
+
+    if (ret != 0) {
+        /* falls through to the same label; kept for symmetry with the
+         * default-idl variant */
+        goto out;
+    }
+
+out:
+    if (ret == 0) {
+        if (started_progress_logging) {
+            /* finish what we started logging */
+            import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+                              "Creating ancestorid index: processed %d%% (ID count %d)",
+                              (key_count * 100 / job->numsubordinates), key_count);
+        }
+        import_log_notice(job, SLAPI_LOG_INFO, "bdb_ancestorid_new_idl_create_index",
+                          "Created ancestorid index (new idl).");
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_ancestorid_new_idl_create_index",
+                      "Failed to create ancestorid index\n");
+    }
+
+    /* Free any leftover idlists */
+    idl_free(&nodes);
+
+    /* Release the parentid file */
+    if (db_pid != NULL) {
+        dblayer_release_index_file(be, ai_pid, db_pid);
+    }
+
+    /* Release the ancestorid file */
+    if (db_aid != NULL) {
+        dblayer_release_index_file(be, ai_aid, db_aid);
+    }
+
+    /* Enable the index */
+    if (ret == 0) {
+        ai_aid->ai_indexmask &= ~INDEX_OFFLINE;
+    }
+
+    return ret;
+}
+/* Update subordinate count in a hint list, given the parent's ID */
+int
+import_subcount_mother_init(import_subcount_stuff *mothers, ID parent_id, size_t count)
+{
+    void *hkey = (void *)((uintptr_t)parent_id);
+
+    /* The parent must not already be registered in the hint table. */
+    PR_ASSERT(NULL == PL_HashTableLookup(mothers->hashtable, hkey));
+    PL_HashTableAdd(mothers->hashtable, hkey, (void *)count);
+    return 0;
+}
+
+/* Look for a subordinate count in a hint list, given the parent's ID */
+static int
+import_subcount_mothers_lookup(import_subcount_stuff *mothers,
+                               ID parent_id,
+                               size_t *count)
+{
+    size_t hit = (size_t)PL_HashTableLookup(mothers->hashtable,
+                                            (void *)((uintptr_t)parent_id));
+
+    /* A zero value doubles as "not present": counts are never stored
+     * as zero. */
+    if (0 == hit) {
+        *count = 0;
+        return -1;
+    }
+    *count = hit;
+    return 0;
+}
+
+/* Update subordinate count in a hint list, given the parent's ID */
+int
+import_subcount_mother_count(import_subcount_stuff *mothers, ID parent_id)
+{
+    void *hkey = (void *)((uintptr_t)parent_id);
+    size_t n = (size_t)PL_HashTableLookup(mothers->hashtable, hkey);
+
+    /* The caller guarantees the parent was registered beforehand. */
+    PR_ASSERT(0 != n);
+    /* PL_HashTableAdd replaces the value already stored for the key. */
+    n++;
+    PL_HashTableAdd(mothers->hashtable, hkey, (void *)n);
+    return 0;
+}
+
+/*
+ * Write `sub_count` as the numsubordinates attribute of the entry with
+ * ID `parentid` (tombstonenumsubordinates for tombstone entries),
+ * re-using the regular modify machinery so the value is also indexed.
+ * Returns 0 on success or an ldbm/LDAP error code.
+ */
+static int
+import_update_entry_subcount(backend *be, ID parentid, size_t sub_count, int isencrypted)
+{
+    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+    int ret = 0;
+    modify_context mc = {0};
+    char value_buffer[22] = {0}; /* enough digits for 2^64 children */
+    struct backentry *e = NULL;
+    int isreplace = 0;
+    char *numsub_str = numsubordinates;
+
+    /* Get hold of the parent */
+    e = id2entry(be, parentid, NULL, &ret);
+    if ((NULL == e) || (0 != ret)) {
+        ldbm_nasty("import_update_entry_subcount", sourcefile, 5, ret);
+        return (0 == ret) ? -1 : ret;
+    }
+    /* Lock it (not really required since we're single-threaded here, but
+     * let's do it so we can reuse the modify routines) */
+    cache_lock_entry(&inst->inst_cache, e);
+    modify_init(&mc, e);
+    mc.attr_encrypt = isencrypted;
+    sprintf(value_buffer, "%lu", (long unsigned int)sub_count);
+    /* If it is a tombstone entry, add tombstonesubordinates instead of
+     * numsubordinates. */
+    if (slapi_entry_flag_is_set(e->ep_entry, SLAPI_ENTRY_FLAG_TOMBSTONE)) {
+        numsub_str = tombstone_numsubordinates;
+    }
+    /* attr numsubordinates/tombstonenumsubordinates could already exist in
+     * the entry, let's check whether it's already there or not */
+    isreplace = (attrlist_find(e->ep_entry->e_attrs, numsub_str) != NULL);
+    {
+        int op = isreplace ? LDAP_MOD_REPLACE : LDAP_MOD_ADD;
+        Slapi_Mods *smods = slapi_mods_new();
+
+        slapi_mods_add(smods, op | LDAP_MOD_BVALUES, numsub_str,
+                       strlen(value_buffer), value_buffer);
+        ret = modify_apply_mods(&mc, smods); /* smods passed in */
+    }
+    if (0 == ret || LDAP_TYPE_OR_VALUE_EXISTS == ret) {
+        /* This will correctly index subordinatecount: */
+        ret = modify_update_all(be, NULL, &mc, NULL);
+        if (0 == ret) {
+            modify_switch_entries(&mc, be);
+        }
+    }
+    /* entry is unlocked and returned to the cache in modify_term */
+    modify_term(&mc, be);
+    return ret;
+}
+/* One node of the "trawl" list: a parent ID whose subordinate count
+ * cannot be read from the parentid index (its IDL went ALLIDS) and must
+ * instead be recomputed by scanning id2entry -- see import_subcount_trawl(). */
+struct _import_subcount_trawl_info
+{
+    struct _import_subcount_trawl_info *next; /* next node in the list */
+    ID id;                                    /* parent entry ID */
+    size_t sub_count;                         /* children counted so far */
+};
+typedef struct _import_subcount_trawl_info import_subcount_trawl_info;
+
+/* Push a fresh node for `id` onto the head of the trawl list. */
+static void
+import_subcount_trawl_add(import_subcount_trawl_info **list, ID id)
+{
+    import_subcount_trawl_info *node = CALLOC(import_subcount_trawl_info);
+
+    node->id = id;
+    node->next = *list;
+    *list = node;
+}
+
+/*
+ * Scan id2entry sequentially and count, for every parent ID on
+ * `trawl_list`, how many entries carry that parentid value; then write
+ * the counts back with import_update_entry_subcount().
+ * Returns 0 on success or an ldbm/BDB error code.
+ *
+ * Fixes: value_buffer was 20 bytes -- one byte short of the 20 digits
+ * plus NUL needed for the claimed 2^64 maximum (the sibling
+ * import_update_entry_subcount() already uses 22); sprintf is replaced
+ * by a bounded snprintf; and the normal end-of-file DB_NOTFOUND no
+ * longer leaks out as the return value when the trawl list is empty.
+ */
+static int
+import_subcount_trawl(backend *be,
+                      import_subcount_trawl_info *trawl_list,
+                      int isencrypted)
+{
+    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+    ID id = 1;
+    int ret = 0;
+    import_subcount_trawl_info *current = NULL;
+    char value_buffer[22] = {0}; /* enough digits for 2^64 children */
+
+    /* OK, we do */
+    /* We open id2entry and iterate through it */
+    /* Foreach entry, we check to see if its parentID matches any of the
+     * values in the trawl list . If so, we bump the sub count for that
+     * parent in the list.
+     */
+    while (1) {
+        struct backentry *e = NULL;
+
+        /* Get the next entry */
+        e = id2entry(be, id, NULL, &ret);
+        if ((NULL == e) || (0 != ret)) {
+            if (DB_NOTFOUND == ret) {
+                /* Ran off the end of id2entry: normal termination. */
+                ret = 0;
+                break;
+            } else {
+                ldbm_nasty("import_subcount_trawl", sourcefile, 8, ret);
+                return ret;
+            }
+        }
+        for (current = trawl_list; current != NULL; current = current->next) {
+            snprintf(value_buffer, sizeof(value_buffer), "%lu", (u_long)current->id);
+            if (slapi_entry_attr_hasvalue(e->ep_entry, LDBM_PARENTID_STR, value_buffer)) {
+                /* If this entry's parent ID matches one we're trawling for,
+                 * bump its count */
+                current->sub_count++;
+            }
+        }
+        /* Free the entry */
+        CACHE_REMOVE(&inst->inst_cache, e);
+        CACHE_RETURN(&inst->inst_cache, &e);
+        id++;
+    }
+    /* Now update the parent entries from the list */
+    for (current = trawl_list; current != NULL; current = current->next) {
+        /* Update the parent entry with the correctly counted subcount */
+        ret = import_update_entry_subcount(be, current->id,
+                                           current->sub_count, isencrypted);
+        if (0 != ret) {
+            ldbm_nasty("import_subcount_trawl", sourcefile, 10, ret);
+            break;
+        }
+    }
+    return ret;
+}
+
+/*
+ * Function: bdb_update_subordinatecounts
+ *
+ * Returns: Nothing
+ *
+ */
+/*
+ * Walk the parentid equality index and store a numsubordinates count on
+ * every parent entry found there.  A parent's count comes, in order of
+ * preference, from: the "mothers" hint list built earlier in the import,
+ * the size of the IDL stored under its equality key, or -- when the key
+ * has gone ALLIDS -- a later full trawl of id2entry via
+ * import_subcount_trawl().
+ */
+static int
+bdb_update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn)
+{
+    import_subcount_stuff *mothers = job->mothers;
+    int isencrypted = job->encrypt;
+    int started_progress_logging = 0;
+    int key_count = 0;
+    int ret = 0;
+    DB *db = NULL;
+    DBC *dbc = NULL;
+    struct attrinfo *ai = NULL;
+    DBT key = {0};
+    DBT data = {0};
+    import_subcount_trawl_info *trawl_list = NULL;
+
+    /* Open the parentid index */
+    ainfo_get(be, LDBM_PARENTID_STR, &ai);
+
+    /* Open the parentid index file */
+    if ((ret = dblayer_get_index_file(be, ai, &db, DBOPEN_CREATE)) != 0) {
+        ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 67, ret);
+        return (ret);
+    }
+    /* Get a cursor so we can walk through the parentid */
+    ret = db->cursor(db, txn, &dbc, 0);
+    if (ret != 0) {
+        ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 68, ret);
+        dblayer_release_index_file(be, ai, db);
+        return ret;
+    }
+
+    /* Walk along the index */
+    while (1) {
+        size_t sub_count = 0;
+        int found_count = 1;
+        ID parentid = 0;
+
+        /* Foreach key which is an equality key : */
+        /* Only the keys matter here, so duplicates are skipped
+         * (DB_NEXT_NODUP) and the returned data item is freed at once. */
+        data.flags = DB_DBT_MALLOC;
+        key.flags = DB_DBT_MALLOC;
+        ret = dbc->c_get(dbc, &key, &data, DB_NEXT_NODUP);
+        if (NULL != data.data) {
+            slapi_ch_free(&(data.data));
+            data.data = NULL;
+        }
+        if (0 != ret) {
+            /* DB_NOTFOUND is the normal end-of-index condition; anything
+             * else is a genuine error worth logging.  ret is overwritten
+             * by c_close() below, so DB_NOTFOUND is not returned. */
+            if (ret != DB_NOTFOUND) {
+                ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 62, ret);
+            }
+            if (NULL != key.data) {
+                slapi_ch_free(&(key.data));
+                key.data = NULL;
+            }
+            break;
+        }
+        /* check if we need to abort */
+        if (job->flags & FLAG_ABORT) {
+            import_log_notice(job, SLAPI_LOG_ERR, "bdb_update_subordinatecounts",
+                              "numsubordinate generation aborted.");
+            break;
+        }
+        /*
+         * Do an update count
+         */
+        key_count++;
+        if (!(key_count % PROGRESS_INTERVAL)) {
+            import_log_notice(job, SLAPI_LOG_INFO, "bdb_update_subordinatecounts",
+                              "numsubordinate generation: processed %d entries...",
+                              key_count);
+            started_progress_logging = 1;
+        }
+
+        /* Only equality keys ("=<id>") carry subordinate info; presence
+         * and other key types in this index are skipped. */
+        if (*(char *)key.data == EQ_PREFIX) {
+            char *idptr = NULL;
+
+            /* construct the parent's ID from the key */
+            /* Look for the ID in the hint list supplied by the caller */
+            /* If its there, we know the answer already */
+            idptr = (((char *)key.data) + 1);
+            parentid = (ID)atol(idptr);
+            PR_ASSERT(0 != parentid);
+            ret = import_subcount_mothers_lookup(mothers, parentid, &sub_count);
+            if (0 != ret) {
+                IDList *idl = NULL;
+
+                /* If it's not, we need to compute it ourselves: */
+                /* Load the IDL matching the key */
+                /* idl_fetch may resize key.data, hence DB_DBT_REALLOC;
+                 * the buffer is still freed at the bottom of the loop. */
+                key.flags = DB_DBT_REALLOC;
+                ret = NEW_IDL_NO_ALLID;
+                idl = idl_fetch(be, db, &key, NULL, NULL, &ret);
+                if ((NULL == idl) || (0 != ret)) {
+                    ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 4, ret);
+                    dblayer_release_index_file(be, ai, db);
+                    return (0 == ret) ? -1 : ret;
+                }
+                /* The number of IDs in the IDL tells us the number of
+                 * subordinates for the entry */
+                /* Except, the number might be above the allidsthreshold,
+                 * in which case */
+                if (ALLIDS(idl)) {
+                    /* We add this ID to the list for which to trawl */
+                    import_subcount_trawl_add(&trawl_list, parentid);
+                    found_count = 0;
+                } else {
+                    /* We get the count from the IDL */
+                    sub_count = idl->b_nids;
+                }
+                idl_free(&idl);
+            }
+            /* Did we get the count ? */
+            if (found_count) {
+                PR_ASSERT(0 != sub_count);
+                /* If so, update the parent now */
+                import_update_entry_subcount(be, parentid, sub_count, isencrypted);
+            }
+        }
+        if (NULL != key.data) {
+            slapi_ch_free(&(key.data));
+            key.data = NULL;
+        }
+    }
+    if (started_progress_logging) {
+        /* Finish what we started... */
+        import_log_notice(job, SLAPI_LOG_INFO, "bdb_update_subordinatecounts",
+                          "numsubordinate generation: processed %d entries.",
+                          key_count);
+        /* NOTE(review): job->numsubordinates is only recorded when the
+         * PROGRESS_INTERVAL threshold was crossed; for small imports it
+         * stays 0.  Looks intentional (it feeds progress reporting) --
+         * confirm. */
+        job->numsubordinates = key_count;
+    }
+
+    ret = dbc->c_close(dbc);
+    if (0 != ret) {
+        ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 6, ret);
+    }
+    dblayer_release_index_file(be, ai, db);
+
+    /* Now see if we need to go trawling through id2entry for the info
+     * we need */
+    if (NULL != trawl_list) {
+        ret = import_subcount_trawl(be, trawl_list, isencrypted);
+        if (0 != ret) {
+            ldbm_nasty("bdb_update_subordinatecounts", sourcefile, 7, ret);
+        }
+    }
+    return (ret);
+}
+
+/* Callback used to gather the list of indexed attributes the import's
+ * worker threads will maintain.  Called once per attrinfo node; all calls
+ * happen on a single thread, so job-state updates need no locking. */
+static int
+import_attr_callback(void *node, void *param)
+{
+    ImportJob *job = (ImportJob *)param;
+    struct attrinfo *ainfo = (struct attrinfo *)node;
+
+    /* A dry run builds nothing, so no workers are needed. */
+    if (job->flags & FLAG_DRYRUN) {
+        return 0;
+    }
+    if (job->flags & (FLAG_UPGRADEDNFORMAT | FLAG_UPGRADEDNFORMAT_V1)) {
+        /* For a DN-format upgrade only indexes over DN-syntax attributes
+         * need workers (entrydn is handled below).  cn and ou are treated
+         * as if they had DN syntax: admin entries such as UserPreferences
+         * may store DN-formatted values in them. */
+        int dn_like =
+            (0 == PL_strcasecmp("cn", ainfo->ai_type)) ||
+            (0 == PL_strcasecmp("commonname", ainfo->ai_type)) ||
+            (0 == PL_strcasecmp("ou", ainfo->ai_type)) ||
+            (0 == PL_strcasecmp("organizationalUnit", ainfo->ai_type));
+
+        if (!dn_like) {
+            Slapi_Attr attr = {0};
+            int is_dn_syntax;
+
+            slapi_attr_init(&attr, ainfo->ai_type);
+            is_dn_syntax = slapi_attr_is_dn_syntax_attr(&attr);
+            attr_done(&attr);
+            if (0 == is_dn_syntax) {
+                return 0;
+            }
+        }
+    }
+
+    /* Although this callback fires many times, it is single-threaded, so
+     * the job structure can be updated without protection.
+     *
+     * The system indexes -- entrydn/entryrdn, parentid, ancestorid and
+     * numsubordinates -- are built by the foreman thread, so they are
+     * specifically excluded here. */
+    if (!IS_INDEXED(ainfo->ai_indexmask) ||
+        (strcasecmp(ainfo->ai_type, LDBM_ENTRYDN_STR) == 0) ||
+        (strcasecmp(ainfo->ai_type, LDBM_ENTRYRDN_STR) == 0) ||
+        (strcasecmp(ainfo->ai_type, LDBM_PARENTID_STR) == 0) ||
+        (strcasecmp(ainfo->ai_type, LDBM_ANCESTORID_STR) == 0) ||
+        (strcasecmp(ainfo->ai_type, numsubordinates) == 0)) {
+        return 0;
+    }
+
+    /* Describe this index and push it onto the job's list. */
+    IndexInfo *info = CALLOC(IndexInfo);
+    if (NULL == info) {
+        return -1; /* out of memory */
+    }
+    info->name = slapi_ch_strdup(ainfo->ai_type);
+    info->ai = ainfo;
+    if (NULL == info->name) {
+        FREE(info);
+        return -1; /* out of memory */
+    }
+    info->next = job->index_list;
+    job->index_list = info;
+    job->number_indexers++;
+    return 0;
+}
+
+/* Pick a per-index buffer size suggestion for the job: substring indexes
+ * are the buffer consumers, so divide the configured target space among
+ * them and clamp the result to the supported range. */
+static void
+import_set_index_buffer_size(ImportJob *job)
+{
+    size_t substring_indexes = 0;
+    size_t suggestion = 0;
+    IndexInfo *idx;
+
+    /* How many substring indexes does this job maintain? */
+    for (idx = job->index_list; idx != NULL; idx = idx->next) {
+        if (idx->ai->ai_indexmask & INDEX_SUB) {
+            substring_indexes++;
+        }
+    }
+
+    if (substring_indexes > 0) {
+        /* Size buffers so that, with every substring index reasonably
+         * full, the configured target space would be consumed. */
+        suggestion = (job->job_index_buffer_size / substring_indexes) /
+                     IMPORT_INDEX_BUFFER_SIZE_CONSTANT;
+        if (suggestion > IMPORT_MAX_INDEX_BUFFER_SIZE) {
+            suggestion = IMPORT_MAX_INDEX_BUFFER_SIZE;
+        }
+        if (suggestion < IMPORT_MIN_INDEX_BUFFER_SIZE) {
+            suggestion = 0; /* too small to be worth buffering at all */
+        }
+    }
+
+    job->job_index_buffer_suggestion = suggestion;
+}
+
+/* Free every worker-thread descriptor on the job's list; the producer's
+ * descriptor is owned elsewhere and is left alone. */
+static void
+import_free_thread_data(ImportJob *job)
+{
+    ImportWorkerInfo *cursor = job->worker_list;
+
+    while (cursor != NULL) {
+        ImportWorkerInfo *doomed = cursor;
+        cursor = cursor->next;
+        if (doomed->work_type != PRODUCER) {
+            slapi_ch_free((void **)&doomed);
+        }
+    }
+}
+
+/* Release everything attached to an import job: worker descriptors, the
+ * index list, subordinate-count hints, subtree filters, input file names,
+ * the entry fifo, and the wire-import lock/condvar/status. */
+void
+import_free_job(ImportJob *job)
+{
+    IndexInfo *idx = job->index_list;
+
+    import_free_thread_data(job);
+    while (idx != NULL) {
+        IndexInfo *doomed = idx;
+        idx = idx->next;
+        slapi_ch_free((void **)&doomed->name);
+        slapi_ch_free((void **)&doomed);
+    }
+    job->index_list = NULL;
+
+    if (job->mothers != NULL) {
+        import_subcount_stuff_term(job->mothers);
+        slapi_ch_free((void **)&job->mothers);
+    }
+
+    bdb_back_free_incl_excl(job->include_subtrees, job->exclude_subtrees);
+    charray_free(job->input_filenames);
+
+    if (job->fifo.size) {
+        /* bulk_import_queue runs while holding the job lock and uses the
+         * fifo queue; take the same lock so the queue is not destroyed
+         * out from under it. */
+        if (job->wire_lock) {
+            PR_Lock(job->wire_lock);
+        }
+        import_fifo_destroy(job);
+        if (job->wire_lock) {
+            PR_Unlock(job->wire_lock);
+        }
+    }
+
+    if (job->uuid_namespace != NULL) {
+        slapi_ch_free((void **)&job->uuid_namespace);
+    }
+    if (job->wire_lock) {
+        PR_DestroyLock(job->wire_lock);
+    }
+    if (job->wire_cv) {
+        PR_DestroyCondVar(job->wire_cv);
+    }
+    slapi_ch_free((void **)&job->task_status);
+}
+
+/* Decide whether entry e belongs in backend be.  In a distributed suffix
+ * some entries are destined for other backends, but an entry whose DN is
+ * itself one of this backend's suffixes always belongs here: suffix
+ * entries must be present in every backend regardless of the
+ * distribution function applied. */
+int
+import_entry_belongs_here(Slapi_Entry *e, backend *be)
+{
+    Slapi_DN *sdn = slapi_entry_get_sdn(e);
+
+    if (slapi_be_issuffix(be, sdn)) {
+        return 1;
+    }
+    return slapi_mapping_tree_find_backend_for_sdn(sdn) == be;
+}
+
+
+/********** starting threads and stuff **********/
+
+/* Solaris is weird---we need an LWP per thread but NSPR doesn't give us
+ * one unless we make this magic belshe-call */
+/* Fixed on Solaris 8; NSPR supports PR_GLOBAL_BOUND_THREAD */
+#define CREATE_THREAD PR_CreateThread
+
+/* Seed a worker descriptor from the job before its thread is started;
+ * the thread stays PAUSEd until the monitor switches it to RUN. */
+static void
+import_init_worker_info(ImportWorkerInfo *info, ImportJob *job)
+{
+    info->job = job;
+    info->command = PAUSE;
+    info->index_buffer_size = job->job_index_buffer_suggestion;
+    info->first_ID = job->first_ID;
+}
+
+/*
+ * Spawn the import threads: one foreman plus, when attribute indexing is
+ * enabled (FLAG_INDEX_ATTRS), one worker per entry on job->index_list.
+ * Each descriptor is pushed onto job->worker_list as its thread starts.
+ * Returns 0 on success; on any failure every thread started so far is
+ * aborted via import_abort_all() and -1 is returned.
+ */
+static int
+import_start_threads(ImportJob *job)
+{
+    IndexInfo *current_index = NULL;
+    ImportWorkerInfo *foreman = NULL, *worker = NULL;
+
+    foreman = CALLOC(ImportWorkerInfo);
+    if (!foreman)
+        goto error;
+
+    /* start the foreman */
+    import_init_worker_info(foreman, job);
+    foreman->work_type = FOREMAN;
+    /* Threads are created unjoinable; completion is observed through the
+     * descriptor's state field by import_monitor_threads(). */
+    if (!CREATE_THREAD(PR_USER_THREAD, (VFP)import_foreman, foreman,
+                       PR_PRIORITY_NORMAL, PR_GLOBAL_BOUND_THREAD,
+                       PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+        PRErrorCode prerr = PR_GetError();
+        slapi_log_err(SLAPI_LOG_ERR, "import_start_threads",
+                      "Unable to spawn import foreman thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                      prerr, slapd_pr_strerror(prerr));
+        /* descriptor was never linked onto the list, so free it here */
+        FREE(foreman);
+        goto error;
+    }
+
+    foreman->next = job->worker_list;
+    job->worker_list = foreman;
+
+    /* Start follower threads, if we are doing attribute indexing */
+    current_index = job->index_list;
+    if (job->flags & FLAG_INDEX_ATTRS) {
+        while (current_index) {
+            /* make a new thread info structure */
+            worker = CALLOC(ImportWorkerInfo);
+            if (!worker)
+                goto error;
+
+            /* fill it in */
+            import_init_worker_info(worker, job);
+            worker->index_info = current_index;
+            worker->work_type = WORKER;
+
+            /* Start the thread */
+            if (!CREATE_THREAD(PR_USER_THREAD, (VFP)import_worker, worker,
+                               PR_PRIORITY_NORMAL, PR_GLOBAL_BOUND_THREAD,
+                               PR_UNJOINABLE_THREAD,
+                               SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+                PRErrorCode prerr = PR_GetError();
+                slapi_log_err(SLAPI_LOG_ERR, "import_start_threads",
+                              "Unable to spawn import worker thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                              prerr, slapd_pr_strerror(prerr));
+                /* not yet on the list, so free it before bailing out */
+                FREE(worker);
+                goto error;
+            }
+
+            /* link it onto the job's thread list */
+            worker->next = job->worker_list;
+            job->worker_list = worker;
+            current_index = current_index->next;
+        }
+    }
+    return 0;
+
+error:
+    import_log_notice(job, SLAPI_LOG_ERR, "import_start_threads", "Import thread creation failed.");
+    import_log_notice(job, SLAPI_LOG_ERR, "import_start_threads", "Aborting all import threads...");
+    import_abort_all(job, 1);
+    import_log_notice(job, SLAPI_LOG_ERR, "import_start_threads", "Import threads aborted.");
+    return -1;
+}
+
+
+/********** monitoring the worker threads **********/
+
+/* Reset the progress-rate history to "no progress yet" (first ID at the
+ * start time) and zero the per-instance libdb cache counters that feed
+ * the hit-ratio estimate. */
+static void
+import_clear_progress_history(ImportJob *job)
+{
+    int slot;
+
+    for (slot = 0; slot < IMPORT_JOB_PROG_HISTORY_SIZE; slot++) {
+        job->progress_history[slot] = job->first_ID;
+        job->progress_times[slot] = job->start_time;
+    }
+    /* reset libdb cache stats */
+    job->inst->inst_cache_hits = 0;
+    job->inst->inst_cache_misses = 0;
+}
+
+/*
+ * Sample the BDB memory-pool statistics for this instance and return the
+ * cache hit ratio measured over the interval since the previous sample.
+ * Returns 0.0 on the first sample (no baseline yet), when the stats call
+ * fails, or when no hits occurred in the interval.  The baseline counters
+ * are kept in inst->inst_cache_hits / inst->inst_cache_misses.
+ */
+static double
+import_grok_db_stats(ldbm_instance *inst)
+{
+    DB_MPOOL_STAT *mpstat = NULL;
+    DB_MPOOL_FSTAT **mpfstat = NULL;
+    int return_value = -1;
+    double cache_hit_ratio = 0.0;
+
+    return_value = bdb_memp_stat_instance(inst, &mpstat, &mpfstat);
+
+    if (!mpstat) {
+        goto out;
+    }
+
+    if (0 == return_value) {
+        unsigned long current_cache_hits = mpstat->st_cache_hit;
+        unsigned long current_cache_misses = mpstat->st_cache_miss;
+
+        /* Only compute a ratio once a baseline exists (hits != 0),
+         * i.e. from the second sample onwards. */
+        if (inst->inst_cache_hits) {
+            unsigned long hit_delta, miss_delta;
+
+            hit_delta = current_cache_hits - inst->inst_cache_hits;
+            miss_delta = current_cache_misses - inst->inst_cache_misses;
+            if (hit_delta != 0) {
+                cache_hit_ratio = (double)hit_delta /
+                                  (double)(hit_delta + miss_delta);
+            }
+        }
+        /* remember this sample as the next baseline */
+        inst->inst_cache_misses = current_cache_misses;
+        inst->inst_cache_hits = current_cache_hits;
+    }
+
+out:
+    /* NOTE(review): mpfstat is released with a single free -- assumes
+     * bdb_memp_stat_instance returns it as one contiguous allocation;
+     * confirm against that helper. */
+    if (mpstat)
+        slapi_ch_free((void **)&mpstat);
+    if (mpfstat) {
+        slapi_ch_free((void **)&mpfstat);
+    }
+    return cache_hit_ratio;
+}
+
+/* Map a worker state to the single-letter code shown in status charts. */
+static char *
+import_decode_worker_state(int state)
+{
+    if (state == WAITING) {
+        return "W";
+    }
+    if (state == RUNNING) {
+        return "R";
+    }
+    if (state == FINISHED) {
+        return "F";
+    }
+    if (state == ABORTED) {
+        return "A";
+    }
+    return "?";
+}
+
+/* Emit one progress-chart line for a thread: name, state code, the last
+ * ID it processed, and its current rate. */
+static void
+import_print_worker_status(ImportWorkerInfo *info)
+{
+    char *name;
+
+    if (info->work_type == PRODUCER) {
+        name = "Producer";
+    } else if (info->work_type == FOREMAN) {
+        name = "Foreman";
+    } else {
+        name = info->index_info->name;
+    }
+
+    import_log_status_add_line(info->job,
+                               "%-25s %s%10ld %7.1f", name,
+                               import_decode_worker_state(info->state),
+                               info->last_ID_processed, info->rate);
+}
+
+
+#define IMPORT_CHUNK_TEST_HOLDOFF_TIME (5 * 60) /* Seconds */
+
+/* Got to be lower than this: */
+#define IMPORT_CHUNK_TEST_CACHE_HIT_RATIO (0.99)
+/* Less than half as fast as we were doing: */
+#define IMPORT_CHUNK_TEST_SLOWDOWN_RATIO_A (0.5)
+/* A lot less fast than we were doing: */
+#define IMPORT_CHUNK_TEST_SLOWDOWN_RATIO_B (0.1)
+
+/*
+ * Decide whether to end the current import pass early because progress
+ * has bogged down.  Returns 1 to end the pass, 0 to keep going.
+ *
+ * With a forced merge chunk size (secret -c option) the decision is just
+ * "has this pass processed that many entries".  Otherwise, once the pass
+ * has run past the holdoff time, the pass ends when either:
+ *   - progress dropped below SLOWDOWN_RATIO_A of the average AND the
+ *     cache hit ratio is poor (the purported reason for the slowdown), or
+ *   - progress plummeted below SLOWDOWN_RATIO_B on more than ten
+ *     consecutive checks, regardless of the cache ratio -- designed to
+ *     catch a cache that has been misconfigured too large.
+ *
+ * Fixed: the RATIO_B test used to sit on the else-branch of the RATIO_A
+ * test, but RATIO_B < RATIO_A makes it unreachable there, and the
+ * consecutive-check counter was reset on every call, so the "plummeted"
+ * exit could never fire.  The B test now nests inside the A branch and
+ * the counter survives while the streak continues.
+ */
+static int
+import_throw_in_towel(ImportJob *job, time_t current_time, ID trailing_ID)
+{
+    /* counts consecutive checks that saw a RATIO_B-level slowdown */
+    static int number_of_times_here = 0;
+
+    /* secret -c option allows specific chunk size to be set... */
+    if (job->merge_chunk_size != 0) {
+        if ((0 != job->lead_ID) &&
+            (trailing_ID > job->first_ID) &&
+            (trailing_ID - job->first_ID > job->merge_chunk_size)) {
+            return 1;
+        }
+        return 0;
+    }
+
+    /* Check stats to decide whether we're getting bogged down and should
+     * terminate this pass.
+     */
+
+    /* Check #1 : has the chunk been running longer than the holdoff time ? */
+    if (current_time - job->start_time > IMPORT_CHUNK_TEST_HOLDOFF_TIME) {
+        double slowdown = job->recent_progress_rate / job->average_progress_rate;
+
+        /* Check #2 : Have we slowed down considerably recently ? */
+        if (slowdown < IMPORT_CHUNK_TEST_SLOWDOWN_RATIO_A) {
+            /* Check #3: Cache performing poorly---the purported reason
+             * for the slowdown */
+            if (job->cache_hit_ratio < IMPORT_CHUNK_TEST_CACHE_HIT_RATIO) {
+                /* We have a winner ! */
+                import_log_notice(job, SLAPI_LOG_INFO, "import_throw_in_towel",
+                                  "Decided to end this pass because the progress rate has dropped below "
+                                  "the %.0f%% threshold.",
+                                  IMPORT_CHUNK_TEST_SLOWDOWN_RATIO_A * 100.0);
+                return 1;
+            }
+            /* Alternative check: have we really, really slowed down,
+             * without the test for cache overflow?  This catches the case
+             * where the cache has been misconfigured too large. */
+            if (slowdown < IMPORT_CHUNK_TEST_SLOWDOWN_RATIO_B) {
+                if (number_of_times_here > 10) {
+                    /* Got to get here ten times at least */
+                    import_log_notice(job, SLAPI_LOG_INFO, "import_throw_in_towel",
+                                      "Decided to end this pass because the progress rate plummeted below %.0f%%",
+                                      IMPORT_CHUNK_TEST_SLOWDOWN_RATIO_B * 100.0);
+                    return 1;
+                }
+                number_of_times_here++;
+                /* keep the streak alive; don't fall through to the reset */
+                return 0;
+            }
+        }
+    }
+
+    number_of_times_here = 0;
+    return 0;
+}
+
+/* Shift the progress history one slot toward the front and record the
+ * newest (ID, time) sample in the last slot. */
+static void
+import_push_progress_history(ImportJob *job, ID current_id, time_t current_time)
+{
+    int slot;
+
+    for (slot = 1; slot < IMPORT_JOB_PROG_HISTORY_SIZE; slot++) {
+        job->progress_history[slot - 1] = job->progress_history[slot];
+        job->progress_times[slot - 1] = job->progress_times[slot];
+    }
+    job->progress_history[IMPORT_JOB_PROG_HISTORY_SIZE - 1] = current_id;
+    job->progress_times[IMPORT_JOB_PROG_HISTORY_SIZE - 1] = current_time;
+}
+
+/* Compute a worker's processing rate (IDs per second) over the last
+ * interval and remember its position for the next sample.  The rate is
+ * reported as zero until the worker has both a previous and a current
+ * position to compare. */
+static void
+import_calc_rate(ImportWorkerInfo *info, int time_interval)
+{
+    size_t processed = info->last_ID_processed - info->previous_ID_counted;
+
+    if ((info->previous_ID_counted != 0) && (info->last_ID_processed != 0)) {
+        info->rate = (double)processed / time_interval;
+    } else {
+        info->rate = 0;
+    }
+    info->previous_ID_counted = info->last_ID_processed;
+}
+
+/* find the rate (ids/time) of work from a worker thread between history
+ * marks A and B.
+ */
+#define HISTORY(N) (job->progress_history[N])
+#define TIMES(N) (job->progress_times[N])
+#define PROGRESS(A, B) ((HISTORY(B) > HISTORY(A)) ? ((double)(HISTORY(B) - HISTORY(A)) / \
+                                                     (double)(TIMES(B) - TIMES(A)))      \
+                                                  : (double)0)
+
+/*
+ * Main supervision loop for an import pass.  Switches every thread's
+ * command to RUN, then polls ten times per second until the producer,
+ * foreman and workers have all caught up with each other, or a
+ * pass-termination condition fires.  Every display_interval ticks
+ * (200 x 100ms = ~20s) it logs a progress chart and refreshes the rate
+ * and cache statistics consumed by import_throw_in_towel().
+ *
+ * On return *status is IMPORT_COMPLETE_PASS or IMPORT_INCOMPLETE_PASS.
+ * Returns 0 normally, ERR_IMPORT_ABORTED if any thread aborted, or a
+ * NEED_DN_NORM* code when a worker found that a DN-format upgrade is
+ * required.
+ */
+static int
+import_monitor_threads(ImportJob *job, int *status)
+{
+    PRIntervalTime tenthsecond = PR_MillisecondsToInterval(100);
+    ImportWorkerInfo *current_worker = NULL;
+    ImportWorkerInfo *producer = NULL, *foreman = NULL;
+    int finished = 0;
+    int giveup = 0;
+    int count = 1; /* 1 to prevent premature status report */
+    int producer_done = 0;
+    const int display_interval = 200;
+    time_t time_now = 0;
+    time_t last_time = 0;
+    time_t time_interval = 0;
+    int rc = 0;
+    int corestate = 0;
+
+    /* Release every thread from PAUSE and find the producer/foreman. */
+    for (current_worker = job->worker_list; current_worker != NULL;
+         current_worker = current_worker->next) {
+        current_worker->command = RUN;
+        if (current_worker->work_type == PRODUCER)
+            producer = current_worker;
+        if (current_worker->work_type == FOREMAN)
+            foreman = current_worker;
+    }
+
+
+    /* A file-based (LDIF) import always has a producer; a wire import
+     * does not (see the FLAG_PRODUCER_DONE handling below). */
+    if (job->flags & FLAG_USE_FILES)
+        PR_ASSERT(producer != NULL);
+
+    PR_ASSERT(foreman != NULL);
+
+    if (!foreman) {
+        goto error_abort;
+    }
+
+    last_time = slapi_current_utc_time();
+    job->start_time = last_time;
+    import_clear_progress_history(job);
+
+    while (!finished) {
+        /* NOID starts the trailing-ID minimum scan; assumes NOID compares
+         * above any real ID -- so the first worker comparison wins. */
+        ID trailing_ID = NOID;
+
+        DS_Sleep(tenthsecond);
+        finished = 1;
+
+        /* First calculate the time interval since last reported */
+        if (0 == (count % display_interval)) {
+            time_now = slapi_current_utc_time();
+            time_interval = time_now - last_time;
+            last_time = time_now;
+            /* Now calculate our rate of progress overall for this chunk */
+            if (time_now != job->start_time) {
+                /* log a cute chart of the worker progress */
+                import_log_status_start(job);
+                import_log_status_add_line(job,
+                                           "Index status for import of %s:", job->inst->inst_name);
+                import_log_status_add_line(job,
+                                           "-------Index Task-------State---Entry----Rate-");
+
+                import_push_progress_history(job, foreman->last_ID_processed,
+                                             time_now);
+                job->average_progress_rate =
+                    (double)(HISTORY(IMPORT_JOB_PROG_HISTORY_SIZE - 1) + 1 - foreman->first_ID) /
+                    (double)(TIMES(IMPORT_JOB_PROG_HISTORY_SIZE - 1) - job->start_time);
+                job->recent_progress_rate =
+                    PROGRESS(0, IMPORT_JOB_PROG_HISTORY_SIZE - 1);
+                job->cache_hit_ratio = import_grok_db_stats(job->inst);
+            }
+        }
+
+        for (current_worker = job->worker_list; current_worker != NULL;
+             current_worker = current_worker->next) {
+            /* Calculate the ID at which the slowest worker is currently
+             * processing */
+            if ((trailing_ID > current_worker->last_ID_processed) &&
+                (current_worker->work_type == WORKER)) {
+                trailing_ID = current_worker->last_ID_processed;
+            }
+            if (0 == (count % display_interval) && time_interval) {
+                import_calc_rate(current_worker, time_interval);
+                import_print_worker_status(current_worker);
+            }
+            /* The state word carries DN_NORM* flags alongside the core
+             * run state; mask them off to test the core state. */
+            corestate = current_worker->state & CORESTATE;
+            if (current_worker->state == ABORTED) {
+                goto error_abort;
+            } else if ((corestate == QUIT) || (corestate == FINISHED)) {
+                if (DN_NORM_BT == (DN_NORM_BT & current_worker->state)) {
+                    /* upgrading dn norm (both) is needed */
+                    rc = NEED_DN_NORM_BT; /* Set the RC; Don't abort now;
+                                           * We have to stop other
+                                           * threads */
+                } else if (DN_NORM == (DN_NORM_BT & current_worker->state)) {
+                    /* upgrading dn norm is needed */
+                    rc = NEED_DN_NORM; /* Set the RC; Don't abort now;
+                                        * We have to stop other threads
+                                        */
+                } else if (DN_NORM_SP == (DN_NORM_BT & current_worker->state)) {
+                    /* upgrading spaces in dn norm is needed */
+                    rc = NEED_DN_NORM_SP; /* Set the RC; Don't abort now;
+                                           * We have to stop other
+                                           * threads */
+                }
+                current_worker->state = corestate;
+            } else if (current_worker->state != FINISHED) {
+                finished = 0;
+            }
+        }
+
+        /* Periodic one-line summary of overall progress. */
+        if ((0 == (count % display_interval)) &&
+            (job->start_time != time_now)) {
+            char buffer[256], *p = buffer;
+
+            import_log_status_done(job);
+            p += sprintf(p, "Processed %lu entries ", (u_long)job->ready_ID);
+            if (job->total_pass > 1)
+                p += sprintf(p, "(pass %d) ", job->total_pass);
+
+            p += sprintf(p, "-- average rate %.1f/sec, ",
+                         job->average_progress_rate);
+            p += sprintf(p, "recent rate %.1f/sec, ",
+                         job->recent_progress_rate);
+            p += sprintf(p, "hit ratio %.0f%%", job->cache_hit_ratio * 100.0);
+            import_log_notice(job, SLAPI_LOG_INFO, "import_monitor_threads", "%s", buffer);
+        }
+
+        /* Then let's see if it's time to complete this import pass */
+        if (!giveup) {
+            giveup = import_throw_in_towel(job, time_now, trailing_ID);
+            if (giveup) {
+                /* If so, signal the lead thread to stop */
+                import_log_notice(job, SLAPI_LOG_INFO, "import_monitor_threads",
+                                  "Ending pass number %d ...", job->total_pass);
+                foreman->command = STOP;
+                while (foreman->state != FINISHED) {
+                    DS_Sleep(tenthsecond);
+                }
+                import_log_notice(job, SLAPI_LOG_INFO, "import_monitor_threads",
+                                  "Foreman is done; waiting for workers to finish...");
+            }
+        }
+
+        /* if the producer is finished, and the foreman has caught up... */
+        if (producer) {
+            producer_done = (producer->state == FINISHED) ||
+                            (producer->state == QUIT);
+        } else {
+            /* set in ldbm_back_wire_import */
+            producer_done = (job->flags & FLAG_PRODUCER_DONE);
+        }
+        if (producer_done && (job->lead_ID == job->ready_ID)) {
+            /* tell the foreman to stop if he's still working. */
+            if (foreman->state != FINISHED)
+                foreman->command = STOP;
+
+            /* if all the workers are caught up too, we're done */
+            if (trailing_ID == job->lead_ID)
+                break;
+        }
+
+        /* if the foreman is done (end of pass) and the worker threads
+         * have caught up...
+         */
+        if ((foreman->state == FINISHED) && (job->ready_ID == trailing_ID)) {
+            break;
+        }
+
+        count++;
+    }
+
+    import_log_notice(job, SLAPI_LOG_INFO, "import_monitor_threads",
+                      "Workers finished; cleaning up...");
+
+    /* Now tell all the workers to stop */
+    for (current_worker = job->worker_list; current_worker != NULL;
+         current_worker = current_worker->next) {
+        if (current_worker->work_type != PRODUCER)
+            current_worker->command = STOP;
+    }
+
+    /* Having done that, wait for them to say that they've stopped */
+    for (current_worker = job->worker_list; current_worker != NULL;) {
+        if ((current_worker->state != FINISHED) &&
+            (current_worker->state != ABORTED) &&
+            (current_worker->state != QUIT) &&
+            (current_worker->work_type != PRODUCER)) {
+            DS_Sleep(tenthsecond); /* Only sleep if we hit a thread that is still not done */
+            continue;
+        } else {
+            current_worker = current_worker->next;
+        }
+    }
+    import_log_notice(job, SLAPI_LOG_INFO, "import_monitor_threads", "Workers cleaned up.");
+
+    /* If we're here and giveup is true, and the primary hadn't finished
+     * processing the input files, we need to return IMPORT_INCOMPLETE_PASS */
+    if (giveup && (job->input_filenames || (job->flags & FLAG_ONLINE) ||
+                   (job->flags & FLAG_REINDEXING /* support multi-pass */))) {
+        if (producer_done && (job->ready_ID == job->lead_ID)) {
+            /* foreman caught up with the producer, and the producer is
+             * done.
+             */
+            *status = IMPORT_COMPLETE_PASS;
+        } else {
+            *status = IMPORT_INCOMPLETE_PASS;
+        }
+    } else {
+        *status = IMPORT_COMPLETE_PASS;
+    }
+    return rc;
+
+error_abort:
+    return ERR_IMPORT_ABORTED;
+}
+
+
+/********** running passes **********/
+
+/* Run one import pass: start the threads, then monitor them to
+ * completion.  *status receives the pass outcome from the monitor.
+ * Returns 0 on success, otherwise the thread-start or monitor error. */
+static int
+import_run_pass(ImportJob *job, int *status)
+{
+    int rc;
+
+    /* Start the threads running */
+    rc = import_start_threads(job);
+    if (rc != 0) {
+        import_log_notice(job, SLAPI_LOG_ERR, "import_run_pass", "Starting threads failed: %d\n", rc);
+        return rc;
+    }
+
+    /* Monitor the threads until we're done or fail */
+    rc = import_monitor_threads(job, status);
+    switch (rc) {
+    case 0:
+        break;
+    case ERR_IMPORT_ABORTED:
+    case NEED_DN_NORM:
+    case NEED_DN_NORM_SP:
+    case NEED_DN_NORM_BT:
+        import_log_notice(job, SLAPI_LOG_ERR, "import_run_pass", "Thread monitoring returned: %d\n", rc);
+        break;
+    default:
+        import_log_notice(job, SLAPI_LOG_ERR, "import_run_pass", "Thread monitoring aborted: %d\n", rc);
+        break;
+    }
+    return rc;
+}
+
+/* Raise FLAG_ABORT on the job so every import thread aborts itself, give
+ * the threads time to notice, and optionally wait until they have all
+ * reached a terminal state. */
+static void
+import_set_abort_flag_all(ImportJob *job, int wait_for_them)
+{
+    ImportWorkerInfo *w;
+
+    /* tell all the worker threads to abort */
+    job->flags |= FLAG_ABORT;
+
+    /* The worker and foreman threads poll (job->flags & FLAG_ABORT) on
+     * their ~200 msec sleep cycle (import_sleep_time); sleeping at least
+     * 3 sec here gives every thread a chance to see the flag and clean
+     * up before we proceed. */
+    DS_Sleep(PR_MillisecondsToInterval(3000));
+
+    if (wait_for_them) {
+        /* Wait for each thread to report a terminal state. */
+        w = job->worker_list;
+        while (w != NULL) {
+            DS_Sleep(PR_MillisecondsToInterval(100));
+            if ((w->state == FINISHED) || (w->state == ABORTED) ||
+                (w->state == QUIT)) {
+                w = w->next;
+            }
+        }
+    }
+}
+
+
+/* Tell every import thread to abort -- both via the job-wide FLAG_ABORT
+ * and a direct ABORT command -- and optionally wait until each thread
+ * has reached a terminal state. */
+void
+import_abort_all(ImportJob *job, int wait_for_them)
+{
+    ImportWorkerInfo *w;
+
+    /* tell all the worker threads to abort */
+    job->flags |= FLAG_ABORT;
+    for (w = job->worker_list; w != NULL; w = w->next) {
+        w->command = ABORT;
+    }
+
+    if (wait_for_them) {
+        /* Wait for each thread to report a terminal state. */
+        w = job->worker_list;
+        while (w != NULL) {
+            DS_Sleep(PR_MillisecondsToInterval(100));
+            if ((w->state == FINISHED) || (w->state == ABORTED) ||
+                (w->state == QUIT)) {
+                w = w->next;
+            }
+        }
+    }
+}
+
+/* Build the pair of names used to rename an index file between merge
+ * passes: "<dir>/<index><suffix>" and "<dir>/<index>.<pass><suffix>".
+ * Returns 0 on success; on failure both outputs are freed and NULLed
+ * and -1 is returned. */
+int
+import_make_merge_filenames(char *directory, char *indexname, int pass, char **oldname, char **newname)
+{
+    *oldname = slapi_ch_smprintf("%s/%s%s", directory, indexname, LDBM_FILENAME_SUFFIX);
+    *newname = slapi_ch_smprintf("%s/%s.%d%s", directory, indexname, pass,
+                                 LDBM_FILENAME_SUFFIX);
+    if (*oldname && *newname) {
+        return 0;
+    }
+    slapi_ch_free_string(oldname);
+    slapi_ch_free_string(newname);
+    return -1;
+}
+
+/* Task here is as follows:
+ * First, if this is pass #1, check for the presence of a merge
+ *     directory. If it is not present, create it.
+ * If it is present, delete all the files in it.
+ * Then, flush the dblayer and close files.
+ * Now create a numbered subdir of the merge directory for this pass.
+ * Next, move the index files, except entrydn, parentid and id2entry to
+ *     the merge subdirectory. Important to move if we can, because
+ *     that can be millions of times faster than a copy.
+ * Finally open the dblayer back up because the caller expects
+ *     us to not muck with it.
+ *
+ * Returns 0 on success; a nonzero dblayer/NSPR error code otherwise.
+ */
+static int
+import_sweep_after_pass(ImportJob *job)
+{
+    backend *be = job->inst->inst_be;
+    int ret = 0;
+
+    import_log_notice(job, SLAPI_LOG_INFO, "import_sweep_after_pass",
+                      "Sweeping files for merging later...");
+
+    /* Flush and close all of the instance's files before renaming them. */
+    ret = dblayer_instance_close(be);
+
+    if (0 == ret) {
+        /* Walk the list of index jobs */
+        ImportWorkerInfo *current_worker = NULL;
+
+        for (current_worker = job->worker_list; current_worker != NULL;
+             current_worker = current_worker->next) {
+            /* Foreach job, rename the file to <filename>.n, where n is the
+             * pass number.  The foreman's/producer's files and parentid are
+             * needed by later passes, so they stay in place. */
+            if ((current_worker->work_type != FOREMAN) &&
+                (current_worker->work_type != PRODUCER) &&
+                (strcasecmp(current_worker->index_info->name, LDBM_PARENTID_STR) != 0)) {
+                char *newname = NULL;
+                char *oldname = NULL;
+
+                ret = import_make_merge_filenames(job->inst->inst_dir_name,
+                                                  current_worker->index_info->name, job->current_pass,
+                                                  &oldname, &newname);
+                if (0 != ret) {
+                    break;
+                }
+                /* The index file may legitimately not exist (no keys were
+                 * generated for it this pass); only rename when present. */
+                if (PR_Access(oldname, PR_ACCESS_EXISTS) == PR_SUCCESS) {
+                    ret = PR_Rename(oldname, newname);
+                    if (ret != PR_SUCCESS) {
+                        PRErrorCode prerr = PR_GetError();
+                        import_log_notice(job, SLAPI_LOG_ERR, "import_sweep_after_pass",
+                                          "Failed to rename file \"%s\" to \"%s\", " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)",
+                                          oldname, newname, prerr, slapd_pr_strerror(prerr));
+                        slapi_ch_free((void **)&newname);
+                        slapi_ch_free((void **)&oldname);
+                        break;
+                    }
+                }
+                slapi_ch_free((void **)&newname);
+                slapi_ch_free((void **)&oldname);
+            }
+        }
+
+        /* Reopen the instance so the caller finds the dblayer usable, but
+         * do not let a successful reopen mask an earlier rename failure:
+         * previously 'ret' was unconditionally overwritten here, so the
+         * sweep could report success with stale index files left behind. */
+        int start_ret = bdb_instance_start(be, DBLAYER_IMPORT_MODE);
+        if (0 == ret) {
+            ret = start_ret;
+        }
+    }
+
+    if (0 == ret) {
+        import_log_notice(job, SLAPI_LOG_INFO, "import_sweep_after_pass", "Sweep done.");
+    } else {
+        if (ENOSPC == ret) {
+            /* Use a SLAPI severity here: LOG_CRIT is a syslog(3) macro
+             * whose numeric value does not map into the SLAPI_LOG_* level
+             * space used by every other call in this file. */
+            import_log_notice(job, SLAPI_LOG_ERR, "import_sweep_after_pass",
+                              "NO DISK SPACE LEFT in sweep phase");
+        } else {
+            import_log_notice(job, SLAPI_LOG_ERR, "import_sweep_after_pass",
+                              "Sweep phase error %d (%s)", ret,
+                              dblayer_strerror(ret));
+        }
+    }
+
+    return ret;
+}
+
+/* Called when the import has finished (successfully or not) to bring the
+ * instance back to a usable state: stamp the DBVERSION file on success,
+ * finish the cn=tasks entry if nobody else holds a reference, and for
+ * online imports restart the instance and re-enable the backend.
+ * Returns 0 on success; anything else is an error (notably a failed
+ * bdb_instance_start for online jobs).
+ */
+static int
+import_all_done(ImportJob *job, int ret)
+{
+    ldbm_instance *inst = job->inst;
+
+    /* Writing this file indicates to future server startups that
+     * the db is OK unless it's in the dry run mode. */
+    if ((ret == 0) && !(job->flags & FLAG_DRYRUN)) {
+        char inst_dir[MAXPATHLEN * 2];
+        char *inst_dirp = NULL;
+        inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                              inst_dir, MAXPATHLEN * 2);
+        ret = bdb_version_write(inst->inst_li, inst_dirp, NULL, DBVERSION_ALL);
+        /* dblayer_get_full_inst_dir may have heap-allocated a longer path;
+         * free it only when it did not use the stack buffer. */
+        if (inst_dirp != inst_dir)
+            slapi_ch_free_string(&inst_dirp);
+    }
+
+    /* Only finish the task when no other thread still references it. */
+    if ((job->task != NULL) && (0 == slapi_task_get_refcount(job->task))) {
+        slapi_task_finish(job->task, ret);
+    }
+
+    if (job->flags & FLAG_ONLINE) {
+        /* make sure the indexes are online as well */
+        /* richm 20070919 - if index entries are added online, they
+           are created and marked as INDEX_OFFLINE, in anticipation
+           of someone doing a db2index.  In this case, the db2index
+           code will correctly unset the INDEX_OFFLINE flag.
+           However, if import is used to create the indexes, the
+           INDEX_OFFLINE flag will not be cleared.  So, we do that
+           here
+        */
+        IndexInfo *index = job->index_list;
+        while (index != NULL) {
+            index->ai->ai_indexmask &= ~INDEX_OFFLINE;
+            index = index->next;
+        }
+        /* start up the instance */
+        ret = bdb_instance_start(job->inst->inst_be, DBLAYER_NORMAL_MODE);
+        if (ret != 0)
+            return ret;
+
+        /* Reset USN slapi_counter with the last key of the entryUSN index */
+        ldbm_set_last_usn(inst->inst_be);
+
+        /* bring backend online again */
+        slapi_mtn_be_enable(inst->inst_be);
+    }
+
+    return ret;
+}
+
+
+/*
+ * Main driver for a BDB import / reindex / upgradedn job.
+ *
+ * 'arg' is the ImportJob to run.  Spawns the appropriate producer thread
+ * (LDIF import, reindex, or upgradedn), runs one or more passes, merges
+ * multi-pass results, then rebuilds numsubordinates and (unless disabled)
+ * the ancestorid index before closing the instance.
+ *
+ * Returns 0 on success.  In dryrun/upgradedn-V1 mode the return codes
+ * 1-3 encode which upgradednformat variant is still needed; otherwise a
+ * nonzero value means the import failed (and the instance directory has
+ * been deleted unless in dry run mode).
+ *
+ * NOTE(review): the log messages below still say "import_main_offline";
+ * presumably this function was renamed to bdb_import_main as part of the
+ * backend split — confirm before changing the strings.
+ */
+int
+bdb_import_main(void *arg)
+{
+    ImportJob *job = (ImportJob *)arg;
+    ldbm_instance *inst = job->inst;
+    backend *be = inst->inst_be;
+    int ret = 0;
+    time_t beginning = 0;
+    time_t end = 0;
+    int finished = 0;
+    int status = 0;
+    int verbose = 1;
+    int aborted = 0;
+    ImportWorkerInfo *producer = NULL;
+    char *opstr = "Import";
+
+    if (job->task) {
+        slapi_task_inc_refcount(job->task);
+    }
+
+    /* Pick the operation name used in all log messages below. */
+    if (job->flags & (FLAG_UPGRADEDNFORMAT | FLAG_UPGRADEDNFORMAT_V1)) {
+        if (job->flags & FLAG_DRYRUN) {
+            opstr = "Upgrade Dn Dryrun";
+        } else if ((job->flags & (FLAG_UPGRADEDNFORMAT | FLAG_UPGRADEDNFORMAT_V1)) == (FLAG_UPGRADEDNFORMAT | FLAG_UPGRADEDNFORMAT_V1)) {
+            opstr = "Upgrade Dn (Full)";
+        } else if (job->flags & FLAG_UPGRADEDNFORMAT_V1) {
+            opstr = "Upgrade Dn (Spaces)";
+        } else {
+            opstr = "Upgrade Dn (RFC 4514)";
+        }
+    } else if (job->flags & FLAG_REINDEXING) {
+        opstr = "Reindexing";
+    }
+    PR_ASSERT(inst != NULL);
+    beginning = slapi_current_utc_time();
+
+    /* Decide which indexes are needed */
+    if (job->flags & FLAG_INDEX_ATTRS) {
+        /* Here, we get an AVL tree which contains nodes for all attributes
+         * in the schema.  Given this tree, we need to identify those nodes
+         * which are marked for indexing. */
+        avl_apply(job->inst->inst_attrs, (IFP)import_attr_callback,
+                  (caddr_t)job, -1, AVL_INORDER);
+        vlv_getindices((IFP)import_attr_callback, (void *)job, be);
+    }
+
+    /* Determine how much index buffering space to allocate to each index */
+    import_set_index_buffer_size(job);
+
+    /* initialize the entry FIFO */
+    ret = import_fifo_init(job);
+    if (ret) {
+        /* For wire (online replica-init) imports, wake the supplier thread
+         * waiting on wire_cv so it is not left blocked forever. */
+        if (!(job->flags & FLAG_USE_FILES)) {
+            PR_Lock(job->wire_lock);
+            PR_NotifyCondVar(job->wire_cv);
+            PR_Unlock(job->wire_lock);
+        }
+        goto error;
+    }
+
+    if (job->flags & FLAG_USE_FILES) {
+        /* importing from files: start up a producer thread to read the
+         * files and queue them
+         */
+        producer = CALLOC(ImportWorkerInfo);
+        if (!producer)
+            goto error;
+
+        /* start the producer */
+        import_init_worker_info(producer, job);
+        producer->work_type = PRODUCER;
+        if (job->flags & (FLAG_UPGRADEDNFORMAT | FLAG_UPGRADEDNFORMAT_V1)) {
+            if (!CREATE_THREAD(PR_USER_THREAD, (VFP)upgradedn_producer,
+                               producer, PR_PRIORITY_NORMAL, PR_GLOBAL_BOUND_THREAD,
+                               PR_UNJOINABLE_THREAD, SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+                PRErrorCode prerr = PR_GetError();
+                slapi_log_err(SLAPI_LOG_ERR, "import_main_offline",
+                              "Unable to spawn upgrade dn producer thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                              prerr, slapd_pr_strerror(prerr));
+                goto error;
+            }
+        } else if (job->flags & FLAG_REINDEXING) {
+            if (!CREATE_THREAD(PR_USER_THREAD, (VFP)index_producer, producer,
+                               PR_PRIORITY_NORMAL, PR_GLOBAL_BOUND_THREAD,
+                               PR_UNJOINABLE_THREAD,
+                               SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+                PRErrorCode prerr = PR_GetError();
+                slapi_log_err(SLAPI_LOG_ERR, "import_main_offline",
+                              "Unable to spawn index producer thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                              prerr, slapd_pr_strerror(prerr));
+                goto error;
+            }
+        } else {
+            import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "Beginning import job...");
+            if (!CREATE_THREAD(PR_USER_THREAD, (VFP)import_producer, producer,
+                               PR_PRIORITY_NORMAL, PR_GLOBAL_BOUND_THREAD,
+                               PR_UNJOINABLE_THREAD,
+                               SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+                PRErrorCode prerr = PR_GetError();
+                slapi_log_err(SLAPI_LOG_ERR, "import_main_offline",
+                              "Unable to spawn import producer thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                              prerr, slapd_pr_strerror(prerr));
+                goto error;
+            }
+        }
+
+        if (0 == job->job_index_buffer_suggestion)
+            import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "Index buffering is disabled.");
+        else
+            import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                              "Index buffering enabled with bucket size %lu",
+                              (long unsigned int)job->job_index_buffer_suggestion);
+
+        job->worker_list = producer;
+    } else {
+        /* release the startup lock and let the entries start queueing up
+         * in for import */
+        PR_Lock(job->wire_lock);
+        PR_NotifyCondVar(job->wire_cv);
+        PR_Unlock(job->wire_lock);
+    }
+
+    /* Run as many passes as we need to complete the job or die honourably in
+     * the attempt */
+    while (!finished) {
+        job->current_pass++;
+        job->total_pass++;
+        ret = import_run_pass(job, &status);
+        /* The following could have happened:
+         *     (a) Some error happened such that we're hosed.
+         *         This is indicated by a non-zero return code.
+         *     (b) We finished the complete file without needing a second pass
+         *         This is indicated by a zero return code and a status of
+         *         IMPORT_COMPLETE_PASS and current_pass == 1;
+         *     (c) We completed a pass and need at least another one
+         *         This is indicated by a zero return code and a status of
+         *         IMPORT_INCOMPLETE_PASS
+         *     (d) We just completed what turned out to be the last in a
+         *         series of passes
+         *         This is indicated by a zero return code and a status of
+         *         IMPORT_COMPLETE_PASS and current_pass > 1
+         */
+        if (ret == ERR_IMPORT_ABORTED) {
+            /* at least one of the threads has aborted -- shut down ALL
+             * of the threads */
+            import_log_notice(job, SLAPI_LOG_ERR, "import_main_offline",
+                              "Aborting all %s threads...", opstr);
+            /* this abort sets the  abort flag on the threads and will block for
+             * the exit of all threads
+             */
+            import_set_abort_flag_all(job, 1);
+            import_log_notice(job, SLAPI_LOG_ERR, "import_main_offline",
+                              "%s threads aborted.", opstr);
+            aborted = 1;
+            goto error;
+        }
+        /* DN-normalization problems are reported as distinct codes so the
+         * dryrun result mapping at the end can tell the caller which
+         * upgradednformat variant to run. */
+        if ((ret == NEED_DN_NORM) || (ret == NEED_DN_NORM_SP) ||
+            (ret == NEED_DN_NORM_BT)) {
+            goto error;
+        } else if (0 != ret) {
+            /* Some horrible fate has befallen the import */
+            import_log_notice(job, SLAPI_LOG_ERR, "import_main_offline",
+                              "Fatal pass error %d", ret);
+            goto error;
+        }
+
+        /* No error, but a number of possibilities */
+        if (IMPORT_COMPLETE_PASS == status) {
+            if (1 == job->current_pass) {
+                /* We're done !!!! */;
+            } else {
+                /* Save the files, then merge */
+                ret = import_sweep_after_pass(job);
+                if (0 != ret) {
+                    goto error;
+                }
+                ret = import_mega_merge(job);
+                if (0 != ret) {
+                    goto error;
+                }
+            }
+            finished = 1;
+        } else {
+            if (IMPORT_INCOMPLETE_PASS == status) {
+                /* Need to go round again */
+                /* Time to save the files we've built for later */
+                ret = import_sweep_after_pass(job);
+                if (0 != ret) {
+                    goto error;
+                }
+                /* If we have exceeded the configured pass limit, merge the
+                 * intermediate files now rather than letting them pile up. */
+                if ((inst->inst_li->li_maxpassbeforemerge != 0) &&
+                    (job->current_pass > inst->inst_li->li_maxpassbeforemerge)) {
+                    ret = import_mega_merge(job);
+                    if (0 != ret) {
+                        goto error;
+                    }
+                    job->current_pass = 1;
+                    ret = import_sweep_after_pass(job);
+                    if (0 != ret) {
+                        goto error;
+                    }
+                }
+
+                /* Fixup the first_ID value to reflect previous work */
+                job->first_ID = job->ready_ID + 1;
+                import_free_thread_data(job);
+                job->worker_list = producer;
+                import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                                  "Beginning pass number %d", job->total_pass + 1);
+            } else {
+                /* Bizarro-slapd */
+                goto error;
+            }
+        }
+    }
+
+    /* kill the producer now; we're done */
+    if (producer) {
+        import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "Cleaning up producer thread...");
+        producer->command = STOP;
+        /* wait for the lead thread to stop */
+        while (producer->state != FINISHED) {
+            DS_Sleep(PR_MillisecondsToInterval(100));
+        }
+    }
+
+    import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "Indexing complete.  Post-processing...");
+    /* Now do the numsubordinates attribute */
+    /* [610066] reindexed db cannot be used in the following backup/restore */
+    import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                      "Generating numsubordinates (this may take several minutes to complete)...");
+    if ((!(job->flags & FLAG_REINDEXING) || (job->flags & FLAG_DN2RDN)) &&
+        (ret = bdb_update_subordinatecounts(be, job, NULL)) != 0) {
+        import_log_notice(job, SLAPI_LOG_ERR, "import_main_offline",
+                          "Failed to update numsubordinates attributes");
+        goto error;
+    }
+    import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                      "Generating numSubordinates complete.");
+
+    if (!entryrdn_get_noancestorid()) {
+        /* And the ancestorid index */
+        /* Creating ancestorid from the scratch; delete the index file first. */
+        struct attrinfo *ai = NULL;
+
+        ainfo_get(be, "ancestorid", &ai);
+        dblayer_erase_index_file(be, ai, PR_TRUE, 0);
+        if ((ret = bdb_ancestorid_create_index(be, job)) != 0) {
+            import_log_notice(job, SLAPI_LOG_ERR, "import_main_offline", "Failed to create ancestorid index");
+            goto error;
+        }
+    }
+
+    import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "Flushing caches...");
+
+/* New way to exit the routine: check the return code.
+ * If it's non-zero, delete the database files.
+ * Otherwise don't, but always close the database layer properly.
+ * Then return. This ensures that we can't make a half-good/half-bad
+ * Database. */
+
+error:
+    /* If we fail, the database is now in a mess, so we delete it
+       except dry run mode */
+    import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "Closing files...");
+    cache_clear(&job->inst->inst_cache, CACHE_TYPE_ENTRY);
+    if (entryrdn_get_switch()) {
+        cache_clear(&job->inst->inst_dncache, CACHE_TYPE_DN);
+    }
+    if (aborted) {
+        /* If aborted, it's safer to rebuild the caches. */
+        cache_destroy_please(&job->inst->inst_cache, CACHE_TYPE_ENTRY);
+        if (entryrdn_get_switch()) { /* subtree-rename: on */
+            cache_destroy_please(&job->inst->inst_dncache, CACHE_TYPE_DN);
+        }
+        /* initialize the entry cache */
+        if (!cache_init(&(inst->inst_cache), DEFAULT_CACHE_SIZE,
+                        DEFAULT_CACHE_ENTRIES, CACHE_TYPE_ENTRY)) {
+            slapi_log_err(SLAPI_LOG_ERR, "import_main_offline",
+                          "cache_init failed.  Server should be restarted.\n");
+        }
+
+        /* initialize the dn cache */
+        if (!cache_init(&(inst->inst_dncache), DEFAULT_DNCACHE_SIZE,
+                        DEFAULT_DNCACHE_MAXCOUNT, CACHE_TYPE_DN)) {
+            slapi_log_err(SLAPI_LOG_ERR, "import_main_offline",
+                          "dn cache_init failed.  Server should be restarted.\n");
+        }
+    }
+    if (0 != ret) {
+        dblayer_instance_close(job->inst->inst_be);
+        if (!(job->flags & (FLAG_DRYRUN | FLAG_UPGRADEDNFORMAT_V1))) {
+            /* If not dryrun NOR upgradedn space */
+            /* if running in the dry run mode, don't touch the db */
+            dblayer_delete_instance_dir(be);
+        }
+    } else {
+        if (0 != (ret = dblayer_instance_close(job->inst->inst_be))) {
+            import_log_notice(job, SLAPI_LOG_WARNING, "import_main_offline", "Failed to close database");
+        }
+    }
+    if (!(job->flags & FLAG_ONLINE))
+        dblayer_close(job->inst->inst_li, DBLAYER_IMPORT_MODE);
+
+    end = slapi_current_utc_time();
+    if (verbose && (0 == ret)) {
+        int seconds_to_import = end - beginning;
+        size_t entries_processed = job->lead_ID - (job->starting_ID - 1);
+        double entries_per_second =
+            seconds_to_import ? (double)entries_processed / (double)seconds_to_import : 0;
+
+        if (job->not_here_skipped) {
+            if (job->skipped) {
+                import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                                  "%s complete.  Processed %lu entries "
+                                  "(%d bad entries were skipped, "
+                                  "%d entries were skipped because they don't "
+                                  "belong to this database) in %d seconds. "
+                                  "(%.2f entries/sec)",
+                                  opstr, (long unsigned int)entries_processed,
+                                  job->skipped, job->not_here_skipped,
+                                  seconds_to_import, entries_per_second);
+            } else {
+                import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                                  "%s complete.  Processed %lu entries "
+                                  "(%d entries were skipped because they don't "
+                                  "belong to this database) "
+                                  "in %d seconds. (%.2f entries/sec)",
+                                  opstr, (long unsigned int)entries_processed,
+                                  job->not_here_skipped, seconds_to_import,
+                                  entries_per_second);
+            }
+        } else {
+            if (job->skipped) {
+                import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                                  "%s complete.  Processed %lu entries "
+                                  "(%d were skipped) in %d seconds. "
+                                  "(%.2f entries/sec)",
+                                  opstr, (long unsigned int)entries_processed,
+                                  job->skipped, seconds_to_import,
+                                  entries_per_second);
+            } else {
+                import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline",
+                                  "%s complete.  Processed %lu entries "
+                                  "in %d seconds. (%.2f entries/sec)",
+                                  opstr, (long unsigned int)entries_processed,
+                                  seconds_to_import, entries_per_second);
+            }
+        }
+    }
+
+    /* Map the result code for dryrun/upgradedn callers: 1 = needs full
+     * upgradednformat, 2 = needs upgradednformat, 3 = needs the "spaces"
+     * variant. */
+    if (job->flags & (FLAG_DRYRUN | FLAG_UPGRADEDNFORMAT_V1)) {
+        if (0 == ret) {
+            import_log_notice(job, SLAPI_LOG_INFO, "import_main_offline", "%s complete.  %s is up-to-date.",
+                              opstr, job->inst->inst_name);
+            ret = 0;
+            if (job->task) {
+                slapi_task_dec_refcount(job->task);
+            }
+            import_all_done(job, ret);
+        } else if (NEED_DN_NORM_BT == ret) {
+            import_log_notice(job, SLAPI_LOG_NOTICE, "import_main_offline",
+                              "%s complete. %s needs upgradednformat all.",
+                              opstr, job->inst->inst_name);
+            if (job->task) {
+                slapi_task_dec_refcount(job->task);
+            }
+            import_all_done(job, ret);
+            ret = 1;
+        } else if (NEED_DN_NORM == ret) {
+            import_log_notice(job, SLAPI_LOG_NOTICE, "import_main_offline",
+                              "%s complete. %s needs upgradednformat.",
+                              opstr, job->inst->inst_name);
+            if (job->task) {
+                slapi_task_dec_refcount(job->task);
+            }
+            import_all_done(job, ret);
+            ret = 2;
+        } else if (NEED_DN_NORM_SP == ret) {
+            import_log_notice(job, SLAPI_LOG_NOTICE, "import_main_offline",
+                              "%s complete. %s needs upgradednformat spaces.",
+                              opstr, job->inst->inst_name);
+            if (job->task) {
+                slapi_task_dec_refcount(job->task);
+            }
+            import_all_done(job, ret);
+            ret = 3;
+        } else {
+            ret = -1;
+            if (job->task != NULL) {
+                slapi_task_finish(job->task, ret);
+            }
+        }
+    } else if (0 != ret) {
+        import_log_notice(job, SLAPI_LOG_ERR, "import_main_offline", "%s failed.", opstr);
+        if (job->task != NULL) {
+            slapi_task_finish(job->task, ret);
+        }
+    } else {
+        if (job->task) {
+            slapi_task_dec_refcount(job->task);
+        }
+        import_all_done(job, ret);
+    }
+
+    /* This instance isn't busy anymore */
+    instance_set_not_busy(job->inst);
+
+    /* import_free_job releases the job's contents; the job struct itself
+     * is owned by the task destructor when a task exists, otherwise freed
+     * here. */
+    import_free_job(job);
+    if (!job->task) {
+        FREE(job);
+    }
+    if (producer)
+        FREE(producer);
+
+    return (ret);
+}
+
+/* Thread entry point for online import, spawned via PR_CreateThread().
+ * Offline import bypasses this wrapper and calls import_main_offline()
+ * directly; the return code is unused for the threaded case, so it is
+ * explicitly discarded here. */
+void
+import_main(void *arg)
+{
+    (void)import_main_offline(arg);
+}
+
+/*
+ * Entry point for the ldif2db / reindex / upgradedn operations on the BDB
+ * backend.  Builds an ImportJob from the pblock parameters, then either
+ * spawns import_main on a thread (cn=tasks / online case, returning 0
+ * immediately) or runs import_main_offline synchronously and returns its
+ * result.  Returns -1 on parameter errors, -2 if the import thread could
+ * not be spawned.
+ */
+int
+bdb_back_ldif2db(Slapi_PBlock *pb)
+{
+    backend *be = NULL;
+    int noattrindexes = 0;
+    ImportJob *job = NULL;
+    char **name_array = NULL;
+    int total_files, i;
+    int up_flags = 0;
+    PRThread *thread = NULL;
+
+    slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+    if (be == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_back_ldif2db", "Backend is not set\n");
+        return -1;
+    }
+    job = CALLOC(ImportJob);
+    job->inst = (ldbm_instance *)be->be_instance_info;
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_NOATTRINDEXES, &noattrindexes);
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_FILE, &name_array);
+    slapi_pblock_get(pb, SLAPI_SEQ_TYPE, &up_flags); /* For upgrade dn and
+                                                        dn2rdn */
+
+    /* the removedupvals field is blatantly overloaded here to mean
+     * the chunk size too.  (chunk size = number of entries that should
+     * be imported before starting a new pass.  usually for debugging.)
+     */
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_REMOVEDUPVALS, &job->merge_chunk_size);
+    if (job->merge_chunk_size == 1)
+        job->merge_chunk_size = 0;
+    /* get list of specifically included and/or excluded subtrees from
+     * the front-end */
+    bdb_back_fetch_incl_excl(pb, &job->include_subtrees,
+                              &job->exclude_subtrees);
+    /* get cn=tasks info, if any */
+    slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &job->task);
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_ENCRYPT, &job->encrypt);
+    /* get uniqueid info */
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_GENERATE_UNIQUEID, &job->uuid_gen_type);
+    if (job->uuid_gen_type == SLAPI_UNIQUEID_GENERATE_NAME_BASED) {
+        char *namespaceid;
+
+        slapi_pblock_get(pb, SLAPI_LDIF2DB_NAMESPACEID, &namespaceid);
+        job->uuid_namespace = slapi_ch_strdup(namespaceid);
+    }
+
+    job->flags = FLAG_USE_FILES;
+    if (NULL == name_array) { /* no ldif file is given -> reindexing or
+                                                             upgradedn */
+        if (up_flags & (SLAPI_UPGRADEDNFORMAT | SLAPI_UPGRADEDNFORMAT_V1)) {
+            if (up_flags & SLAPI_UPGRADEDNFORMAT) {
+                job->flags |= FLAG_UPGRADEDNFORMAT;
+            }
+            if (up_flags & SLAPI_UPGRADEDNFORMAT_V1) {
+                job->flags |= FLAG_UPGRADEDNFORMAT_V1;
+            }
+            if (up_flags & SLAPI_DRYRUN) {
+                job->flags |= FLAG_DRYRUN;
+            }
+        } else {
+            job->flags |= FLAG_REINDEXING; /* call index_producer */
+            if (up_flags & SLAPI_UPGRADEDB_DN2RDN) {
+                if (entryrdn_get_switch()) {
+                    job->flags |= FLAG_DN2RDN; /* migrate to the rdn format */
+                } else {
+                    /* dn2rdn requires the entryrdn switch; bail out and
+                     * release the partially-built job. */
+                    slapi_log_err(SLAPI_LOG_ERR, "bdb_back_ldif2db",
+                                  "DN to RDN option is specified, "
+                                  "but %s is not enabled\n",
+                                  CONFIG_ENTRYRDN_SWITCH);
+                    import_free_job(job);
+                    FREE(job);
+                    return -1;
+                }
+            }
+        }
+    }
+    if (!noattrindexes) {
+        job->flags |= FLAG_INDEX_ATTRS;
+    }
+    for (i = 0; name_array && name_array[i] != NULL; i++) {
+        charray_add(&job->input_filenames, slapi_ch_strdup(name_array[i]));
+    }
+    job->starting_ID = 1;
+    job->first_ID = 1;
+    job->mothers = CALLOC(import_subcount_stuff);
+
+    /* how much space should we allocate to index buffering? */
+    job->job_index_buffer_size = import_get_index_buffer_size();
+    if (job->job_index_buffer_size == 0) {
+        /* 10% of the allocated cache size + one meg */
+        PR_Lock(job->inst->inst_li->li_config_mutex);
+        job->job_index_buffer_size =
+            (job->inst->inst_li->li_import_cachesize / 10) + (1024 * 1024);
+        PR_Unlock(job->inst->inst_li->li_config_mutex);
+    }
+    import_subcount_stuff_init(job->mothers);
+
+    if (job->task != NULL) {
+        /* count files, use that to track "progress" in cn=tasks */
+        total_files = 0;
+        while (name_array && name_array[total_files] != NULL)
+            total_files++;
+        /* add 1 to account for post-import cleanup (which can take a
+         * significant amount of time)
+         */
+        /* NGK - This should eventually be cleaned up to use the public
+         * task API. */
+        if (0 == total_files) { /* reindexing */
+            job->task->task_work = 2;
+        } else {
+            job->task->task_work = total_files + 1;
+        }
+        job->task->task_progress = 0;
+        job->task->task_state = SLAPI_TASK_RUNNING;
+        slapi_task_set_data(job->task, job);
+        slapi_task_set_destructor_fn(job->task, import_task_destroy);
+        slapi_task_set_cancel_fn(job->task, import_task_abort);
+        job->flags |= FLAG_ONLINE;
+
+        /* create thread for import_main, so we can return */
+        thread = PR_CreateThread(PR_USER_THREAD, import_main, (void *)job,
+                                 PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                 PR_UNJOINABLE_THREAD,
+                                 SLAPD_DEFAULT_THREAD_STACKSIZE);
+        if (thread == NULL) {
+            PRErrorCode prerr = PR_GetError();
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_back_ldif2db",
+                          "Unable to spawn import thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                          prerr, slapd_pr_strerror(prerr));
+            import_free_job(job);
+            FREE(job);
+            return -2;
+        }
+        return 0;
+    }
+
+    /* old style -- do it all synchronously (THIS IS GOING AWAY SOON) */
+    return import_main_offline((void *)job);
+}
+
+/* One payload item pulled from an intermediate index file during the
+ * multi-pass merge: either an IDL (regular index) or a raw DBT value
+ * (VLV index).  'type' selects which union member is valid. */
+struct _import_merge_thang
+{
+    int type;
+#define IMPORT_MERGE_THANG_IDL 1 /* Values for type */
+#define IMPORT_MERGE_THANG_VLV 2
+    union
+    {
+        IDList *idl;  /* if type == IMPORT_MERGE_THANG_IDL */
+        DBT vlv_data; /* if type == IMPORT_MERGE_THANG_VLV */
+    } payload;
+};
+typedef struct _import_merge_thang import_merge_thang;
+
+/* Node of the key-ordered priority queue used by the merge: the key, its
+ * payload, and a per-input-file flag array recording which pass files
+ * contributed to this entry.  Singly linked in ascending key order. */
+struct _import_merge_queue_entry
+{
+    int *file_referenced_list; /* one flag per pass/input file */
+    import_merge_thang thang;  /* merged payload for 'key' */
+    DBT key;                   /* index key this entry represents */
+    struct _import_merge_queue_entry *next;
+};
+typedef struct _import_merge_queue_entry import_merge_queue_entry;
+
+/* Advance 'cursor' to the next key in an intermediate index file and load
+ * its payload into 'thang'/'key'.  For IDL indexes, continuation-block
+ * keys (CONT_PREFIX) are skipped and the full IDL is fetched via
+ * idl_fetch(); for VLV indexes the raw value is taken as-is.
+ * Returns 0 on success, EOF at end of file, or a BDB error code.
+ * On success the caller owns the returned key/payload memory. */
+static int
+import_merge_get_next_thang(backend *be, DBC *cursor, DB *db, import_merge_thang *thang, DBT *key, int type)
+{
+    int ret = 0;
+    DBT value = {0};
+
+    value.flags = DB_DBT_MALLOC;
+    key->flags = DB_DBT_MALLOC;
+
+    thang->type = type;
+    if (IMPORT_MERGE_THANG_IDL == type) {
+    /* IDL case: iterate distinct keys only (DB_NEXT_NODUP) */
+    around:
+        ret = cursor->c_get(cursor, key, &value, DB_NEXT_NODUP);
+        if (0 == ret) {
+            /* Check that we've not reached the beginning of continuation
+             * blocks */
+            if (CONT_PREFIX != ((char *)key->data)[0]) {
+                /* If not, read the IDL using idl_fetch() */
+                key->flags = DB_DBT_REALLOC;
+                ret = NEW_IDL_NO_ALLID;
+                thang->payload.idl = idl_fetch(be, db, key, NULL, NULL, &ret);
+                PR_ASSERT(NULL != thang->payload.idl);
+            } else {
+                /* Continuation block: free what c_get allocated and keep
+                 * scanning for the next real key. */
+                slapi_ch_free(&(value.data));
+                slapi_ch_free(&(key->data));
+                key->flags = DB_DBT_MALLOC;
+                goto around; /* Just skip these */
+            }
+            slapi_ch_free(&(value.data));
+        } else {
+            if (DB_NOTFOUND == ret) {
+                /* This means that we're at the end of the file */
+                ret = EOF;
+            }
+        }
+    } else {
+        /* VLV case: every duplicate matters, so use DB_NEXT */
+        ret = cursor->c_get(cursor, key, &value, DB_NEXT);
+        if (0 == ret) {
+            /* Hand the malloc'd buffers to the caller; clear the flags so
+             * later DBT reuse does not free or reallocate them. */
+            thang->payload.vlv_data = value;
+            thang->payload.vlv_data.flags = 0;
+            key->flags = 0;
+        } else {
+            if (DB_NOTFOUND == ret) {
+                /* This means that we're at the end of the file */
+                ret = EOF;
+            }
+        }
+    }
+
+    return ret;
+}
+
+/* Allocate a merge-queue entry that takes ownership of *key and *thang,
+ * with a zeroed per-pass reference bitmap in which input file `fileno`
+ * is marked as a contributor. */
+static import_merge_queue_entry *
+import_merge_make_new_queue_entry(import_merge_thang *thang, DBT *key, int fileno, int passes)
+{
+    import_merge_queue_entry *entry =
+        (import_merge_queue_entry *)slapi_ch_calloc(1, sizeof(import_merge_queue_entry));
+
+    entry->thang = *thang;
+    entry->key = *key;
+    entry->file_referenced_list = (int *)slapi_ch_calloc(passes, sizeof(int));
+    entry->file_referenced_list[fileno] = 1;
+    return entry;
+}
+
+/* Put an IDL onto the priority queue */
+/* Insert (key, thang), read from input file `fileno`, into the
+ * key-sorted merge queue.  IDL case: an equal key is merged into the
+ * existing entry with idl_union(), the file is flagged in that entry's
+ * reference list, and the caller's key is freed here.  VLV case: equal
+ * keys are legal and kept as separate entries, with the most recently
+ * read key placed after the earlier ones.  Returns 0 on success, -1 if
+ * a queue entry could not be created. */
+static int
+import_merge_insert_input_queue(backend *be, import_merge_queue_entry **queue, int fileno, DBT *key, import_merge_thang *thang, int passes)
+{
+    /* Walk the list, looking for a key value which is greater than or equal
+     * to the presented key */
+    /* If an equal key is found, compute the union of the IDLs and store that
+     * back in the queue entry */
+    /* If a key greater than is found, or no key greater than is found, insert
+     * a new queue entry */
+    import_merge_queue_entry *current_entry = NULL;
+    import_merge_queue_entry *previous_entry = NULL;
+
+    PR_ASSERT(NULL != thang);
+    if (NULL == *queue) {
+        /* Queue was empty--- put ourselves at the head */
+        *queue = import_merge_make_new_queue_entry(thang, key, fileno, passes);
+        if (NULL == *queue) {
+            return -1;
+        }
+    } else {
+        for (current_entry = *queue; current_entry != NULL;
+             current_entry = current_entry->next) {
+            /* NOTE(review): keys are compared with strcmp, which assumes
+             * key->data is NUL-terminated -- confirm for all index types. */
+            int cmp = strcmp(key->data, current_entry->key.data);
+
+            if (0 == cmp) {
+                if (IMPORT_MERGE_THANG_IDL == thang->type) { /* IDL case */
+                    IDList *idl = thang->payload.idl;
+                    /* Equal --- merge into the stored IDL, add file ID
+                     * to the list */
+                    IDList *new_idl =
+                        idl_union(be, current_entry->thang.payload.idl, idl);
+
+                    idl_free(&(current_entry->thang.payload.idl));
+                    idl_free(&idl);
+                    current_entry->thang.payload.idl = new_idl;
+                    /* Add this file id into the entry's referenced list */
+                    (current_entry->file_referenced_list)[fileno] = 1;
+                    /* Because we merged the entries, we no longer need the
+                     * key, so free it */
+                    slapi_ch_free(&(key->data));
+                    goto done;
+                } else {
+                    /* VLV case, we can see exact keys, this is not a bug ! */
+                    /* We want to ensure that they key read most recently is
+                     * put later in the queue than any others though */
+                }
+            } else {
+                if (cmp < 0) {
+                    /* We compare smaller than the stored key, so we should
+                     * insert ourselves before this entry */
+                    break;
+                } else {
+                    /* We compare greater than this entry, so we should keep
+                     * going */;
+                }
+            }
+            previous_entry = current_entry;
+        }
+
+        /* Now insert */
+        {
+            import_merge_queue_entry *new_entry =
+                import_merge_make_new_queue_entry(thang, key, fileno, passes);
+
+            if (NULL == new_entry) {
+                return -1;
+            }
+
+            /* If not, then we must need to insert ourselves after the last
+             * entry */
+            new_entry->next = current_entry;
+            if (NULL == previous_entry) {
+                *queue = new_entry;
+            } else {
+                previous_entry->next = new_entry;
+            }
+        }
+    }
+
+done:
+    return 0;
+}
+
+/* Pop the smallest-key entry off the merge queue, handing its key and
+ * payload to the caller (who takes ownership of both), then refill the
+ * queue by reading the next item from every input file that contributed
+ * to the popped entry.  Returns 0 on success, EOF once the queue is
+ * exhausted, or a DB error from the refill reads. */
+static int
+import_merge_remove_input_queue(backend *be, import_merge_queue_entry **queue, import_merge_thang *thang, DBT *key, DBC **input_cursors, DB **input_files, int passes)
+{
+    import_merge_queue_entry *head = NULL;
+    int file_referenced = 0;
+    int i = 0;
+    int ret = 0;
+
+    PR_ASSERT(NULL != queue);
+    head = *queue;
+    if (head == NULL) {
+        /* Means we've exhausted the queue---we're done */
+        return EOF;
+    }
+    /* Remove the head of the queue */
+    *queue = head->next;
+    /* Get the IDL */
+    *thang = head->thang;
+    *key = head->key;
+    PR_ASSERT(NULL != thang);
+    /* Walk the list of referenced files, reading in the next IDL from each
+     * one to the queue */
+    for (i = 0; i < passes; i++) {
+        import_merge_thang new_thang = {0};
+        DBT new_key = {0};
+
+        file_referenced = (head->file_referenced_list)[i];
+        if (file_referenced) {
+            ret = import_merge_get_next_thang(be, input_cursors[i],
+                                              input_files[i], &new_thang, &new_key, thang->type);
+            if (0 != ret) {
+                if (EOF == ret) {
+                    /* Means that we walked off the end of the list,
+                     * do nothing */
+                    ret = 0;
+                } else {
+                    /* Some other error */
+                    break;
+                }
+            } else {
+                /* This function is responsible for any freeing needed */
+                import_merge_insert_input_queue(be, queue, i, &new_key,
+                                                &new_thang, passes);
+            }
+        }
+    }
+    /* The popped node itself is no longer needed; its key/payload now
+     * belong to the caller. */
+    slapi_ch_free((void **)&(head->file_referenced_list));
+    slapi_ch_free((void **)&head);
+
+    return ret;
+}
+
+/* Open a DB cursor on every non-NULL file in `files`.  *cursors is
+ * allocated with `passes` slots; slots whose file is NULL remain NULL.
+ * Returns 0 on success, -1 on allocation failure, or the first DB error
+ * from a cursor open (stopping at that point). */
+static int
+import_merge_open_input_cursors(DB **files, int passes, DBC ***cursors)
+{
+    int rc = 0;
+    int idx;
+
+    *cursors = (DBC **)slapi_ch_calloc(passes, sizeof(DBC *));
+    if (NULL == *cursors) {
+        return -1;
+    }
+
+    for (idx = 0; idx < passes; idx++) {
+        DB *dbp = files[idx];
+        DBC *dbcp = NULL;
+
+        if (NULL == dbp) {
+            continue; /* no file for this pass */
+        }
+        /* Try to open a cursor onto the file */
+        rc = dbp->cursor(dbp, NULL, &dbcp, 0);
+        if (rc != 0) {
+            break;
+        }
+        (*cursors)[idx] = dbcp;
+    }
+
+    return rc;
+}
+
+/* Count how many per-pass intermediate files exist on disk for the
+ * named index, probing "<inst_dir>/<indexname>.<pass><suffix>" for each
+ * pass.  *number_found receives the count and *pass_number the 1-based
+ * number of the first pass whose file exists (0 if none).  Probe
+ * failures are not errors; returns 0, or -1 on allocation failure. */
+static int
+import_count_merge_input_files(ldbm_instance *inst,
+                               char *indexname,
+                               int passes,
+                               int *number_found,
+                               int *pass_number)
+{
+    int pass_idx;
+    int seen_any = 0;
+
+    *number_found = 0;
+    *pass_number = 0;
+
+    for (pass_idx = 0; pass_idx < passes; pass_idx++) {
+        char *probe_name = slapi_ch_smprintf("%s/%s.%d%s",
+                                             inst->inst_dir_name, indexname,
+                                             pass_idx + 1, LDBM_FILENAME_SUFFIX);
+        int fd;
+
+        if (NULL == probe_name) {
+            return -1;
+        }
+
+        fd = bdb_open_huge_file(probe_name, O_RDONLY, 0);
+        slapi_ch_free((void **)&probe_name);
+        if (fd < 0) {
+            continue; /* Not finding a file is OK */
+        }
+        close(fd);
+        if (!seen_any) {
+            *pass_number = pass_idx + 1;
+        }
+        seen_any = 1;
+        (*number_found)++;
+    }
+
+    return 0;
+}
+
+/* Open the per-pass intermediate DB files "<index>.<n>" for a merge.
+ * *input_files is allocated with `passes` slots; each successfully
+ * opened file is stored at its pass index, missing files (ENOENT) are
+ * tolerated and leave a NULL slot.  *number_found receives the count of
+ * files opened and *pass_number the 1-based number of the first pass
+ * that had a file.  Returns 0 on success or the first non-ENOENT DB
+ * open error. */
+static int
+import_open_merge_input_files(backend *be, IndexInfo *index_info, int passes, DB ***input_files, int *number_found, int *pass_number)
+{
+    int i = 0;
+    int ret = 0;
+    int found_one = 0;
+
+    *number_found = 0;
+    *pass_number = 0;
+    *input_files = (DB **)slapi_ch_calloc(passes, sizeof(DB *));
+    if (NULL == *input_files) {
+        /* Memory allocation error */
+        return -1;
+    }
+    for (i = 0; i < passes; i++) {
+        DB *pDB = NULL;
+        char *filename = slapi_ch_smprintf("%s.%d", index_info->name, i + 1);
+
+        /* NOTE(review): returning here leaks *input_files and any DBs
+         * already opened; believed unreachable since slapi allocators
+         * abort on OOM -- confirm. */
+        if (NULL == filename) {
+            return -1;
+        }
+
+        if (vlv_isvlv(filename)) {
+            /* not sure why the file would be marked as a vlv index but
+           not the index configuration . . . but better make sure
+           the new code works with the old semantics */
+            int saved_mask = index_info->ai->ai_indexmask;
+            index_info->ai->ai_indexmask |= INDEX_VLV;
+            ret = dblayer_open_file(be, filename, 0, index_info->ai, &pDB);
+            index_info->ai->ai_indexmask = saved_mask;
+        } else {
+            ret = dblayer_open_file(be, filename, 0, index_info->ai, &pDB);
+        }
+
+        slapi_ch_free((void **)&filename);
+        if (0 == ret) {
+            if (found_one == 0) {
+                *pass_number = i + 1;
+            }
+            found_one = 1;
+            (*number_found)++;
+            (*input_files)[i] = pDB;
+        } else {
+            if (ENOENT == ret) {
+                ret = 0; /* Not finding a file is OK */
+            } else {
+                break;
+            }
+        }
+    }
+
+    return ret;
+}
+
+/* Performs the n-way merge on one file */
+/*
+ * Merge the per-pass intermediate files of one index into its final
+ * index file.  If exactly one pass file exists it is renamed into place
+ * and *key_count is set to -1; otherwise the instance is re-opened in
+ * import mode, all pass files are opened and their contents combined
+ * key by key through a sorted queue, the merged IDLs (or VLV records)
+ * are written to a freshly created output file, and the pass files are
+ * deleted.  *key_count receives the number of distinct keys written.
+ * Returns 0 on success or a DB/NSPR error code.
+ */
+static int
+import_merge_one_file(ImportWorkerInfo *worker, int passes, int *key_count)
+{
+    ldbm_instance *inst = worker->job->inst;
+    backend *be = inst->inst_be;
+    DB *output_file = NULL;
+    int ret = 0;
+    int preclose_ret = 0;
+    int number_found = 0;
+    int pass_number = 0;
+    DB **input_files = NULL;
+    DBC **input_cursors = NULL;
+
+    PR_ASSERT(NULL != inst);
+
+    /* Try to open all the input files.
+       If we can't open file a file, we assume that is
+       because there was no data in it. */
+    ret = import_count_merge_input_files(inst, worker->index_info->name,
+                                         passes, &number_found, &pass_number);
+    if (0 != ret) {
+        goto error;
+    }
+    /* If there were no input files, then we're finished ! */
+    if (0 == number_found) {
+        ret = 0;
+        goto error;
+    }
+    /* Special-case where there's only one input file---just rename it */
+    if (1 == number_found) {
+        char *newname = NULL;
+        char *oldname = NULL;
+
+        ret = import_make_merge_filenames(inst->inst_dir_name,
+                                          worker->index_info->name, pass_number, &oldname, &newname);
+        if (0 != ret) {
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
+                              "Failed making filename in merge");
+            goto error;
+        }
+        /* Move the single pass file (newname) onto the final index
+         * file name (oldname). */
+        ret = PR_Rename(newname, oldname);
+        if (0 != ret) {
+            PRErrorCode prerr = PR_GetError();
+            /* Log the names in the order the rename actually ran:
+             * newname was being renamed to oldname. */
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
+                              "Failed to rename file \"%s\" to \"%s\" "
+                              "in merge, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)",
+                              newname, oldname, prerr, slapd_pr_strerror(prerr));
+            slapi_ch_free((void **)&newname);
+            slapi_ch_free((void **)&oldname);
+            goto error;
+        }
+        slapi_ch_free((void **)&newname);
+        slapi_ch_free((void **)&oldname);
+        *key_count = -1;
+    } else {
+        /* We really need to merge */
+        import_merge_queue_entry *merge_queue = NULL;
+        DBT key = {0};
+        import_merge_thang thang = {0};
+        int i = 0;
+        int not_finished = 1;
+        int vlv_index = (INDEX_VLV == worker->index_info->ai->ai_indexmask);
+
+        /* Cycle the instance so the pass files can be (re)opened under
+         * the import-mode environment. */
+        ret = dblayer_instance_close(be);
+        if (0 != ret) {
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 8i %d\n", ret);
+            goto error;
+        }
+        ret = bdb_instance_start(be, DBLAYER_IMPORT_MODE);
+        if (0 != ret) {
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 8j %d\n", ret);
+            goto error;
+        }
+
+        ret = import_open_merge_input_files(be, worker->index_info,
+                                            passes, &input_files, &number_found, &pass_number);
+        if (0 != ret) {
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 10");
+            goto error;
+        }
+
+        ret = dblayer_open_file(be, worker->index_info->name, 1,
+                                worker->index_info->ai, &output_file);
+        if (0 != ret) {
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "Failed to open output file for "
+                                                                                   "index %s in merge",
+                              worker->index_info->name);
+            goto error;
+        }
+
+        /* OK, so we now have input and output files open and can proceed to
+     * merge */
+        /* We want to pre-fill the input IDL queue */
+        /* Open cursors onto the input files */
+        ret = import_merge_open_input_cursors(input_files, passes,
+                                              &input_cursors);
+        if (0 != ret) {
+            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 2 %s %d",
+                              worker->index_info->name, ret);
+            goto error;
+        }
+
+        /* Now read from the first location in each file and insert into the
+     * queue */
+        for (i = 0; i < passes; i++)
+            if (input_files[i]) {
+                import_merge_thang prime_thang = {0};
+
+                /* Read an IDL from the file */
+                ret = import_merge_get_next_thang(be, input_cursors[i],
+                                                  input_files[i], &prime_thang, &key,
+                                                  vlv_index ? IMPORT_MERGE_THANG_VLV : IMPORT_MERGE_THANG_IDL);
+                if (0 != ret) {
+                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 1 %s %d",
+                                      worker->index_info->name, ret);
+                    goto error;
+                }
+                /* Put it on the queue */
+                ret = import_merge_insert_input_queue(be, &merge_queue, i, &key,
+                                                      &prime_thang, passes);
+                if (0 != ret) {
+                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 0 %s",
+                                      worker->index_info->name);
+                    goto error;
+                }
+            }
+
+        /* We now have a pre-filled queue, so we may now proceed to remove the
+       head entry and write it to the output file, and repeat this process
+       until we've finished reading all the input data */
+        while (not_finished && (0 == ret)) {
+            ret = import_merge_remove_input_queue(be, &merge_queue, &thang,
+                                                  &key, input_cursors, input_files, passes);
+            if (0 != ret) {
+                /* Have we finished cleanly ? */
+                if (EOF == ret) {
+                    not_finished = 0;
+                } else {
+                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 3 %s, %d",
+                                      worker->index_info->name, ret);
+                }
+            } else {
+                /* Write it out */
+                (*key_count)++;
+                if (vlv_index) {
+                    /* Write the vlv index */
+                    ret = output_file->put(output_file, NULL, &key,
+                                           &(thang.payload.vlv_data), 0);
+                    slapi_ch_free(&(thang.payload.vlv_data.data));
+                    thang.payload.vlv_data.data = NULL;
+                } else {
+                    /* Write the IDL index */
+                    ret = idl_store_block(be, output_file, &key,
+                                          thang.payload.idl, NULL, worker->index_info->ai);
+                    /* Free the key we got back from the queue */
+                    idl_free(&(thang.payload.idl));
+                    thang.payload.idl = NULL;
+                }
+                slapi_ch_free(&(key.data));
+                key.data = NULL;
+                if (0 != ret) {
+                    /* Failed to write--- most obvious cause being out of
+                   disk space, let's make sure that we at least print a
+                   sensible error message right here. The caller should
+                   really handle this properly, but we're always bad at
+                   this. */
+                    if (ret == DB_RUNRECOVERY || ret == ENOSPC) {
+                        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
+                                          "OUT OF SPACE ON DISK, failed writing index file %s",
+                                          worker->index_info->name);
+                    } else {
+                        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
+                                          "Failed to write index file %s, errno=%d (%s)\n",
+                                          worker->index_info->name, errno,
+                                          dblayer_strerror(errno));
+                    }
+                }
+            }
+        }
+        preclose_ret = ret;
+        /* Now close the files */
+        bdb_close_file(&output_file);
+        /* Close the cursors */
+        /* Close and delete the files */
+        for (i = 0; i < passes; i++) {
+            DBC *cursor = input_cursors[i];
+            DB *db = input_files[i];
+            if (NULL != db) {
+                PR_ASSERT(NULL != cursor);
+                ret = cursor->c_close(cursor);
+                if (0 != ret) {
+                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 4");
+                }
+                ret = bdb_close_file(&db);
+                if (0 != ret) {
+                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 5");
+                }
+                /* Now make the filename and delete the file */
+                {
+                    char *newname = NULL;
+                    char *oldname = NULL;
+                    ret = import_make_merge_filenames(inst->inst_dir_name,
+                                                      worker->index_info->name, i + 1, &oldname, &newname);
+                    if (0 != ret) {
+                        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 6");
+                    } else {
+                        ret = PR_Delete(newname);
+                        if (0 != ret) {
+                            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 7");
+                        }
+                        slapi_ch_free((void **)&newname);
+                        slapi_ch_free((void **)&oldname);
+                    }
+                }
+            }
+        }
+        /* Report the merge-loop status rather than any cleanup status. */
+        if (preclose_ret != 0)
+            ret = preclose_ret;
+    }
+    if (EOF == ret) {
+        ret = 0;
+    }
+
+error:
+    slapi_ch_free((void **)&input_cursors);
+    slapi_ch_free((void **)&input_files);
+    if (ret) {
+        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
+                          "%s: Import merge failed. "
+                          "If this is an online-import, shutdown the server "
+                          "and try the offline command line import (ldif2db)",
+                          inst->inst_name);
+    }
+    return ret;
+}
+
+/********** the real deal here: **********/
+
+/* Run the n-way merge phase of a multi-pass import: for every index
+ * worker except the foreman (primary index) and the producer, combine
+ * that index's per-pass intermediate files into its final index file
+ * via import_merge_one_file(), logging per-index and overall timings.
+ * Returns 0 on success or the first per-file merge error. */
+int
+import_mega_merge(ImportJob *job)
+{
+    ImportWorkerInfo *worker = NULL;
+    int ret = 0;
+    int passes = job->current_pass;
+    time_t t_begin = 0;
+    time_t t_end = 0;
+
+    if (1 == job->number_indexers) {
+        import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge",
+                          "Beginning %d-way merge of one file...", passes);
+    } else {
+        import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge",
+                          "Beginning %d-way merge of up to %lu files...",
+                          passes, (long unsigned int)job->number_indexers);
+    }
+
+    t_begin = slapi_current_utc_time();
+    /* Merge each secondary index in turn; stop at the first failure. */
+    for (worker = job->worker_list; (0 == ret) && (worker != NULL);
+         worker = worker->next) {
+        time_t file_begin;
+        time_t file_end;
+        int key_count = 0;
+
+        /* The primary index and the producer have no pass files. */
+        if ((FOREMAN == worker->work_type) ||
+            (PRODUCER == worker->work_type)) {
+            continue;
+        }
+
+        file_begin = slapi_current_utc_time();
+        ret = import_merge_one_file(worker, passes, &key_count);
+        file_end = slapi_current_utc_time();
+        if (0 == key_count) {
+            import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "No files to merge for \"%s\".",
+                              worker->index_info->name);
+        } else if (-1 == key_count) {
+            import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merged \"%s\": Simple merge - "
+                                                                        "file renamed.",
+                              worker->index_info->name);
+        } else {
+            import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merged \"%s\": %d keys merged "
+                                                                        "in %ld seconds.",
+                              worker->index_info->name,
+                              key_count, file_end - file_begin);
+        }
+    }
+
+    t_end = slapi_current_utc_time();
+    if (0 == ret) {
+        import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merging completed in %d seconds.",
+                          (int)(t_end - t_begin));
+    }
+
+    return ret;
+}

+ 22 - 46
ldap/servers/slapd/back-ldbm/import-threads.c → ldap/servers/slapd/back-ldbm/db-bdb/bdb_import_threads.c

@@ -1,6 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2019 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -20,9 +19,9 @@
  * a wire import (aka "fast replica" import) won't have a producer thread.
  */
 
-#include "back-ldbm.h"
-#include "vlv_srch.h"
-#include "import.h"
+#include "bdb_layer.h"
+#include "../vlv_srch.h"
+#include "../import.h"
 
 static void import_wait_for_space_in_fifo(ImportJob *job, size_t new_esize);
 static int import_get_and_add_parent_rdns(ImportWorkerInfo *info, ldbm_instance *inst, DB *db, ID id, ID *total_id, Slapi_RDN *srdn, int *curr_entry);
@@ -444,7 +443,7 @@ import_producer(void *param)
                 fd = STDIN_FILENO;
             } else {
                 int o_flag = O_RDONLY;
-                fd = dblayer_open_huge_file(curr_filename, o_flag, 0);
+                fd = bdb_open_huge_file(curr_filename, o_flag, 0);
             }
             if (fd < 0) {
                 import_log_notice(job, SLAPI_LOG_ERR, "import_producer",
@@ -632,7 +631,7 @@ import_producer(void *param)
         }
 
         /* check for include/exclude subtree lists */
-        if (!ldbm_back_ok_to_dump(backentry_get_ndn(ep),
+        if (!bdb_back_ok_to_dump(backentry_get_ndn(ep),
                                   job->include_subtrees,
                                   job->exclude_subtrees)) {
             backentry_free(&ep);
@@ -920,14 +919,14 @@ index_producer(void *param)
     info->state = RUNNING;
 
     /* open id2entry with dedicated db env and db handler */
-    if (dblayer_get_aux_id2entry(be, &db, &env, &id2entry) != 0 ||
+    if (bdb_get_aux_id2entry(be, &db, &env, &id2entry) != 0 ||
         db == NULL || env == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, "index_producer", "Could not open id2entry\n");
         goto error;
     }
     if (job->flags & FLAG_DN2RDN) {
         /* open new id2entry for the rdn format entries */
-        if (dblayer_get_aux_id2entry_ext(be, &tmp_db, &env, &tmpid2entry,
+        if (bdb_get_aux_id2entry_ext(be, &tmp_db, &env, &tmpid2entry,
                                          DBLAYER_AUX_ID2ENTRY_TMP) != 0 ||
             tmp_db == NULL || env == NULL) {
             slapi_log_err(SLAPI_LOG_ERR, "index_producer", "Could not open new id2entry\n");
@@ -1149,7 +1148,7 @@ index_producer(void *param)
             goto bail;
         }
     }
-    dblayer_release_aux_id2entry(be, NULL, env);
+    bdb_release_aux_id2entry(be, NULL, env);
     slapi_ch_free_string(&id2entry);
     slapi_ch_free_string(&tmpid2entry);
     info->state = FINISHED;
@@ -1179,7 +1178,7 @@ error:
         }
     }
 bail:
-    dblayer_release_aux_id2entry(be, db, env);
+    bdb_release_aux_id2entry(be, db, env);
     slapi_ch_free_string(&id2entry);
     slapi_ch_free_string(&tmpid2entry);
     info->state = ABORTED;
@@ -1492,7 +1491,7 @@ upgradedn_producer(void *param)
     info->state = RUNNING;
 
     /* open id2entry with dedicated db env and db handler */
-    if (dblayer_get_aux_id2entry(be, &db, &env, NULL) != 0 || db == NULL ||
+    if (bdb_get_aux_id2entry(be, &db, &env, NULL) != 0 || db == NULL ||
         env == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, "upgradedn_producer",
                       "Could not open id2entry\n");
@@ -2168,7 +2167,7 @@ upgradedn_producer(void *param)
     }
 bail:
     dbc->c_close(dbc);
-    dblayer_release_aux_id2entry(be, db, env);
+    bdb_release_aux_id2entry(be, db, env);
     info->state = FINISHED | info_state;
     goto done;
 
@@ -2176,7 +2175,7 @@ error:
     if (dbc) {
         dbc->c_close(dbc);
     }
-    dblayer_release_aux_id2entry(be, db, env);
+    bdb_release_aux_id2entry(be, db, env);
     info->state = ABORTED;
 
 done:
@@ -3162,9 +3161,9 @@ bulk_import_start(Slapi_PBlock *pb)
     dblayer_delete_instance_dir(be);
     /* it's okay to fail -- it might already be gone */
 
-    /* dblayer_instance_start will init the id2entry index. */
+    /* bdb_instance_start will init the id2entry index. */
     /* it also (finally) fills in inst_dir_name */
-    ret = dblayer_instance_start(be, DBLAYER_IMPORT_MODE);
+    ret = bdb_instance_start(be, DBLAYER_IMPORT_MODE);
     if (ret != 0)
         goto fail;
 
@@ -3674,7 +3673,7 @@ dse_conf_backup(struct ldbminfo *li, char *dest_dir)
  * [547427] index config must not change between backup and restore
  */
 int
-dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *filter, char *log_str, char *entry_filter)
+dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *filter, char *log_str)
 {
     char *filename = NULL;
     int rval = 0;
@@ -3698,7 +3697,7 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *
         goto out;
     }
 
-    fd = dblayer_open_huge_file(filename, O_RDONLY, 0);
+    fd = bdb_open_huge_file(filename, O_RDONLY, 0);
     if (fd < 0) {
         slapi_log_err(SLAPI_LOG_ERR, "dse_conf_verify_core",
                       "Can't open config backup file: %s\n", filename);
@@ -3718,14 +3717,6 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *
         if (!estr)
             break;
 
-        if (entry_filter != NULL) /* Single instance restoration */
-        {
-            if (NULL == PL_strcasestr(estr, entry_filter)) {
-                slapi_ch_free_string(&estr);
-                continue;
-            }
-        }
-
         e = slapi_str2entry(estr, 0);
         slapi_ch_free_string(&estr);
         if (!e) {
@@ -3748,11 +3739,7 @@ dse_conf_verify_core(struct ldbminfo *li, char *src_dir, char *file_name, char *
         *bep = NULL;
     }
 
-    if (entry_filter != NULL) { /* Single instance restoration */
-        search_scope = slapi_ch_smprintf("%s,%s", entry_filter, li->li_plugin->plg_dn);
-    } else { /* Normal restoration */
-        search_scope = slapi_ch_strdup(li->li_plugin->plg_dn);
-    }
+    search_scope = slapi_ch_strdup(li->li_plugin->plg_dn);
 
     Slapi_PBlock *srch_pb = slapi_pblock_new();
 
@@ -3792,29 +3779,18 @@ out:
 }
 
 int
-dse_conf_verify(struct ldbminfo *li, char *src_dir, char *bename)
+dse_conf_verify(struct ldbminfo *li, char *src_dir)
 {
     int rval;
-    char *entry_filter = NULL;
     char *instance_entry_filter = NULL;
 
-    if (bename != NULL) /* This was a restore of a single backend */
-    {
-        /* Entry filter string */
-        entry_filter = slapi_ch_smprintf("cn=%s", bename);
-
-        /* Instance search filter */
-        instance_entry_filter = slapi_ch_smprintf("(&%s(cn=%s))", DSE_INSTANCE_FILTER, bename);
-    } else {
-        instance_entry_filter = slapi_ch_strdup(DSE_INSTANCE_FILTER);
-    }
+    instance_entry_filter = slapi_ch_strdup(DSE_INSTANCE_FILTER);
 
     rval = dse_conf_verify_core(li, src_dir, DSE_INSTANCE, instance_entry_filter,
-                                "Instance Config", entry_filter);
+                                "Instance Config");
     rval += dse_conf_verify_core(li, src_dir, DSE_INDEX, DSE_INDEX_FILTER,
-                                 "Index Config", entry_filter);
+                                 "Index Config");
 
-    slapi_ch_free_string(&entry_filter);
     slapi_ch_free_string(&instance_entry_filter);
 
     return rval;

+ 293 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_instance_config.c

@@ -0,0 +1,293 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+/* This file handles configuration information that is specific
+ * to ldbm instances.
+ */
+
+#include "bdb_layer.h"
+
+
+/*------------------------------------------------------------------------
+ * Get and set functions for bdb instance variables
+ *----------------------------------------------------------------------*/
+
+/* Config getter for CONFIG_INSTANCE_DIR.
+ * Returns a freshly allocated string (caller frees): "" if no dir is set,
+ * "<parent><sep><dir>" if a parent dir is recorded, else a copy of the
+ * bare instance dir name. */
+static void *
+bdb_instance_config_instance_dir_get(void *arg)
+{
+    ldbm_instance *inst = (ldbm_instance *)arg;
+
+    if (inst->inst_dir_name == NULL)
+        return slapi_ch_strdup("");
+    else if (inst->inst_parent_dir_name) {
+        /* +2: one separator char plus the trailing NUL */
+        int len = strlen(inst->inst_parent_dir_name) +
+                  strlen(inst->inst_dir_name) + 2;
+        char *full_inst_dir = (char *)slapi_ch_malloc(len);
+        PR_snprintf(full_inst_dir, len, "%s%c%s",
+                    inst->inst_parent_dir_name, get_sep(inst->inst_parent_dir_name),
+                    inst->inst_dir_name);
+        return full_inst_dir;
+    } else
+        return slapi_ch_strdup(inst->inst_dir_name);
+}
+
+/* Config setter for CONFIG_INSTANCE_DIR.
+ * An absolute path is split into inst_parent_dir_name / inst_dir_name (both
+ * normalized via rel2abspath); a relative value is stored as-is with no parent.
+ * NOTE(review): pre-existing inst_dir_name/inst_parent_dir_name values are
+ * overwritten without being freed — confirm ownership against callers.
+ * Always returns LDAP_SUCCESS. */
+static int
+bdb_instance_config_instance_dir_set(void *arg,
+                                      void *value,
+                                      char *errorbuf __attribute__((unused)),
+                                      int phase __attribute__((unused)),
+                                      int apply)
+{
+    ldbm_instance *inst = (ldbm_instance *)arg;
+
+    if (!apply) {
+        return LDAP_SUCCESS;
+    }
+
+    if ((value == NULL) || (strlen(value) == 0)) {
+        inst->inst_dir_name = NULL;
+        inst->inst_parent_dir_name = NULL;
+    } else {
+        char *dir = (char *)value;
+        if (is_fullpath(dir)) {
+            char sep = get_sep(dir);
+            char *p = strrchr(dir, sep);
+            if (NULL == p) /* should not happen, though */
+            {
+                inst->inst_parent_dir_name = NULL;
+                inst->inst_dir_name = rel2abspath(dir); /* normalize dir;
+                                                           strdup'ed in
+                                                           rel2abspath */
+            } else {
+                /* temporarily split at the last separator, restore below */
+                *p = '\0';
+                inst->inst_parent_dir_name = rel2abspath(dir); /* normalize dir;
+                                                                  strdup'ed in
+                                                                  rel2abspath */
+                inst->inst_dir_name = slapi_ch_strdup(p + 1);
+                *p = sep;
+            }
+        } else {
+            inst->inst_parent_dir_name = NULL;
+            inst->inst_dir_name = slapi_ch_strdup(dir);
+        }
+    }
+    return LDAP_SUCCESS;
+}
+
+/*------------------------------------------------------------------------
+ * bdb instance configuration array
+ *
+ * BDB allows to specify data directories for each instance database
+ *----------------------------------------------------------------------*/
+static config_info bdb_instance_config[] = {
+    {CONFIG_INSTANCE_DIR, CONFIG_TYPE_STRING, NULL, &bdb_instance_config_instance_dir_get, &bdb_instance_config_instance_dir_set, CONFIG_FLAG_ALWAYS_SHOW},
+    {NULL, 0, NULL, NULL, NULL, 0}};
+
+/* Apply the compiled-in default value for every attribute in
+ * bdb_instance_config[] to the given instance (NULL value selects
+ * the table default in bdb_config_set). */
+void
+bdb_instance_config_setup_default(ldbm_instance *inst)
+{
+    config_info *config;
+
+    for (config = bdb_instance_config; config->config_name != NULL; config++) {
+        bdb_config_set((void *)inst, config->config_name, bdb_instance_config, NULL /* use default */, NULL, CONFIG_PHASE_INITIALIZATION, 1 /* apply */, LDAP_MOD_REPLACE);
+    }
+}
+/* Set one bdb instance-config attribute by name.
+ * Attributes not present in bdb_instance_config[] are silently accepted
+ * (they belong to other layers).  Returns LDAP_SUCCESS on success. */
+int
+bdb_instance_config_set(ldbm_instance *inst, char *attrname, int mod_apply, int mod_op, int phase, struct berval *value)
+{
+    config_info *config = config_info_get(bdb_instance_config, attrname);
+
+    if (config == NULL) {
+        /* ignore unknown attr */
+        return LDAP_SUCCESS;
+    } else {
+        return bdb_config_set((void *)inst, config->config_name, bdb_instance_config, value, NULL, phase, mod_apply, mod_op);
+    }
+}
+
+
+/*------------------------------------------------------------------------
+ * callback for instance entry handling in the bdb layer
+ * so far only used for post delete operations, but for
+ * completeness all potential callbacks are defined
+ *----------------------------------------------------------------------*/
+/* Placeholder: invoked after an instance entry is added; no bdb-specific
+ * work is required yet.  Both parameters are currently unused. */
+int
+bdb_instance_postadd_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst)
+{
+
+    /* callback to be defined, does nothing for now */
+
+    return SLAPI_DSE_CALLBACK_OK;
+}
+
+/* Placeholder: invoked when an instance entry is being added; no
+ * bdb-specific work is required yet. */
+int
+bdb_instance_add_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst)
+{
+
+    /* callback to be defined, does nothing for now */
+
+    return SLAPI_DSE_CALLBACK_OK;
+}
+
+/* After an instance entry has been deleted, remove the instance's on-disk
+ * files: database files (LDBM_FILENAME_SUFFIX) go through bdb_db_remove()
+ * so the BDB environment stays consistent; anything else is PR_Delete'd.
+ * A missing directory is not an error (it may already be gone).
+ * Always returns SLAPI_DSE_CALLBACK_OK, even if individual deletes fail
+ * (failures are only logged). */
+int
+bdb_instance_post_delete_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst)
+{
+    dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
+    struct bdb_db_env *pEnv = priv->dblayer_env;
+    if (pEnv) {
+        PRDir *dirhandle = NULL;
+        char inst_dir[MAXPATHLEN * 2];
+        char *inst_dirp = NULL;
+
+        if (inst->inst_dir_name == NULL) {
+            /* presumably fills in inst->inst_dir_name as a side effect;
+             * the return value is deliberately ignored — TODO confirm */
+            dblayer_get_instance_data_dir(inst->inst_be);
+        }
+        inst_dirp = dblayer_get_full_inst_dir(li, inst,
+                                              inst_dir, MAXPATHLEN * 2);
+        if (NULL != inst_dirp) {
+            dirhandle = PR_OpenDir(inst_dirp);
+            /* the db dir instance may have been removed already */
+            if (dirhandle) {
+                PRDirEntry *direntry = NULL;
+                char *dbp = NULL;
+                char *p = NULL;
+                while (NULL != (direntry = PR_ReadDir(dirhandle,
+                                                      PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+                    int rc;
+                    if (!direntry->name)
+                        break;
+
+                    dbp = PR_smprintf("%s/%s", inst_dirp, direntry->name);
+                    if (NULL == dbp) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_instance_post_delete_instance_entry_callback",
+                                      "Failed to generate db path: %s/%s\n",
+                                      inst_dirp, direntry->name);
+                        break;
+                    }
+
+                    /* file name ends in the db suffix -> real database file */
+                    p = strstr(dbp, LDBM_FILENAME_SUFFIX);
+                    if (NULL != p &&
+                        strlen(p) == strlen(LDBM_FILENAME_SUFFIX)) {
+                        rc = bdb_db_remove(pEnv, dbp, 0);
+                    } else {
+                        rc = PR_Delete(dbp);
+                    }
+                    PR_ASSERT(rc == 0);
+                    if (rc != 0) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_instance_post_delete_instance_entry_callback",
+                                      "Failed to delete %s, error %d\n", dbp, rc);
+                    }
+                    PR_smprintf_free(dbp);
+                }
+                PR_CloseDir(dirhandle);
+            }
+        } /* non-null inst_dirp */
+        /* dblayer_get_full_inst_dir may return a heap copy instead of the
+         * stack buffer; free only in that case */
+        if (inst_dirp != inst_dir) {
+            slapi_ch_free_string(&inst_dirp);
+        }
+    } /* non-null pEnv */
+    return SLAPI_DSE_CALLBACK_OK;
+}
+
+/* Placeholder: invoked when an instance entry is being deleted; the real
+ * cleanup happens in the post-delete callback above. */
+int
+bdb_instance_delete_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst)
+{
+
+    /* callback to be defined, does nothing for now */
+
+    return SLAPI_DSE_CALLBACK_OK;
+}
+
+/* adding bdb instance specific attributes, instance lock must be held */
+/* Populate entry 'e' with the current value of every visible attribute in
+ * bdb_instance_config[] (replacing any existing values).
+ * returncode/returntext are part of the DSE callback signature but are not
+ * used here.  Returns LDAP_SUCCESS.
+ * NOTE(review): assumes bdb_config_get never writes more than BUFSIZ bytes
+ * into buf — confirm against its implementation. */
+int
+bdb_instance_search_callback(Slapi_Entry *e, int *returncode, char *returntext, ldbm_instance *inst)
+{
+    char buf[BUFSIZ];
+    struct berval *vals[2];
+    struct berval val;
+    config_info *config;
+
+    vals[0] = &val;
+    vals[1] = NULL;
+
+    for (config = bdb_instance_config; config->config_name != NULL; config++) {
+        /* Go through the ldbm_config table and fill in the entry. */
+
+        if (!(config->config_flags & (CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_PREVIOUSLY_SET))) {
+            /* This config option shouldn't be shown */
+            continue;
+        }
+
+        bdb_config_get((void *)inst, config, buf);
+
+        val.bv_val = buf;
+        val.bv_len = strlen(buf);
+        slapi_entry_attr_replace(e, config->config_name, vals);
+    }
+
+    return LDAP_SUCCESS;
+}
+
+/* Tear down the per-instance BDB environment: close the env (closing all
+ * attached db handles), remove the env's region files via a throwaway env
+ * handle, then destroy the instance's lock/condvar and free inst->inst_db.
+ * Returns the result of the env remove (0 on success; EBUSY is ignored). */
+int
+bdb_instance_cleanup(struct ldbm_instance *inst)
+{
+    int return_value = 0;
+    /* ignore the value of env close, because at this point,
+    * work is done with import env by calling env.close,
+    * env and all the associated db handles will be closed, ignore,
+    * if sleepycat complains, that db handles are open at env close time */
+    bdb_db_env *inst_env = (bdb_db_env *)inst->inst_db;
+    DB_ENV *env = 0;
+    /* NOTE(review): the close() status OR'ed in here is discarded — the
+     * next line reassigns return_value with '=' rather than '|='. */
+    return_value |= ((bdb_db_env *)inst->inst_db)->bdb_DB_ENV->close(((bdb_db_env *)inst->inst_db)->bdb_DB_ENV, 0);
+    return_value = db_env_create(&env, 0);
+    if (return_value == 0) {
+        char inst_dir[MAXPATHLEN];
+        char *inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                                    inst_dir, MAXPATHLEN);
+        if (inst_dirp && *inst_dir) {
+            /* DB_ENV->remove also destroys the handle, success or fail */
+            return_value = env->remove(env, inst_dirp, 0);
+        } else {
+            return_value = -1;
+        }
+        if (return_value == EBUSY) {
+            return_value = 0; /* something else is using the env so ignore */
+        }
+        if (inst_dirp != inst_dir)
+            slapi_ch_free_string(&inst_dirp);
+    }
+    slapi_destroy_rwlock(inst_env->bdb_env_lock);
+    PR_DestroyCondVar(inst_env->bdb_thread_count_cv);
+    inst_env->bdb_thread_count_cv = NULL;
+    PR_DestroyLock(inst_env->bdb_thread_count_lock);
+    inst_env->bdb_thread_count_lock = NULL;
+    slapi_ch_free((void **)&inst->inst_db);
+    /* dead code left over from the refactor — inst->inst_db is already
+     * freed above, so this must stay commented out:
+    slapi_destroy_rwlock(((bdb_db_env *)inst->inst_db)->bdb_env_lock);
+    slapi_ch_free((void **)&inst->inst_db);
+    */
+
+    return return_value;
+}
+
+/* Per-instance bdb initialization hook: currently only seeds the instance
+ * with the default bdb config values.  Always returns 0. */
+int
+bdb_instance_create(struct ldbm_instance *inst)
+{
+    int return_value = 0;
+
+    /* Initialize the fields with some default values. */
+    bdb_instance_config_setup_default(inst);
+
+    return return_value;
+}

+ 6054 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.c

@@ -0,0 +1,6054 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include "bdb_layer.h"
+#include <prthread.h>
+#include <prclist.h>
+#include <sys/types.h>
+#include <sys/statvfs.h>
+
+#define DB_OPEN(oflags, db, txnid, file, database, type, flags, mode, rval)                                     \
+    {                                                                                                           \
+        if (((oflags)&DB_INIT_TXN) && ((oflags)&DB_INIT_LOG)) {                                                 \
+            (rval) = ((db)->open)((db), (txnid), (file), (database), (type), (flags) | DB_AUTO_COMMIT, (mode)); \
+        } else {                                                                                                \
+            (rval) = ((db)->open)((db), (txnid), (file), (database), (type), (flags), (mode));                  \
+        }                                                                                                       \
+    }
+
+#define TXN_BEGIN(env, parent_txn, tid, flags) \
+    (env)->txn_begin((env), (parent_txn), (tid), (flags))
+#define TXN_COMMIT(txn, flags) (txn)->commit((txn), (flags))
+#define TXN_ABORT(txn) (txn)->abort(txn)
+#define TXN_CHECKPOINT(env, kbyte, min, flags) \
+    (env)->txn_checkpoint((env), (kbyte), (min), (flags))
+#define MEMP_STAT(env, gsp, fsp, flags, malloc) \
+    (env)->memp_stat((env), (gsp), (fsp), (flags))
+#define MEMP_TRICKLE(env, pct, nwrotep) \
+    (env)->memp_trickle((env), (pct), (nwrotep))
+#define LOG_ARCHIVE(env, listp, flags, malloc) \
+    (env)->log_archive((env), (listp), (flags))
+#define LOG_FLUSH(env, lsn) (env)->log_flush((env), (lsn))
+
+/* Use these macros to incr/decrement the thread count for the
+   database housekeeping threads.  This ensures that the
+   value is changed in a thread safe manner, and safely notifies
+   the main thread during cleanup. INCR_THREAD_COUNT should be
+   the first real statement in the thread function, before any
+   actual work is done, other than perhaps variable assignments.
+   DECR_THREAD_COUNT should be called as the next to last thing
+   in the thread function, just before the trace log message and
+   return.
+*/
+#define INCR_THREAD_COUNT(pEnv)       \
+    PR_Lock(pEnv->bdb_thread_count_lock); \
+    ++pEnv->bdb_thread_count;     \
+    PR_Unlock(pEnv->bdb_thread_count_lock)
+
+#define DECR_THREAD_COUNT(pEnv)                  \
+    PR_Lock(pEnv->bdb_thread_count_lock);            \
+    if (--pEnv->bdb_thread_count == 0) {     \
+        PR_NotifyCondVar(pEnv->bdb_thread_count_cv); \
+    }                                            \
+    PR_Unlock(pEnv->bdb_thread_count_lock)
+
+#define NEWDIR_MODE 0755
+#define DB_REGION_PREFIX "__db."
+
+static int perf_threadmain(void *param);
+static int checkpoint_threadmain(void *param);
+static int trickle_threadmain(void *param);
+static int deadlock_threadmain(void *param);
+static int commit_good_database(bdb_config *priv, int mode);
+static int read_metadata(struct ldbminfo *li);
+static int count_dbfiles_in_dir(char *directory, int *count, int recurse);
+static int dblayer_override_libdb_functions(void);
+static int dblayer_force_checkpoint(struct ldbminfo *li);
+static int log_flush_threadmain(void *param);
+static int dblayer_delete_transaction_logs(const char *log_dir);
+static int dblayer_is_logfilename(const char *path);
+static int bdb_start_log_flush_thread(struct ldbminfo *li);
+static int bdb_start_deadlock_thread(struct ldbminfo *li);
+static int bdb_start_checkpoint_thread(struct ldbminfo *li);
+static int bdb_start_trickle_thread(struct ldbminfo *li);
+static int bdb_start_perf_thread(struct ldbminfo *li);
+static int bdb_start_txn_test_thread(struct ldbminfo *li);
+static int trans_batch_count = 0;
+static int trans_batch_limit = 0;
+static int trans_batch_txn_min_sleep = 50; /* ms */
+static int trans_batch_txn_max_sleep = 50;
+static PRBool log_flush_thread = PR_FALSE;
+static int txn_in_progress_count = 0;
+static int *txn_log_flush_pending = NULL;
+static PRLock *sync_txn_log_flush = NULL;
+static PRCondVar *sync_txn_log_flush_done = NULL;
+static PRCondVar *sync_txn_log_do_flush = NULL;
+static int bdb_db_remove_ex(bdb_db_env *env, char const path[], char const dbName[], PRBool use_lock);
+static int bdb_restore_file_check(struct ldbminfo *li);
+
+#define MEGABYTE (1024 * 1024)
+#define GIGABYTE (1024 * MEGABYTE)
+
+/* env. vars. you can set to stress txn handling */
+#define TXN_TESTING "TXN_TESTING"               /* enables the txn test thread */
+#define TXN_TEST_HOLD_MSEC "TXN_TEST_HOLD_MSEC" /* time to hold open the db cursors */
+#define TXN_TEST_LOOP_MSEC "TXN_TEST_LOOP_MSEC" /* time to wait before looping again */
+#define TXN_TEST_USE_TXN "TXN_TEST_USE_TXN"     /* use transactions or not */
+#define TXN_TEST_USE_RMW "TXN_TEST_USE_RMW"     /* use DB_RMW for c_get flags or not */
+#define TXN_TEST_INDEXES "TXN_TEST_INDEXES"     /* list of indexes to use - comma delimited - id2entry,entryrdn,etc. */
+#define TXN_TEST_VERBOSE "TXN_TEST_VERBOSE"     /* be wordy */
+
+/* This function compares two index keys.  It is assumed
+   that the values are already normalized, since they should have
+   been when the index was created (by int_values2keys).
+
+   richm - actually, the current syntax compare functions
+   always normalize both arguments.  We need to add an additional
+   syntax compare function that does not normalize or takes
+   an argument like value_cmp to specify to normalize or not.
+
+   More fun - this function is used to compare both raw database
+   keys (e.g. with the prefix '=' or '+' or '*' etc.) and without
+   (in the case of two equality keys, we want to strip off the
+   leading '=' to compare the actual values).  We only use the
+   value_compare function if both keys are equality keys with
+   some data after the equality prefix.  In every other case,
+   we will just use a standard berval cmp function.
+
+   see also DBTcmp
+*/
+
+/* BDB btree comparison callback (see the long comment above).
+ * If both keys are equality-indexed values ('=' prefix with payload), strip
+ * the prefix and compare with the attribute's syntax compare function stored
+ * in db->app_private; otherwise fall back to a plain berval comparison. */
+int
+bdb_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2)
+{
+    struct berval bv1, bv2;
+    value_compare_fn_type syntax_cmp_fn = (value_compare_fn_type)db->app_private;
+
+    if ((dbt1->data && (dbt1->size > 1) && (*((char *)dbt1->data) == EQ_PREFIX)) &&
+        (dbt2->data && (dbt2->size > 1) && (*((char *)dbt2->data) == EQ_PREFIX))) {
+        bv1.bv_val = (char *)dbt1->data + 1; /* remove leading '=' */
+        bv1.bv_len = (ber_len_t)dbt1->size - 1;
+
+        bv2.bv_val = (char *)dbt2->data + 1; /* remove leading '=' */
+        bv2.bv_len = (ber_len_t)dbt2->size - 1;
+
+        return syntax_cmp_fn(&bv1, &bv2);
+    }
+
+    /* else compare two "raw" index keys */
+    bv1.bv_val = (char *)dbt1->data;
+    bv1.bv_len = (ber_len_t)dbt1->size;
+
+    bv2.bv_val = (char *)dbt2->data;
+    bv2.bv_len = (ber_len_t)dbt2->size;
+
+    return slapi_berval_cmp(&bv1, &bv2);
+}
+
+
+/* this flag is used if user remotely turned batching off */
+#define FLUSH_REMOTEOFF 0
+
+/* routine that allows batch value to be changed remotely:
+
+    1. value = 0 turns batching off
+    2. value = 1 makes behavior be like 5.0 but leaves batching on
+    3. value > 1 changes batch value
+
+    2 and 3 assume that nsslapd-db-transaction-batch-val is greater 0 at startup
+*/
+
+/* Config setter for nsslapd-db-transaction-batch-val (see comment above).
+ * At startup the value is stored directly; at runtime, 0 disables batching
+ * (and stops the log-flush thread), >0 updates the limit.  The lock is only
+ * taken when the flush thread is running so it observes a consistent pair
+ * (trans_batch_limit, log_flush_thread).
+ * NOTE(review): the log messages still carry the pre-rename subsystem name
+ * "dblayer_set_batch_transactions" — runtime string, left untouched here. */
+int
+bdb_set_batch_transactions(void *arg __attribute__((unused)), void *value, char *errorbuf __attribute__((unused)), int phase, int apply)
+{
+    int val = (int)((uintptr_t)value);
+    int retval = LDAP_SUCCESS;
+
+    if (apply) {
+        if (phase == CONFIG_PHASE_STARTUP) {
+            trans_batch_limit = val;
+        } else {
+            if (val == 0) {
+                if (log_flush_thread) {
+                    PR_Lock(sync_txn_log_flush);
+                }
+                trans_batch_limit = FLUSH_REMOTEOFF;
+                if (log_flush_thread) {
+                    log_flush_thread = PR_FALSE;
+                    PR_Unlock(sync_txn_log_flush);
+                }
+            } else if (val > 0) {
+                if (trans_batch_limit == FLUSH_REMOTEOFF) {
+                    /* this requires a server restart to take effect */
+                    slapi_log_err(SLAPI_LOG_NOTICE, "dblayer_set_batch_transactions", "Enabling batch transactions "
+                                                                                      "requires a server restart.\n");
+                } else if (!log_flush_thread) {
+                    /* we are already disabled, log a reminder of that fact. */
+                    slapi_log_err(SLAPI_LOG_NOTICE, "dblayer_set_batch_transactions", "Batch transactions was "
+                                                                                      "previously disabled, this update requires a server restart.\n");
+                }
+                trans_batch_limit = val;
+            }
+        }
+    }
+    return retval;
+}
+
+/* Config setter for the minimum batch-txn sleep (ms).
+ * NOTE(review): the val==0 branch mirrors bdb_set_batch_transactions and
+ * also clears log_flush_thread — confirm that setting the *sleep* to 0 is
+ * really meant to stop the flush thread. Log messages still carry the
+ * pre-rename name "dblayer_set_batch_txn_min_sleep". */
+int
+bdb_set_batch_txn_min_sleep(void *arg __attribute__((unused)), void *value, char *errorbuf __attribute__((unused)), int phase, int apply)
+{
+    int val = (int)((uintptr_t)value);
+    int retval = LDAP_SUCCESS;
+
+    if (apply) {
+        if (phase == CONFIG_PHASE_STARTUP || phase == CONFIG_PHASE_INITIALIZATION) {
+            trans_batch_txn_min_sleep = val;
+        } else {
+            if (val == 0) {
+                if (log_flush_thread) {
+                    PR_Lock(sync_txn_log_flush);
+                }
+                trans_batch_txn_min_sleep = FLUSH_REMOTEOFF;
+                if (log_flush_thread) {
+                    log_flush_thread = PR_FALSE;
+                    PR_Unlock(sync_txn_log_flush);
+                }
+            } else if (val > 0) {
+                if (trans_batch_txn_min_sleep == FLUSH_REMOTEOFF || !log_flush_thread) {
+                    /* this really has no effect until batch transactions are enabled */
+                    slapi_log_err(SLAPI_LOG_WARNING, "dblayer_set_batch_txn_min_sleep", "Warning batch transactions "
+                                                                                        "is not enabled.\n");
+                }
+                trans_batch_txn_min_sleep = val;
+            }
+        }
+    }
+    return retval;
+}
+
+/* Config setter for the maximum batch-txn sleep (ms); same structure and
+ * caveats as bdb_set_batch_txn_min_sleep above. */
+int
+bdb_set_batch_txn_max_sleep(void *arg __attribute__((unused)), void *value, char *errorbuf __attribute__((unused)), int phase, int apply)
+{
+    int val = (int)((uintptr_t)value);
+    int retval = LDAP_SUCCESS;
+
+    if (apply) {
+        if (phase == CONFIG_PHASE_STARTUP || phase == CONFIG_PHASE_INITIALIZATION) {
+            trans_batch_txn_max_sleep = val;
+        } else {
+            if (val == 0) {
+                if (log_flush_thread) {
+                    PR_Lock(sync_txn_log_flush);
+                }
+                trans_batch_txn_max_sleep = FLUSH_REMOTEOFF;
+                if (log_flush_thread) {
+                    log_flush_thread = PR_FALSE;
+                    PR_Unlock(sync_txn_log_flush);
+                }
+            } else if (val > 0) {
+                if (trans_batch_txn_max_sleep == FLUSH_REMOTEOFF || !log_flush_thread) {
+                    /* this really has no effect until batch transactions are enabled */
+                    slapi_log_err(SLAPI_LOG_WARNING,
+                                  "dblayer_set_batch_txn_max_sleep", "Warning batch transactions "
+                                                                     "is not enabled.\n");
+                }
+                trans_batch_txn_max_sleep = val;
+            }
+        }
+    }
+    return retval;
+}
+
+/* Config getter: current batch-transaction limit, cast for the config table. */
+void *
+bdb_get_batch_transactions(void *arg __attribute__((unused)))
+{
+    return (void *)((uintptr_t)trans_batch_limit);
+}
+
+/* Config getter: current minimum batch-txn sleep (ms). */
+void *
+bdb_get_batch_txn_min_sleep(void *arg __attribute__((unused)))
+{
+    return (void *)((uintptr_t)trans_batch_txn_min_sleep);
+}
+
+/* Config getter: current maximum batch-txn sleep (ms). */
+void *
+bdb_get_batch_txn_max_sleep(void *arg __attribute__((unused)))
+{
+    return (void *)((uintptr_t)trans_batch_txn_max_sleep);
+}
+
+/*
+    Threading: dblayer isolates upper layers from threading considerations
+    Everything in dblayer is free-threaded. That is, you can have multiple
+    threads performing operations on a database and not worry about things.
+    Obviously, if you do something stupid, like move a cursor forward in
+    one thread, and backwards in another at the same time, you get what you
+    deserve. However, such a calling pattern will not crash your application !
+*/
+
+/* Run a transaction checkpoint on the given env.
+ * busy_skip: return 0 without checkpointing if any backend instance is busy.
+ * db_force: pass DB_FORCE so BDB checkpoints even if nothing changed. */
+static int
+dblayer_txn_checkpoint(struct ldbminfo *li, bdb_db_env *env, PRBool busy_skip, PRBool db_force)
+{
+    int ret = 0;
+    if (busy_skip && is_anyinstance_busy(li)) {
+        return ret;
+    }
+    ret = TXN_CHECKPOINT(env->bdb_DB_ENV, db_force ? DB_FORCE : 0, 0, 0);
+    return ret;
+}
+
+
+/*
+ * return nsslapd-db-home-directory (bdb_dbhome_directory), if exists.
+ * Otherwise, return nsslapd-directory (bdb_home_directory).
+ *
+ * if bdb_dbhome_directory exists, set 1 to dbhome.
+ */
+/* See comment above: prefer nsslapd-db-home-directory, fall back to
+ * nsslapd-directory.  Returns a borrowed pointer into the config (caller
+ * must NOT free); may return NULL, in which case a warning is logged. */
+char *
+bdb_get_home_dir(struct ldbminfo *li, int *dbhome)
+{
+    bdb_config *priv = (bdb_config *)li->li_dblayer_config;
+    char *home_dir = li->li_directory;
+    if (dbhome)
+        *dbhome = 0;
+
+    if (priv->bdb_dbhome_directory && *(priv->bdb_dbhome_directory)) {
+        if (dbhome)
+            *dbhome = 1;
+        home_dir = priv->bdb_dbhome_directory;
+    }
+    if (NULL == home_dir) {
+        slapi_log_err(SLAPI_LOG_WARNING, "bdb_get_home_dir", "Db home directory is not set. "
+                                                                 "Possibly %s (optionally %s) is missing in the config file.\n",
+                      CONFIG_DIRECTORY, CONFIG_DB_HOME_DIRECTORY);
+    }
+    return home_dir;
+}
+
+/* Helper function which deletes the persistent state of the database library
+ * IMHO this should be in inside libdb, but keith won't have it.
+ * Stop press---libdb now does delete these files on recovery, so we don't call this any more.
+ */
+/* Destroy the persistent BDB environment region files under the db home
+ * directory (DB_FORCE removes them even if the env appears in use).
+ * See the header comment above: kept for historical reasons, no longer on
+ * the normal recovery path. */
+static void
+dblayer_reset_env(struct ldbminfo *li)
+{
+    /* Remove the memory regions */
+    dblayer_private *priv = li->li_dblayer_private;
+    DB_ENV *pEnv = ((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV;
+    char *home_dir = bdb_get_home_dir(li, NULL);
+    if (home_dir && *home_dir)
+        pEnv->remove(pEnv, home_dir, DB_FORCE);
+}
+
+/* Function which calls libdb to override some system calls which
+ * the library makes. We call this before calling any other function
+ * in libdb.
+ * Several OS use this, either partially or completely.
+ * This will eventually change---we will simply pass to libdb
+ * the addresses of a bunch of NSPR functions, and everything
+ * will magically work on all platforms (Ha!)
+ */
+
+#ifdef DB_USE_64LFS
+/* What is going on here ?
+ * Well, some platforms now support an extended API for dealing with
+ * files larger than 2G.  (This apparently comes from the LFS -- "Large
+ * File Summit"... Summit, indeed.)  Anyway, we try to detect at runtime
+ * whether this machine has the extended API, and use it if it's present.
+ *
+ */
+
+
+/* helper function for open64 */
+/* Wraps open64() for use as libdb's file-open override. */
+static int
+dblayer_open_large(const char *path, int oflag, mode_t mode)
+{
+    int err;
+
+    err = open64(path, oflag, mode);
+    /* weird but necessary: clear errno on success, libdb checks it */
+    if (err >= 0)
+        errno = 0;
+    return err;
+}
+
+/* this is REALLY dumb.  but nspr 19980529(x) doesn't support 64-bit files
+ * because of some weirdness we're doing at initialization (?), so we need
+ * to export some function that can open huge files, so that exporting
+ * can work right.  when we fix the nspr problem (or get a more recent
+ * version of nspr that might magically work?), this should be blown away.
+ * (call mode_t an int because NT can't handle that in prototypes.)
+ * -robey, 28oct98
+ */
+int
+bdb_open_huge_file(const char *path, int oflag, int mode)
+{
+    return dblayer_open_large(path, oflag, (mode_t)mode);
+}
+
+/* Helper function for large seeks, db4.3 */
+/* Returns 0 on success, errno on failure (libdb seek-override convention). */
+static int
+dblayer_seek43_large(int fd, off64_t offset, int whence)
+{
+    off64_t ret = 0;
+
+    ret = lseek64(fd, offset, whence);
+
+    return (ret < 0) ? errno : 0;
+}
+
+/* helper function for large fstat -- this depends on 'struct stat64' having
+ * the following members:
+ *    off64_t        st_size;
+ *      long        st_blksize;
+ */
+/* Reports file size as MB + remainder bytes plus the preferred I/O size.
+ * Any output pointer may be NULL.  Returns 0 on success, errno on failure. */
+static int
+dblayer_ioinfo_large(const char *path __attribute__((unused)), int fd, u_int32_t *mbytesp, u_int32_t *bytesp, u_int32_t *iosizep)
+{
+    struct stat64 sb;
+
+    if (fstat64(fd, &sb) < 0)
+        return (errno);
+
+    /* Return the size of the file. */
+    if (mbytesp)
+        *mbytesp = (u_int32_t)(sb.st_size / (off64_t)MEGABYTE);
+    if (bytesp)
+        *bytesp = (u_int32_t)(sb.st_size % (off64_t)MEGABYTE);
+
+    if (iosizep)
+        *iosizep = (u_int32_t)(sb.st_blksize);
+    return 0;
+}
+/* Helper function to tell if a file exists */
+/* On Solaris, if you use stat() on a file >4Gbytes, it fails with EOVERFLOW,
+   causing us to think that the file does not exist when it in fact does */
+/* Returns 0 if the file exists (setting *isdirp if requested), errno otherwise. */
+static int
+dblayer_exists_large(const char *path, int *isdirp)
+{
+    struct stat64 sb;
+
+    if (stat64(path, &sb) != 0)
+        return (errno);
+
+    if (isdirp != NULL)
+        *isdirp = S_ISDIR(sb.st_mode);
+
+    return (0);
+}
+
+#else /* DB_USE_64LFS */
+
+/* Non-LFS build: a plain open() is sufficient. */
+int
+bdb_open_huge_file(const char *path, int oflag, int mode)
+{
+    return open(path, oflag, mode);
+}
+
+#endif /* DB_USE_64LFS */
+
+
+/* Install our large-file-aware I/O callbacks into libdb (see the comment
+ * block above).  No-op unless built with DB_USE_64LFS.  Always returns 0.
+ * NOTE(review): major/minor from db_version() are currently unused. */
+static int
+dblayer_override_libdb_functions(void)
+{
+#ifdef DB_USE_64LFS
+    int major = 0;
+    int minor = 0;
+
+    /* Find out whether we are talking to a 2.3 or 2.4+ libdb */
+    db_version(&major, &minor, NULL);
+
+#ifndef irix
+    /* irix doesn't have open64() */
+    db_env_set_func_open((int (*)(const char *, int, ...))dblayer_open_large);
+#endif /* !irix */
+    db_env_set_func_ioinfo(dblayer_ioinfo_large);
+    db_env_set_func_exists(dblayer_exists_large);
+    db_env_set_func_seek((int (*)(int, off_t, int))dblayer_seek43_large);
+
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_override_libdb_function", "Enabled 64-bit files\n");
+#endif /* DB_USE_64LFS */
+    return 0;
+}
+
+/* Choose the number of BDB cache regions (ncache) for a given cache size.
+ * A user-supplied non-zero value is always honored. */
+static void
+dblayer_select_ncache(size_t cachesize, int *ncachep)
+{
+    /* First thing, if the user asked to use a particular ncache,
+     * we let them, and don't override it here.
+     */
+    if (*ncachep) {
+        return;
+    }
+    /* If the user asked for a cache that's larger than 4G,
+     * we _must_ select an ncache >0 , such that each
+     * chunk is <4G. This is because DB won't accept a
+     * larger chunk.
+     */
+#if defined(__LP64__) || defined(_LP64)
+    if ((sizeof(cachesize) > 4) && (cachesize > (4L * GIGABYTE))) {
+        *ncachep = (cachesize / (4L * GIGABYTE)) + 1;
+        slapi_log_err(SLAPI_LOG_NOTICE, "dblayer_select_ncache", "Setting ncache to: %d to keep each chunk below 4Gbytes\n",
+                      *ncachep);
+    }
+#endif
+}
+
+/* Free callback handed to DB_ENV->set_alloc so libdb releases memory through
+ * the slapi allocator.  Passing &ptr is safe: slapi_ch_free frees *ptr and
+ * only NULLs the local copy. */
+void
+dblayer_free(void *ptr)
+{
+    slapi_ch_free(&ptr);
+}
+
+/* Apply the bdb_config settings (cache size/ncache, log file size, lock
+ * limits, shm key, tx max, allocator, log region size) to a freshly created
+ * DB_ENV before it is opened.  May bump conf->bdb_ncache so each cache
+ * chunk stays below 4GB.
+ * NOTE(review): the priv parameter is currently unused in this function. */
+static void
+bdb_init_dbenv(DB_ENV *pEnv, bdb_config *conf, dblayer_private *priv)
+{
+    size_t mysize;
+    int myncache = 1;
+
+    mysize = conf->bdb_cachesize;
+    myncache = conf->bdb_ncache;
+    dblayer_select_ncache(mysize, &myncache);
+    conf->bdb_ncache = myncache;
+
+    bdb_set_env_debugging(pEnv, conf);
+
+    pEnv->set_lg_max(pEnv, conf->bdb_logfile_size);
+    pEnv->set_cachesize(pEnv, mysize / GIGABYTE, mysize % GIGABYTE, myncache);
+    pEnv->set_lk_max_locks(pEnv, conf->bdb_lock_config);
+    pEnv->set_lk_max_objects(pEnv, conf->bdb_lock_config);
+    pEnv->set_lk_max_lockers(pEnv, conf->bdb_lock_config);
+
+    /* shm_key required for named_regions (DB_SYSTEM_MEM) */
+    pEnv->set_shm_key(pEnv, conf->bdb_shm_key);
+
+    /* increase max number of active transactions */
+    pEnv->set_tx_max(pEnv, conf->bdb_tx_max);
+
+    pEnv->set_alloc(pEnv, (void *)slapi_ch_malloc, (void *)slapi_ch_realloc, dblayer_free);
+
+    /*
+     * The log region is used to store filenames and so needs to be
+     * increased in size from the default for a large number of files.
+     */
+    pEnv->set_lg_regionmax(pEnv, 1 * 1048576); /* 1 MB */
+}
+
+
+/*
+ * Dump the effective BDB/dblayer configuration to the error log at
+ * TRACE level.  Reads both the BDB-specific config (li->li_dblayer_config)
+ * and the generic dblayer private data (li->li_dblayer_private); called
+ * from bdb_make_env() before the DB_ENV is opened so a debug log records
+ * the exact settings in effect.  Pure logging; no state is modified.
+ */
+static void
+dblayer_dump_config_tracing(struct ldbminfo *li)
+{
+    bdb_config *conf =(bdb_config *)li->li_dblayer_config;
+    dblayer_private *priv = li->li_dblayer_private;
+    /* Directory settings may legitimately be unset; only log when present. */
+    if (conf->bdb_home_directory) {
+        slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "home_directory=%s\n", conf->bdb_home_directory);
+    }
+    if (conf->bdb_log_directory) {
+        slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "log_directory=%s\n", conf->bdb_log_directory);
+    }
+    if (conf->bdb_dbhome_directory) {
+        slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "dbhome_directory=%s\n", conf->bdb_dbhome_directory);
+    }
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "trickle_percentage=%d\n", conf->bdb_trickle_percentage);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "page_size=%" PRIu32 "\n", conf->bdb_page_size);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "index_page_size=%" PRIu32 "\n", conf->bdb_index_page_size);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "cachesize=%" PRIu64 "\n", conf->bdb_cachesize);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "previous_cachesize=%" PRIu64 "\n", conf->bdb_previous_cachesize);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "ncache=%d\n", conf->bdb_ncache);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "previous_ncache=%d\n", conf->bdb_previous_ncache);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "recovery_required=%d\n", conf->bdb_recovery_required);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "durable_transactions=%d\n", conf->bdb_durable_transactions);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "checkpoint_interval=%d\n", conf->bdb_checkpoint_interval);
+    /* trans_batch_limit is a file-scope global, not part of bdb_config */
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "transaction_batch_val=%d\n", trans_batch_limit);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "circular_logging=%d\n", conf->bdb_circular_logging);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "idl_divisor=%d\n", priv->dblayer_idl_divisor);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "logfile_size=%" PRIu64 "\n", conf->bdb_logfile_size);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "logbuf_size=%" PRIu64 "\n", conf->bdb_logbuf_size);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "file_mode=%d\n", priv->dblayer_file_mode);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "cache_config=%d\n", conf->bdb_cache_config);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "lib_version=%d\n", conf->bdb_lib_version);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "spin_count=%d\n", conf->bdb_spin_count);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "named_regions=%d\n", conf->bdb_named_regions);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "private mem=%d\n", conf->bdb_private_mem);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "private import mem=%d\n", conf->bdb_private_import_mem);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "shm_key=%ld\n", conf->bdb_shm_key);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "lockdown=%d\n", conf->bdb_lockdown);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "locks=%d\n", conf->bdb_lock_config);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "previous_locks=%d\n", conf->bdb_previous_lock_config);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_dump_config_tracing", "tx_max=%d\n", conf->bdb_tx_max);
+}
+
+/* Check a given filesystem directory for access we need */
+#define DBLAYER_DIRECTORY_READ_ACCESS 1
+#define DBLAYER_DIRECTORY_WRITE_ACCESS 2
+#define DBLAYER_DIRECTORY_READWRITE_ACCESS 3
+/*
+ * Verify that 'directory' exists and that its files can be opened with
+ * the access requested in 'flags' (DBLAYER_DIRECTORY_*_ACCESS bits).
+ * If the directory cannot be opened at all, any blocking file is deleted
+ * and the directory is (re)created with mode 0700.
+ *
+ * Returns 0 on success (note: files that fail to open are only logged
+ * as warnings, they do not change the return value), or the non-zero
+ * result of mkdir_p() when the directory had to be created.
+ */
+static int
+dblayer_grok_directory(char *directory, int flags)
+{
+    /* First try to open the directory using NSPR */
+    /* If that fails, we can tell whether it's because it cannot be created or
+     * we don't have any permission to access it */
+    /* If that works, proceed to try to access files in the directory */
+    char filename[MAXPATHLEN];
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    PRFileInfo64 info;
+
+    dirhandle = PR_OpenDir(directory);
+    if (NULL == dirhandle) {
+        /* it does not exist or wrong file is there */
+        /* try delete and mkdir */
+        PR_Delete(directory);
+        return mkdir_p(directory, 0700);
+    }
+
+    while (NULL !=
+           (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        if (NULL == direntry->name) {
+            break;
+        }
+        PR_snprintf(filename, MAXPATHLEN, "%s/%s", directory, direntry->name);
+
+        /* Right now this is set up to only look at files here.
+         * With multiple instances of the backend there are now other
+         * directories in the db home directory.  This function wasn't meant
+         * to deal with other directories, so we skip them. */
+        if (PR_GetFileInfo64(filename, &info) == PR_SUCCESS &&
+            info.type == PR_FILE_DIRECTORY) {
+            /* go into it (instance dir) */
+            /* NOTE(review): recursing returns immediately, so any entries
+             * after the first subdirectory are never examined — confirm
+             * this shortcut is intended. */
+            int retval = dblayer_grok_directory(filename, flags);
+            PR_CloseDir(dirhandle);
+            return retval;
+        }
+
+        /* If we are here, it means that the directory exists, that we can read
+         * from it, and that there is at least one file there */
+        /* We will try to open that file now if we were asked for read access */
+        if (flags) {
+            PRFileDesc *prfd;
+            PRIntn open_flags = 0;
+            char *access_string = NULL;
+
+            /* WRITE implies RDWR; a lone READ request opens read-only. */
+            if (DBLAYER_DIRECTORY_READ_ACCESS & flags) {
+                open_flags = PR_RDONLY;
+            }
+            if (DBLAYER_DIRECTORY_WRITE_ACCESS & flags) {
+                open_flags = PR_RDWR;
+            }
+            /* Let's hope that on Solaris we get to open large files OK */
+            prfd = PR_Open(filename, open_flags, 0);
+            if (NULL == prfd) {
+                /* Pick a label for the warning message.  NOTE(review): a
+                 * WRITE-only request (flags == 2) falls through to "****";
+                 * presumably only READ/READWRITE are used in practice. */
+                if (DBLAYER_DIRECTORY_READ_ACCESS == flags) {
+                    access_string = "read";
+                } else {
+                    if (DBLAYER_DIRECTORY_READ_ACCESS & flags) {
+                        access_string = "write";
+                    } else {
+                        access_string = "****";
+                    }
+                }
+                /* If we're here, it means that we did not have the requested
+                 * permission on this file */
+                slapi_log_err(SLAPI_LOG_WARNING,
+                              "dblayer_grok_directory", "No %s permission to file %s\n",
+                              access_string, filename);
+            } else {
+                PR_Close(prfd); /* okay */
+            }
+        }
+    }
+    PR_CloseDir(dirhandle);
+    return 0;
+}
+
+/*
+ * Register every entry of 'data_directories' with the environment's
+ * DB_ENV via set_data_dir().  Guarded by DBLAYER_PRIV_SET_DATA_DIR in
+ * pEnv->bdb_priv_flags so the registration happens at most once per env.
+ */
+static void
+bdb_set_data_dir(bdb_db_env *pEnv, char **data_directories)
+{
+    char **cur;
+
+    if (pEnv->bdb_priv_flags & DBLAYER_PRIV_SET_DATA_DIR) {
+        /* already done for this environment */
+        return;
+    }
+    cur = data_directories;
+    while (cur && *cur) {
+        pEnv->bdb_DB_ENV->set_data_dir(pEnv->bdb_DB_ENV, *cur);
+        cur++;
+    }
+    pEnv->bdb_priv_flags |= DBLAYER_PRIV_SET_DATA_DIR;
+}
+
+/*
+ * Test whether a database file exists for the given instance.
+ * 'dbname' selects a specific file; when NULL the default
+ * id2entry file name (ID2ENTRY LDBM_FILENAME_SUFFIX) is probed.
+ * Returns 1 when the file exists, 0 otherwise.
+ */
+static int
+dblayer_inst_exists(ldbm_instance *inst, char *dbname)
+{
+    char id2entry_file[MAXPATHLEN];
+    char *parent_dir = inst->inst_parent_dir_name;
+    char sep = get_sep(parent_dir);
+    const char *target = dbname ? dbname : ID2ENTRY LDBM_FILENAME_SUFFIX;
+
+    /* Build <parent><sep><instance><sep><file> and probe it. */
+    PR_snprintf(id2entry_file, sizeof(id2entry_file), "%s%c%s%c%s",
+                parent_dir, sep, inst->inst_dir_name, sep, target);
+    return (PR_Access(id2entry_file, PR_ACCESS_EXISTS) == PR_SUCCESS) ? 1 : 0;
+}
+
+/*
+ * Release a bdb_db_env allocated by bdb_make_env() and NULL the caller's
+ * pointer.  Destroys the env rwlock, the thread-count condvar/lock pair,
+ * and finally the structure itself.  Safe to call with NULL / *NULL.
+ * Note: the DB_ENV handle itself is not closed here; callers close it
+ * before freeing.
+ */
+static void
+bdb_free_env(void **arg)
+{
+    bdb_db_env **env = (bdb_db_env **)arg;
+
+    if (env == NULL || *env == NULL) {
+        return;
+    }
+    if ((*env)->bdb_env_lock != NULL) {
+        slapi_destroy_rwlock((*env)->bdb_env_lock);
+        (*env)->bdb_env_lock = NULL;
+    }
+    PR_DestroyCondVar((*env)->bdb_thread_count_cv);
+    (*env)->bdb_thread_count_cv = NULL;
+    PR_DestroyLock((*env)->bdb_thread_count_lock);
+    (*env)->bdb_thread_count_lock = NULL;
+    slapi_ch_free((void **)env);
+}
+
+/*
+ * create a new DB_ENV and fill it with the goodies from dblayer_private
+ *
+ * On success, *env receives the fully initialized environment and 0 is
+ * returned.  On failure a non-zero code is returned, the partially built
+ * environment is torn down, and *env is left untouched.
+ */
+static int
+bdb_make_env(bdb_db_env **env, struct ldbminfo *li)
+{
+    bdb_config *conf = (bdb_config *)li->li_dblayer_config;
+    bdb_db_env *pEnv;
+    char *home_dir = NULL;
+    int ret;
+    Object *inst_obj;
+    ldbm_instance *inst = NULL;
+
+    pEnv = (bdb_db_env *)slapi_ch_calloc(1, sizeof(bdb_db_env));
+
+    pEnv->bdb_thread_count_lock = PR_NewLock();
+    pEnv->bdb_thread_count_cv = PR_NewCondVar(pEnv->bdb_thread_count_lock);
+
+    if ((ret = db_env_create(&pEnv->bdb_DB_ENV, 0)) != 0) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_make_env", "Failed to create DB_ENV (returned: %d).\n",
+                      ret);
+        /* Bug fix: bail out instead of falling through and dereferencing
+         * a NULL pEnv->bdb_DB_ENV below. */
+        goto fail;
+    }
+
+    pEnv->bdb_DB_ENV->set_flags(pEnv->bdb_DB_ENV, DB_REGION_INIT, 1);
+
+    /* Here we overide various system functions called by libdb */
+    ret = dblayer_override_libdb_functions();
+    if (ret != 0) {
+        goto fail;
+    }
+
+    if (conf->bdb_spin_count != 0) {
+        pEnv->bdb_DB_ENV->mutex_set_tas_spins(pEnv->bdb_DB_ENV,
+                                              conf->bdb_spin_count);
+    }
+
+    dblayer_dump_config_tracing(li);
+
+    /* set data dir to avoid having absolute paths in the transaction log */
+    for (inst_obj = objset_first_obj(li->li_instance_set);
+         inst_obj;
+         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+        inst = (ldbm_instance *)object_get_data(inst_obj);
+        if (inst->inst_parent_dir_name) {
+            if (!charray_utf8_inlist(conf->bdb_data_directories,
+                                     inst->inst_parent_dir_name)) {
+                charray_add(&(conf->bdb_data_directories),
+                            slapi_ch_strdup(inst->inst_parent_dir_name));
+            }
+        }
+    }
+    home_dir = bdb_get_home_dir(li, NULL);
+    /* user specified db home */
+    if (home_dir && *home_dir &&
+        !charray_utf8_inlist(conf->bdb_data_directories, home_dir)) {
+        charray_add(&(conf->bdb_data_directories), slapi_ch_strdup(home_dir));
+    }
+
+    /* user specified log dir */
+    if (conf->bdb_log_directory && *(conf->bdb_log_directory)) {
+        pEnv->bdb_DB_ENV->set_lg_dir(pEnv->bdb_DB_ENV,
+                                     conf->bdb_log_directory);
+    }
+
+    /* set up cache sizes */
+    bdb_init_dbenv(pEnv->bdb_DB_ENV, conf, li->li_dblayer_private);
+
+    pEnv->bdb_env_lock = slapi_new_rwlock();
+
+    if (pEnv->bdb_env_lock) {
+        *env = pEnv;
+        pEnv = NULL; /* do not free below */
+    } else {
+        /* Bug fix: report the rwlock allocation failure to the caller;
+         * previously 'ret' could still be 0 here although *env was never
+         * set, so the caller believed the env had been created. */
+        ret = -1;
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_make_env", "Failed to create RWLock (returned: %d).\n",
+                      ret);
+    }
+
+fail:
+    if (pEnv) {
+        /* Partial construction: drop the collected data dirs and close the
+         * DB_ENV handle (if it was created) before freeing the wrapper. */
+        slapi_ch_array_free(conf->bdb_data_directories);
+        conf->bdb_data_directories = NULL;
+        if (pEnv->bdb_DB_ENV) {
+            pEnv->bdb_DB_ENV->close(pEnv->bdb_DB_ENV, 0);
+        }
+        bdb_free_env((void **)&pEnv); /* pEnv is now garbage */
+    }
+    return ret;
+}
+
+/*
+ *  Get the total size of all the __db files
+ *  (region files whose names start with DB_REGION_PREFIX, directly
+ *  under 'dir').  Returns 0 when the directory cannot be opened.
+ */
+static PRUint64
+dblayer_get_region_size(const char *dir)
+{
+    PRDir *dh;
+    PRDirEntry *entry;
+    PRUint64 total = 0;
+
+    dh = PR_OpenDir(dir);
+    if (dh == NULL) {
+        return 0;
+    }
+    for (entry = PR_ReadDir(dh, PR_SKIP_DOT | PR_SKIP_DOT_DOT);
+         entry != NULL;
+         entry = PR_ReadDir(dh, PR_SKIP_DOT | PR_SKIP_DOT_DOT)) {
+        PRFileInfo64 info;
+        char path[MAXPATHLEN];
+
+        if (entry->name == NULL) {
+            continue;
+        }
+        /* only count region files ("__db.*") */
+        if (strncmp(entry->name, DB_REGION_PREFIX, 5) != 0) {
+            continue;
+        }
+        PR_snprintf(path, MAXPATHLEN, "%s/%s", dir, entry->name);
+        if (PR_GetFileInfo64(path, &info) != PR_FAILURE) {
+            total += info.size;
+        }
+    }
+    PR_CloseDir(dh);
+
+    return total;
+}
+
+/*
+ *  Check that there is enough room for the dbcache and region files.
+ *  We can ignore this check if using db_home_dir and shared/private memory.
+ *
+ *  Returns 0 when there is enough space (or the check does not apply),
+ *  1 when space is insufficient or the filesystem cannot be queried.
+ */
+static int
+no_diskspace(struct ldbminfo *li, int dbenv_flags)
+{
+    struct statvfs dbhome_buf;
+    struct statvfs db_buf;
+    int using_region_files = !(dbenv_flags & (DB_PRIVATE | DB_SYSTEM_MEM));
+    /* value of 10 == 10% == little more than the average overhead calculated for very large files on 64-bit system for bdb 4.7 */
+    uint64_t expected_siz = li->li_dbcachesize + li->li_dbcachesize / 10; /* dbcache + region files */
+    uint64_t fsiz;
+    char *region_dir;
+
+    if (statvfs(li->li_directory, &db_buf) < 0) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "no_diskspace", "Cannot get file system info for (%s); file system corrupted?\n",
+                      li->li_directory);
+        return 1;
+    }
+
+    /*
+     *  If db_home_directory is set, and it's not the same as the db_directory,
+     *  then check the disk space there instead.
+     */
+    if (BDB_CONFIG(li)->bdb_dbhome_directory &&
+        strcmp(BDB_CONFIG(li)->bdb_dbhome_directory, "") &&
+        strcmp(li->li_directory, BDB_CONFIG(li)->bdb_dbhome_directory)) {
+        if (!using_region_files) {
+            /* Shared/private memory.  No need to check disk space, return success */
+            return 0;
+        }
+        /* Calculate the available space in the db home directory */
+        if (statvfs(BDB_CONFIG(li)->bdb_dbhome_directory, &dbhome_buf) < 0) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "no_diskspace", "Cannot get file system info for (%s); file system corrupted?\n",
+                          BDB_CONFIG(li)->bdb_dbhome_directory);
+            return 1;
+        }
+        fsiz = ((uint64_t)dbhome_buf.f_bavail) * ((uint64_t)dbhome_buf.f_bsize);
+        region_dir = BDB_CONFIG(li)->bdb_dbhome_directory;
+    } else {
+        /* Ok, just check the db directory */
+        region_dir = li->li_directory;
+        fsiz = ((PRUint64)db_buf.f_bavail) * ((PRUint64)db_buf.f_bsize);
+    }
+    /* Adjust the size for the region files: existing region files will be
+     * reused, so their space counts as available. */
+    fsiz += dblayer_get_region_size(region_dir);
+
+    /* Check if we have enough space */
+    if (fsiz < expected_siz) {
+        /* Bug fix: log message read "No enough space" */
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "no_diskspace", "Not enough space left on device (%s) (%" PRIu64 " bytes); "
+                                      "at least %" PRIu64 " bytes space is needed for db region files\n",
+                      region_dir, fsiz, expected_siz);
+        return 1;
+    }
+
+    return 0;
+}
+
+/*
+ * This function is called after all the config options have been read in,
+ * so we can do real initialization work here.
+ */
+#define DBCONFLEN 3
+#define CATASTROPHIC (bdb_db_env *)-1
+
+/*
+ * Open the Berkeley DB environment for this backend.
+ *
+ * li     - the ldbm backend info block (configuration must already be read)
+ * dbmode - DBLAYER_*_MODE flags selecting normal / recover / restore /
+ *          import / index startup behavior
+ *
+ * Returns 0 on success, -1 or a DB/errno-style error code on failure.
+ * On an unrecoverable open failure priv->dblayer_env is set to
+ * CATASTROPHIC so later calls refuse to retry.
+ */
+int
+bdb_start(struct ldbminfo *li, int dbmode)
+{
+    /*
+     * So, here we open our DB_ENV session. We store it away for future use.
+     * We also check to see if we exited cleanly last time. If we didn't,
+     * we try to recover. If recovery fails, we're hosed.
+     * We also create the thread which handles checkpointing and logfile
+     * truncation here.
+     */
+    int return_value = -1;
+    bdb_config *conf = NULL;
+    dblayer_private *priv = NULL;
+    bdb_db_env *pEnv = NULL;
+    char *region_dir = NULL; /* directory to place region files */
+    char *log_dir = NULL;    /* directory to place txn log files */
+    int open_flags = 0;
+
+    PR_ASSERT(NULL != li);
+
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+
+    if (NULL == priv) {
+        /* you didn't call init successfully */
+        return -1;
+    }
+
+    if (NULL != priv->dblayer_env) {
+        if (CATASTROPHIC == priv->dblayer_env) {
+            slapi_log_err(SLAPI_LOG_CRIT,
+                          "bdb_start", "DB previously failed to start.\n");
+            return -1;
+        } else {
+            slapi_log_err(SLAPI_LOG_WARNING,
+                          "bdb_start", "DB already started.\n");
+            return 0;
+        }
+    }
+
+    /* DBDB we should pick these up in our config routine, and do away with
+     *  the li_ one */
+    if (NULL == li->li_directory || '\0' == *li->li_directory) {
+        slapi_log_err(SLAPI_LOG_CRIT,
+                      "bdb_start", "DB directory is not specified.\n");
+        return -1;
+    }
+    /* Copy the generic LDBM settings into the BDB-specific config under
+     * the config mutex. */
+    PR_Lock(li->li_config_mutex);
+    /* li->li_directory comes from nsslapd-directory */
+    /* bdb_home_directory is freed in bdb_post_close.
+     * li_directory needs to live beyond dblayer. */
+    slapi_ch_free_string(&conf->bdb_home_directory);
+    conf->bdb_home_directory = slapi_ch_strdup(li->li_directory);
+    conf->bdb_cachesize = li->li_dbcachesize;
+    conf->bdb_lock_config = li->li_dblock;
+    priv->dblayer_file_mode = li->li_mode;
+    conf->bdb_ncache = li->li_dbncache;
+    PR_Unlock(li->li_config_mutex);
+
+    /* use nsslapd-db-home-directory (bdb_dbhome_directory), if set */
+    /* Otherwise, nsslapd-directory (bdb_home_directory). */
+    region_dir = bdb_get_home_dir(li, NULL);
+    if (!region_dir || !(*region_dir)) {
+        return -1;
+    }
+    if (!bdb_version_exists(li, region_dir)) {
+        bdb_version_write(li, region_dir, NULL, DBVERSION_ALL);
+    }
+
+    /* Check here that the database directory both exists, and that we have
+     * the appropriate access to it */
+    return_value = dblayer_grok_directory(region_dir,
+                                          DBLAYER_DIRECTORY_READWRITE_ACCESS);
+    if (0 != return_value) {
+        slapi_log_err(SLAPI_LOG_CRIT, "bdb_start", "Can't start because the database "
+                                                       "directory \"%s\" either doesn't exist, or is not "
+                                                       "accessible\n",
+                      region_dir);
+        return return_value;
+    }
+
+    log_dir = conf->bdb_log_directory; /* nsslapd-db-logdirectory */
+    if (log_dir && *log_dir) {
+        /* checking the user defined log dir's accessability */
+        return_value = dblayer_grok_directory(log_dir,
+                                              DBLAYER_DIRECTORY_READWRITE_ACCESS);
+        if (0 != return_value) {
+            slapi_log_err(SLAPI_LOG_CRIT, "bdb_start", "Can't start because the log "
+                                                           "directory \"%s\" either doesn't exist, or is not "
+                                                           "accessible\n",
+                          log_dir);
+            return return_value;
+        }
+    }
+
+    /* Sanity check on cache size on platforms which allow us to figure out
+     * the available phys mem */
+    slapi_pal_meminfo *mi = spal_meminfo_get();
+    util_cachesize_result result = util_is_cachesize_sane(mi, &(conf->bdb_cachesize));
+    if (result == UTIL_CACHESIZE_ERROR) {
+        slapi_log_err(SLAPI_LOG_CRIT, "bdb_start", "Unable to determine if cachesize was valid!!!");
+    } else if (result == UTIL_CACHESIZE_REDUCED) {
+        /* In some cases we saw this go to 0, prevent this. */
+        if (conf->bdb_cachesize < MINCACHESIZE) {
+            conf->bdb_cachesize = MINCACHESIZE;
+        }
+        /* Oops---looks like the admin misconfigured, let's warn them */
+        slapi_log_err(SLAPI_LOG_WARNING, "bdb_start",
+            "Likely CONFIGURATION ERROR - dbcachesize is configured to use more than the available "
+            "memory, decreased to (%" PRIu64 " bytes).\n", conf->bdb_cachesize);
+        li->li_dbcachesize = conf->bdb_cachesize;
+    }
+    spal_meminfo_destroy(mi);
+
+    /* fill in DB_ENV stuff from the common configuration */
+    return_value = bdb_make_env(&pEnv, li);
+    if (return_value != 0)
+        return return_value;
+
+    /* Matches when either the NORMAL or the CLEAN_RECOVER mode bit is set. */
+    if ((DBLAYER_NORMAL_MODE | DBLAYER_CLEAN_RECOVER_MODE) & dbmode) {
+        /* Now, we read our metadata */
+        return_value = read_metadata(li);
+        if (0 != return_value) {
+            /* The error message was output by read_metadata() */
+            return -1;
+        }
+        if (bdb_restore_file_check(li)) {
+            dblayer_set_restored();
+        }
+    }
+
+    bdb_free_env(&priv->dblayer_env);
+    priv->dblayer_env = pEnv;
+
+    open_flags = DB_CREATE | DB_INIT_MPOOL | DB_THREAD;
+
+    if (conf->bdb_enable_transactions) {
+        open_flags |= (DB_INIT_TXN | DB_INIT_LOG | DB_INIT_LOCK);
+        if (conf->bdb_recovery_required) {
+            open_flags |= DB_RECOVER;
+            if (DBLAYER_RESTORE_MODE & dbmode) {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_start", "Recovering database after restore "
+                                                                 "from archive.\n");
+            } else if (DBLAYER_CLEAN_RECOVER_MODE & dbmode) {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_start", "Clean up db environment and start "
+                                                                 "from archive.\n");
+            } else {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_start", "Detected Disorderly Shutdown last "
+                                                                 "time Directory Server was running, recovering database.\n");
+                slapi_disordely_shutdown(PR_TRUE);
+            }
+        }
+        switch (dbmode & DBLAYER_RESTORE_MASK) {
+        case DBLAYER_RESTORE_MODE:
+            open_flags |= DB_RECOVER_FATAL;
+            open_flags &= ~DB_RECOVER; /* shouldn't set both */
+            if (!(dbmode & DBLAYER_NO_DBTHREADS_MODE))
+                dbmode = DBLAYER_NORMAL_MODE; /* to restart helper threads */
+            break;
+        case DBLAYER_RESTORE_NO_RECOVERY_MODE:
+            open_flags &= ~(DB_RECOVER | DB_RECOVER_FATAL);
+            if (!(dbmode & DBLAYER_NO_DBTHREADS_MODE))
+                dbmode = DBLAYER_NORMAL_MODE; /* to restart helper threads */
+        }
+    }
+
+    if (conf->bdb_private_mem) {
+        slapi_log_err(SLAPI_LOG_INFO, "bdb_start",
+                      "Server is running with nsslapd-db-private-mem on; "
+                      "No other process is allowed to access the database\n");
+        open_flags |= DB_PRIVATE;
+    }
+
+    if (conf->bdb_named_regions) {
+        open_flags |= DB_SYSTEM_MEM;
+    }
+
+    if (conf->bdb_lockdown) {
+        open_flags |= DB_LOCKDOWN;
+    }
+
+
+    /* Is the cache being re-sized ? (If we're just doing an archive or export,
+     * we don't care if the cache is being re-sized) */
+    if ((conf->bdb_previous_cachesize || conf->bdb_previous_ncache) &&
+        (conf->bdb_previous_lock_config) &&
+        ((conf->bdb_cachesize != conf->bdb_previous_cachesize) ||
+         (conf->bdb_ncache != conf->bdb_previous_ncache) ||
+         (conf->bdb_lock_config != conf->bdb_previous_lock_config)) &&
+        !(dbmode & (DBLAYER_ARCHIVE_MODE | DBLAYER_EXPORT_MODE))) {
+        if (conf->bdb_cachesize != conf->bdb_previous_cachesize) {
+            slapi_log_err(SLAPI_LOG_INFO, "bdb_start", "Resizing db cache size: %" PRIu64 " -> %" PRIu64 "\n",
+                          conf->bdb_previous_cachesize, conf->bdb_cachesize);
+        }
+        if (conf->bdb_ncache != conf->bdb_previous_ncache) {
+            slapi_log_err(SLAPI_LOG_INFO, "bdb_start", "Resizing db cache count: %d -> %d\n",
+                          conf->bdb_previous_ncache, conf->bdb_ncache);
+        }
+        if (conf->bdb_lock_config != conf->bdb_previous_lock_config) {
+            /*
+             * The default value of nsslapd-db-locks is BDB_LOCKS_MIN.
+             * We don't allow lower value than that.
+             */
+            if (conf->bdb_lock_config <= BDB_LOCK_NB_MIN) {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_start", "New max db lock count is too small.  "
+                                                                 "Resetting it to the default value %d.\n",
+                              BDB_LOCK_NB_MIN);
+                conf->bdb_lock_config = BDB_LOCK_NB_MIN;
+            }
+            if (conf->bdb_lock_config != conf->bdb_previous_lock_config) {
+                slapi_log_err(SLAPI_LOG_NOTICE, "bdb_start", "Resizing max db lock count: %d -> %d\n",
+                              conf->bdb_previous_lock_config, conf->bdb_lock_config);
+            }
+        }
+        /* Region sizes changed: remove the old environment and rebuild it. */
+        dblayer_reset_env(li);
+        /*
+         * Once pEnv->remove (via dblayer_reset_env) has been called,
+         * the DB_ENV (pEnv) needs to be created again.
+         */
+        if ((return_value = bdb_make_env(&pEnv, li)) != 0) {
+            slapi_log_err(SLAPI_LOG_CRIT,
+                          "bdb_start", "Failed to create DBENV (returned: %d).\n",
+                          return_value);
+        }
+        bdb_free_env(&priv->dblayer_env);
+        priv->dblayer_env = pEnv;
+    }
+
+    /* transactions enabled and logbuf size greater than sleepycat's default */
+    if (conf->bdb_enable_transactions && (conf->bdb_logbuf_size > 0)) {
+        if (conf->bdb_logbuf_size >= 32768) {
+            pEnv->bdb_DB_ENV->set_lg_bsize(pEnv->bdb_DB_ENV, conf->bdb_logbuf_size);
+        } else {
+            slapi_log_err(SLAPI_LOG_NOTICE, "bdb_start",
+                "Using default value for log bufsize because configured value (%" PRIu64 ") is too small.\n",
+                conf->bdb_logbuf_size);
+        }
+    }
+
+    /* check if there's enough disk space to start */
+    if (no_diskspace(li, open_flags)) {
+        return ENOSPC;
+    }
+
+    bdb_set_data_dir(pEnv, conf->bdb_data_directories);
+    /* If we're doing recovery, we MUST open the env single-threaded ! */
+    if ((open_flags & DB_RECOVER) || (open_flags & DB_RECOVER_FATAL)) {
+        /* Recover, then close, then open again */
+        int recover_flags = open_flags & ~DB_THREAD;
+
+        if (DBLAYER_CLEAN_RECOVER_MODE & dbmode) /* upgrade case */
+        {
+            DB_ENV *thisenv = pEnv->bdb_DB_ENV;
+            return_value = thisenv->remove(thisenv, region_dir, DB_FORCE);
+            if (0 != return_value) {
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "Failed to remove old db env "
+                                               "in %s: %s\n",
+                              region_dir,
+                              dblayer_strerror(return_value));
+                return return_value;
+            }
+            dbmode = DBLAYER_NORMAL_MODE;
+
+            /* remove() invalidated the handle; build a fresh DB_ENV */
+            if ((return_value = bdb_make_env(&pEnv, li)) != 0) {
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "Failed to create DBENV (returned: %d).\n",
+                              return_value);
+                return return_value;
+            }
+        }
+
+        return_value = (pEnv->bdb_DB_ENV->open)(
+            pEnv->bdb_DB_ENV,
+            region_dir,
+            recover_flags,
+            priv->dblayer_file_mode);
+        if (0 != return_value) {
+            if (return_value == ENOMEM) {
+                /*
+                 * https://blackflag.mcom.com/show_bug.cgi?id=557319
+                 * Crash ns-slapd while running scalab01 after restart slapd
+                 */
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "mmap in opening database environment (recovery mode) "
+                                               "failed trying to allocate %" PRIu64 " bytes. (OS err %d - %s)\n",
+                              li->li_dbcachesize, return_value, dblayer_strerror(return_value));
+                bdb_free_env(&priv->dblayer_env);
+                priv->dblayer_env = CATASTROPHIC;
+            } else {
+                slapi_log_err(SLAPI_LOG_CRIT, "bdb_start", "Database Recovery Process FAILED. "
+                                                               "The database is not recoverable. err=%d: %s\n",
+                              return_value, dblayer_strerror(return_value));
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "Please make sure there is enough disk space for "
+                                               "dbcache (%" PRIu64 " bytes) and db region files\n",
+                              li->li_dbcachesize);
+            }
+            return return_value;
+        } else {
+            /* recovery succeeded: close the single-threaded env and reopen
+             * below with the normal (multi-threaded) flags */
+            open_flags &= ~(DB_RECOVER | DB_RECOVER_FATAL);
+            pEnv->bdb_DB_ENV->close(pEnv->bdb_DB_ENV, 0);
+            if ((return_value = bdb_make_env(&pEnv, li)) != 0) {
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "Failed to create DBENV (returned: %d).\n",
+                              return_value);
+                return return_value;
+            }
+            bdb_free_env(&priv->dblayer_env);
+            priv->dblayer_env = pEnv;
+            bdb_set_data_dir(pEnv, conf->bdb_data_directories);
+        }
+    }
+
+    /* Durability off, or txn batching enabled: commit without a
+     * synchronous log flush (DB_TXN_WRITE_NOSYNC). */
+    if ((!conf->bdb_durable_transactions) ||
+        ((conf->bdb_enable_transactions) && (trans_batch_limit > 0))) {
+        pEnv->bdb_DB_ENV->set_flags(pEnv->bdb_DB_ENV, DB_TXN_WRITE_NOSYNC, 1);
+    }
+    /* ldbm2index uses transactions but sets the transaction flag to off - we
+       need to dblayer_init_pvt_txn in that case */
+    dblayer_init_pvt_txn();
+    if (!((DBLAYER_IMPORT_MODE | DBLAYER_INDEX_MODE) & dbmode)) {
+        pEnv->bdb_openflags = open_flags;
+        return_value = (pEnv->bdb_DB_ENV->open)(
+            pEnv->bdb_DB_ENV,
+            region_dir,
+            open_flags,
+            priv->dblayer_file_mode);
+
+
+        /* Now attempt to start up the checkpoint and deadlock threads */
+        /* note: need to be '==', not '&' to omit DBLAYER_NO_DBTHREADS_MODE */
+        if ((DBLAYER_NORMAL_MODE == dbmode) &&
+            (0 == return_value)) {
+            /* update the dbversion file */
+            bdb_version_write(li, region_dir, NULL, DBVERSION_ALL);
+
+            /* if dblayer_close then bdb_start is called,
+               this flag is set */
+            conf->bdb_stop_threads = 0;
+            if (0 != (return_value = bdb_start_deadlock_thread(li))) {
+                return return_value;
+            }
+
+            if (0 != (return_value = bdb_start_checkpoint_thread(li))) {
+                return return_value;
+            }
+
+            if (0 != (return_value = bdb_start_log_flush_thread(li))) {
+                return return_value;
+            }
+
+            if (0 != (return_value = bdb_start_trickle_thread(li))) {
+                return return_value;
+            }
+
+            if (0 != (return_value = bdb_start_perf_thread(li))) {
+                return return_value;
+            }
+
+            /* Now open the performance counters stuff */
+            perfctrs_init(li, &(conf->perf_private));
+            if (getenv(TXN_TESTING)) {
+                bdb_start_txn_test_thread(li);
+            }
+        }
+        if (return_value != 0) {
+            if (return_value == ENOMEM) {
+                /*
+                 * https://blackflag.mcom.com/show_bug.cgi?id=557319
+                 * Crash ns-slapd while running scalab01 after restart slapd
+                 */
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "mmap in opening database environment "
+                                               "failed trying to allocate %" PRIu64 " bytes. (OS err %d - %s)\n",
+                              li->li_dbcachesize, return_value, dblayer_strerror(return_value));
+                bdb_free_env(&priv->dblayer_env);
+                priv->dblayer_env = CATASTROPHIC;
+            } else {
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "bdb_start", "Opening database environment (%s) failed. err=%d: %s\n",
+                              region_dir, return_value, dblayer_strerror(return_value));
+            }
+        }
+        return return_value;
+    }
+    return 0;
+}
+
+/*
+ * If import cache autosize is enabled:
+ *    nsslapd-import-cache-autosize: -1 or 1 ~ 99
+ * calculate the import cache size.
+ * If import cache is disabled:
+ *    nsslapd-import-cache-autosize: 0
+ * get the nsslapd-import-cachesize.
+ * Calculate the memory size left after allocating the import cache size.
+ *
+ * Returns 0 on success, ENOENT if system memory information cannot be
+ * read, ENOMEM if the requested cache does not fit in available memory.
+ *
+ * Note: this function is called only if the import is executed as a stand
+ * alone command line (ldif2db).
+ */
+int
+bdb_check_and_set_import_cache(struct ldbminfo *li)
+{
+    uint64_t import_cache = 0;
+    char s[64]; /* big enough to hold a formatted uint64_t */
+    /* Get our platform memory values. */
+    slapi_pal_meminfo *mi = spal_meminfo_get();
+
+    if (mi == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_check_and_set_import_cache", "Failed to get system memory information\n");
+        return ENOENT;
+    }
+    slapi_log_err(SLAPI_LOG_INFO, "bdb_check_and_set_import_cache", "pagesize: %" PRIu64 ", available bytes %" PRIu64 ", process usage %" PRIu64 " \n", mi->pagesize_bytes, mi->system_available_bytes, mi->process_consumed_bytes);
+
+    /*
+     * default behavior for ldif2db import cache,
+     * nsslapd-import-cache-autosize==-1,
+     * autosize 50% mem to import cache
+     */
+    if (li->li_import_cache_autosize < 0) {
+        li->li_import_cache_autosize = 50;
+    }
+
+    /* sanity check: >= 100% would starve everything else during import */
+    if (li->li_import_cache_autosize >= 100) {
+        slapi_log_err(SLAPI_LOG_NOTICE,
+                      "bdb_check_and_set_import_cache",
+                      "Import cache autosizing value (nsslapd-import-cache-autosize) should not be "
+                      "greater than or equal to 100%%. Reset to 50%%.\n");
+        li->li_import_cache_autosize = 50;
+    }
+
+    if (li->li_import_cache_autosize == 0) {
+        /* user specified importCache */
+        import_cache = li->li_import_cachesize;
+
+    } else {
+        /* autosizing importCache */
+        /* ./125 instead of ./100 is for adjusting the BDB overhead. */
+        import_cache = (li->li_import_cache_autosize * mi->system_available_bytes) / 125;
+    }
+
+    if (util_is_cachesize_sane(mi, &import_cache) == UTIL_CACHESIZE_ERROR) {
+
+        slapi_log_err(SLAPI_LOG_INFO, "bdb_check_and_set_import_cache", "Import failed to run: unable to validate system memory limits.\n");
+        spal_meminfo_destroy(mi);
+        return ENOMEM;
+    }
+
+    slapi_log_err(SLAPI_LOG_INFO, "bdb_check_and_set_import_cache", "Import allocates %" PRIu64 "KB import cache.\n", import_cache / 1024);
+    if (li->li_import_cache_autosize > 0) {
+        /* import cache autosizing */
+        /* set the calculated import cache size to the config */
+        /* snprintf (not sprintf): keep the write bounded to the buffer */
+        snprintf(s, sizeof(s), "%" PRIu64, import_cache);
+        bdb_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, s);
+    }
+    spal_meminfo_destroy(mi);
+    return 0;
+}
+
+
+/* mode is one of
+ * DBLAYER_NORMAL_MODE,
+ * DBLAYER_INDEX_MODE,
+ * DBLAYER_IMPORT_MODE,
+ * DBLAYER_EXPORT_MODE
+ *
+ * Start one backend instance on top of an already-started BDB layer:
+ * verify the instance directory, handle the DBVERSION file (normal mode
+ * only), and open the instance's id2entry database.  In import/index
+ * mode a private DB_ENV with its own cache is created for the instance
+ * instead of reusing the shared environment.
+ * Returns 0 on success, non-zero on failure.
+ */
+int
+bdb_instance_start(backend *be, int mode)
+{
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+    bdb_config *conf;
+    dblayer_private *priv;
+    bdb_db_env *pEnv;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+    int return_value = -1;
+
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+    pEnv = priv->dblayer_env;
+    /* CATASTROPHIC is the sentinel stored when the env failed to start */
+    if (CATASTROPHIC == pEnv || NULL == pEnv) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_instance_start", "DB Instance %s: dbenv is not available (0x%p).\n",
+                      inst ? inst->inst_name : "unknown", pEnv);
+        return return_value;
+    }
+
+    /* a non-NULL id2entry handle means this instance is already open */
+    if (NULL != inst->inst_id2entry) {
+        slapi_log_err(SLAPI_LOG_WARNING,
+                      "bdb_instance_start", "DB instance \"%s\" already started.\n",
+                      inst->inst_name);
+        return 0;
+    }
+
+    if (attrcrypt_init(inst)) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_instance_start", "Unable to initialize attrcrypt system for %s\n",
+                      inst->inst_name);
+        return return_value;
+    }
+
+    /* Get the name of the directory that holds index files
+     * for this instance. */
+    if (dblayer_get_instance_data_dir(be) != 0) {
+        /* Problem getting the name of the directory that holds the
+         * index files for this instance. */
+        return return_value;
+    }
+
+    /* inst_dirp may point either at the stack buffer inst_dir or at a
+     * heap allocation; the errout path frees it only in the heap case */
+    inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+    if (inst_dirp && *inst_dirp) {
+        return_value = dblayer_grok_directory(inst_dirp,
+                                              DBLAYER_DIRECTORY_READWRITE_ACCESS);
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_instance_start",
+                      "Can't start because the database instance "
+                      "directory is NULL\n");
+        goto errout;
+    }
+    if (0 != return_value) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_instance_start",
+                      "Can't start because the database instance "
+                      "directory \"%s\" either doesn't exist, "
+                      "or the db files are not accessible\n",
+                      inst_dirp);
+        goto errout;
+    }
+
+    if (mode & DBLAYER_NORMAL_MODE) {
+        /* In normal mode (not db2ldif, ldif2db, etc.) we need to deal with
+         * the dbversion file here. */
+
+        /* Read the dbversion file if there is one, and create it
+         * if it doesn't exist. */
+        if (bdb_version_exists(li, inst_dirp)) {
+            char *ldbmversion = NULL;
+            char *dataversion = NULL;
+
+            if (bdb_version_read(li, inst_dirp, &ldbmversion, &dataversion) != 0) {
+                slapi_log_err(SLAPI_LOG_WARNING, "bdb_instance_start", "Unable to read dbversion "
+                                                                           "file in %s\n",
+                              inst->inst_dir_name);
+            } else {
+                int rval = 0;
+                /* check the DBVERSION and reset idl-switch if needed (DS6.2) */
+                /* from the next major rel, we won't do this and just upgrade */
+                if (!(li->li_flags & LI_FORCE_MOD_CONFIG)) {
+                    adjust_idl_switch(ldbmversion, li);
+                }
+                slapi_ch_free_string(&ldbmversion);
+
+                /* check to make sure these instance was made with the correct
+                 * version. */
+                rval = check_db_inst_version(inst);
+                if (rval & DBVERSION_NOT_SUPPORTED) {
+                    slapi_log_err(SLAPI_LOG_ERR, "bdb_instance_start", " DB Instance %s does not have the "
+                                                                           "expected version\n",
+                                  inst->inst_name);
+                    PR_ASSERT(0);
+                    slapi_ch_free_string(&dataversion);
+                    return_value = -1;
+                    goto errout;
+                } else if (rval & DBVERSION_NEED_DN2RDN) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_instance_start", "%s is on, while the instance %s is in the DN format. "
+                                                            "Please run dn2rdn to convert the database format.\n",
+                                  CONFIG_ENTRYRDN_SWITCH, inst->inst_name);
+                    slapi_ch_free_string(&dataversion);
+                    return_value = -1;
+                    goto errout;
+                } else if (rval & DBVERSION_NEED_RDN2DN) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_instance_start", "%s is off, while the instance %s is in the RDN "
+                                                            "format. Please change the value to on in dse.ldif.\n",
+                                  CONFIG_ENTRYRDN_SWITCH, inst->inst_name);
+                    slapi_ch_free_string(&dataversion);
+                    return_value = -1;
+                    goto errout;
+                }
+
+                /* record the dataversion */
+                if (dataversion != NULL && *dataversion != '\0') {
+                    inst->inst_dataversion = dataversion;
+                } else {
+                    slapi_ch_free_string(&dataversion);
+                }
+
+                rval = ldbm_upgrade(inst, rval);
+                if (0 != rval) {
+                    slapi_log_err(SLAPI_LOG_ERR, "bdb_instance_start", "Upgrading instance %s failed\n",
+                                  inst->inst_name);
+                    PR_ASSERT(0);
+                    return_value = -1;
+                    goto errout;
+                }
+            }
+        } else {
+            /* The dbversion file didn't exist, so we'll create one. */
+            bdb_version_write(li, inst_dirp, NULL, DBVERSION_ALL);
+        }
+    } /* on import we don't mess with the dbversion file except to write it
+       * when done with the import. */
+
+    /* Now attempt to open id2entry */
+    {
+        char *id2entry_file;
+        int open_flags = 0;
+        DB *dbp;
+        char *subname;
+        bdb_db_env *mypEnv;
+
+        id2entry_file = slapi_ch_smprintf("%s/%s", inst->inst_dir_name,
+                                          ID2ENTRY LDBM_FILENAME_SUFFIX);
+
+        open_flags = DB_CREATE | DB_THREAD;
+
+        /* The subname argument allows applications to have
+         * subdatabases, i.e., multiple databases inside of a single
+         * physical file. This is useful when the logical databases
+         * are both numerous and reasonably small, in order to
+         * avoid creating a large number of underlying files.
+         */
+        subname = NULL;
+        mypEnv = NULL;
+        if (mode & (DBLAYER_IMPORT_MODE | DBLAYER_INDEX_MODE)) {
+            size_t cachesize;
+            char *data_directories[2] = {0, 0};
+            /* [605974] delete DB_PRIVATE:
+             * to make import visible to the other process */
+            int oflags = DB_CREATE | DB_INIT_MPOOL | DB_THREAD;
+            /*
+             * but nsslapd-db-private-import-mem should work with import,
+             * as well */
+            if (conf->bdb_private_import_mem) {
+                slapi_log_err(SLAPI_LOG_INFO,
+                              "bdb_instance_start", "Import is running with "
+                                                        "nsslapd-db-private-import-mem on; "
+                                                        "No other process is allowed to access the database\n");
+                oflags |= DB_PRIVATE;
+            }
+            PR_Lock(li->li_config_mutex);
+            /* import cache checking and autosizing is available only
+             * for the command line */
+            if (li->li_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE) {
+                return_value = bdb_check_and_set_import_cache(li);
+                if (return_value) {
+                    goto out;
+                }
+            }
+            cachesize = li->li_import_cachesize;
+            PR_Unlock(li->li_config_mutex);
+
+            if (cachesize < 1048576) {
+                /* make it at least 1M */
+                cachesize = 1048576;
+            }
+            conf->bdb_cachesize = cachesize;
+            /* We always auto-calculate ncache for the import region */
+            conf->bdb_ncache = 0;
+
+            /* use our own env */
+            return_value = bdb_make_env(&mypEnv, li);
+            if (return_value != 0) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_instance_start", "Unable to create new DB_ENV for import/export! %d\n",
+                              return_value);
+                goto out;
+            }
+            /* do not assume import cache size is under 1G */
+            mypEnv->bdb_DB_ENV->set_cachesize(mypEnv->bdb_DB_ENV,
+                                                  cachesize / GIGABYTE,
+                                                  cachesize % GIGABYTE,
+                                                  conf->bdb_ncache);
+            /* probably want to change this -- but for now, create the
+             * mpool files in the instance directory.
+             */
+            mypEnv->bdb_openflags = oflags;
+            data_directories[0] = inst->inst_parent_dir_name;
+            bdb_set_data_dir(mypEnv, data_directories);
+            return_value = (mypEnv->bdb_DB_ENV->open)(mypEnv->bdb_DB_ENV,
+                                                          inst_dirp,
+                                                          oflags,
+                                                          priv->dblayer_file_mode);
+            if (return_value != 0) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_instance_start", "Unable to open new DB_ENV for import/export! %d\n",
+                              return_value);
+                goto out;
+            }
+            inst->inst_db = mypEnv;
+        } else {
+            /* normal/export mode: share the already-open layer-wide env */
+            mypEnv = pEnv;
+        }
+
+        inst->inst_id2entry = NULL;
+        return_value = db_create(&inst->inst_id2entry, mypEnv->bdb_DB_ENV, 0);
+        if (0 != return_value) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_instance_start", "Unable to create id2entry db file! %d\n",
+                          return_value);
+            goto out;
+        }
+        dbp = inst->inst_id2entry;
+
+        return_value = dbp->set_pagesize(dbp,
+                                         (conf->bdb_page_size == 0) ? DBLAYER_PAGESIZE : conf->bdb_page_size);
+        if (0 != return_value) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_instance_start", "dbp->set_pagesize(%" PRIu32 " or %" PRIu32 ") failed %d\n",
+                          conf->bdb_page_size, DBLAYER_PAGESIZE,
+                          return_value);
+            goto out;
+        }
+
+        /* if the parent dir is not a registered data dir and no db files
+         * exist yet, pre-create the file with an absolute path, then
+         * recreate the handle and retry with the relative path below */
+        if ((charray_get_index(conf->bdb_data_directories,
+                               inst->inst_parent_dir_name) != 0) &&
+            !dblayer_inst_exists(inst, NULL)) {
+            char *abs_id2entry_file = NULL;
+            /* create a file with abs path, then try again */
+
+            abs_id2entry_file = slapi_ch_smprintf("%s%c%s", inst_dirp,
+                                                  get_sep(inst_dirp), ID2ENTRY LDBM_FILENAME_SUFFIX);
+            DB_OPEN(mypEnv->bdb_openflags,
+                    dbp, NULL /* txnid */, abs_id2entry_file, subname, DB_BTREE,
+                    open_flags, priv->dblayer_file_mode, return_value);
+            dbp->close(dbp, 0);
+            return_value = db_create(&inst->inst_id2entry,
+                                     mypEnv->bdb_DB_ENV, 0);
+            if (0 != return_value)
+                goto out;
+            dbp = inst->inst_id2entry;
+            return_value = dbp->set_pagesize(dbp,
+                                             (conf->bdb_page_size == 0) ? DBLAYER_PAGESIZE : conf->bdb_page_size);
+            if (0 != return_value) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_instance_start", "dbp->set_pagesize(%" PRIu32 " or %" PRIu32 ") failed %d\n",
+                              conf->bdb_page_size, DBLAYER_PAGESIZE,
+                              return_value);
+                goto out;
+            }
+
+            slapi_ch_free_string(&abs_id2entry_file);
+        }
+        DB_OPEN(mypEnv->bdb_openflags,
+                dbp, NULL /* txnid */, id2entry_file, subname, DB_BTREE,
+                open_flags, priv->dblayer_file_mode, return_value);
+        if (0 != return_value) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_instance_start", "dbp->open(\"%s\") failed: %s (%d)\n",
+                          id2entry_file, dblayer_strerror(return_value),
+                          return_value);
+            /* if it's a newly created backend instance,
+             * need to check the inst_parent_dir already exists and
+             * set as a data dir */
+            if (strstr(dblayer_strerror(return_value),
+                       "No such file or directory")) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_instance_start", "Instance %s is not registered as a db data directory. "
+                                                        "Please restart the server to create it.\n",
+                              inst ? inst->inst_name : "unknown");
+            } else if (strstr(dblayer_strerror(return_value),
+                              "Permission denied")) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_instance_start", "Instance directory %s may not be writable\n",
+                              inst_dirp);
+            }
+
+            goto out;
+        }
+    out:
+        /* common exit for the id2entry open block, success or failure */
+        slapi_ch_free_string(&id2entry_file);
+    }
+
+    if (0 == return_value) {
+        /* get nextid from disk now */
+        get_ids_from_disk(be);
+    }
+
+    if (mode & DBLAYER_NORMAL_MODE) {
+        bdb_version_write(li, inst_dirp, NULL, DBVERSION_ALL);
+        /* richm - not sure if need to acquire the be lock first? */
+        /* need to set state back to started - set to stopped in
+           dblayer_instance_close */
+        be->be_state = BE_STATE_STARTED;
+    }
+
+    /*
+     * check if nextid is valid: it only matters if the database is either
+     * being imported or is in normal mode
+     */
+    if (inst->inst_nextid > MAXID && !(mode & DBLAYER_EXPORT_MODE)) {
+        slapi_log_err(SLAPI_LOG_CRIT, "bdb_instance_start", "Backend '%s' "
+                                                                "has no IDs left. DATABASE MUST BE REBUILT.\n",
+                      be->be_name);
+        /* NOTE(review): returns 1 without freeing inst_dirp when it is
+         * heap-allocated - looks like a small leak; confirm upstream */
+        return 1;
+    }
+
+    if (return_value != 0) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_instance_start", "Failure %s (%d)\n",
+                      dblayer_strerror(return_value), return_value);
+    }
+errout:
+    /* free only when dblayer_get_full_inst_dir() heap-allocated the path */
+    if (inst_dirp != inst_dir)
+        slapi_ch_free_string(&inst_dirp);
+    return return_value;
+}
+
+/*
+ * bdb_get_aux_id2entry:
+ * Convenience wrapper for bdb_get_aux_id2entry_ext() with no extra flags.
+ * - create a dedicated db env and db handler for id2entry.
+ * - introduced for upgradedb not to share the env and db handler with
+ *   other index files to support multiple passes and merge.
+ * - Argument path is for returning the full path for the id2entry.db#,
+ *   if the memory to store the address of the full path is given.  The
+ *   caller is supposed to release the full path.
+ */
+int
+bdb_get_aux_id2entry(backend *be, DB **ppDB, DB_ENV **ppEnv, char **path)
+{
+    int rc = bdb_get_aux_id2entry_ext(be, ppDB, ppEnv, path, 0);
+    return rc;
+}
+
+/*
+ * flags:
+ * DBLAYER_AUX_ID2ENTRY_TMP -- create id2entry_tmp.db#
+ *
+ * - if non-NULL *ppEnv is given, env is already open.
+ *   Just open an id2entry[_tmp].db#.
+ * - Argument path is for returning the full path for the id2entry[_tmp].db#,
+ *   if the memory to store the address of the full path is given.  The
+ *   caller is supposed to release the full path.
+ *
+ * Returns 0 on success; non-zero otherwise (1 for early argument/setup
+ * failures, or the BDB/errno code from env/db creation and open).
+ */
+int
+bdb_get_aux_id2entry_ext(backend *be, DB **ppDB, DB_ENV **ppEnv, char **path, int flags)
+{
+    ldbm_instance *inst;
+    bdb_db_env *mypEnv = NULL;
+    DB *dbp = NULL;
+    int rval = 1;
+    struct ldbminfo *li = NULL;
+    bdb_config *oconf = NULL;
+    bdb_config *conf = NULL;
+    /* must be initialized: the early "goto done" paths below reach the
+     * "if (priv)" cleanup before priv is ever assigned */
+    dblayer_private *priv = NULL;
+    char *subname = NULL;
+    int envflags = 0;
+    int dbflags = 0;
+    size_t cachesize;
+    PRFileInfo64 prfinfo;
+    PRStatus prst;
+    char *id2entry_file = NULL;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+    char *data_directories[2] = {0, 0};
+
+    PR_ASSERT(NULL != be);
+
+    if ((NULL == ppEnv) || (NULL == ppDB)) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_get_aux_id2entry_ext", "No memory for DB_ENV or DB handle\n");
+        goto done;
+    }
+    *ppDB = NULL;
+    inst = (ldbm_instance *)be->be_instance_info;
+    if (NULL == inst) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "No instance/env: persistent id2entry is not available\n");
+        goto done;
+    }
+
+    li = inst->inst_li;
+    if (NULL == li) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "No ldbm info: persistent id2entry is not available\n");
+        goto done;
+    }
+
+    priv = li->li_dblayer_private;
+    oconf = (bdb_config *)li->li_dblayer_config;
+    if (NULL == oconf) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "No dblayer info: persistent id2entry is not available\n");
+        goto done;
+    }
+    /* work on a private copy of the config so the live one is untouched */
+    conf = (bdb_config *)slapi_ch_calloc(1, sizeof(bdb_config));
+    memcpy(conf, oconf, sizeof(bdb_config));
+    conf->bdb_spin_count = 0;
+
+    inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+    if (inst_dirp && *inst_dirp) {
+        conf->bdb_home_directory = slapi_ch_smprintf("%s/dbenv", inst_dirp);
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "Instance dir is NULL: persistent id2entry is not available\n");
+        goto done;
+    }
+    conf->bdb_log_directory = slapi_ch_strdup(conf->bdb_home_directory);
+
+    prst = PR_GetFileInfo64(inst_dirp, &prfinfo);
+    if (PR_FAILURE == prst || PR_FILE_DIRECTORY != prfinfo.type) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "No inst dir: persistent id2entry is not available\n");
+        goto done;
+    }
+
+    /* start from a fresh, empty env directory */
+    prst = PR_GetFileInfo64(conf->bdb_home_directory, &prfinfo);
+    if (PR_SUCCESS == prst) {
+        ldbm_delete_dirs(conf->bdb_home_directory);
+    }
+    rval = mkdir_p(conf->bdb_home_directory, 0700);
+    if (rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "Can't create env dir: persistent id2entry is not available\n");
+        goto done;
+    }
+
+    /* use our own env if not passed */
+    if (!*ppEnv) {
+        rval = bdb_make_env(&mypEnv, li);
+        if (rval) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_get_aux_id2entry_ext", "Unable to create new DB_ENV for import/export! %d\n", rval);
+            goto err;
+        }
+    }
+
+    envflags = DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE;
+    cachesize = DEFAULT_DBCACHE_SIZE;
+
+    if (!*ppEnv) {
+        mypEnv->bdb_DB_ENV->set_cachesize(mypEnv->bdb_DB_ENV,
+                                              0, cachesize, conf->bdb_ncache);
+
+        /* probably want to change this -- but for now, create the
+         * mpool files in the instance directory.
+         */
+        mypEnv->bdb_openflags = envflags;
+        data_directories[0] = inst->inst_parent_dir_name;
+        bdb_set_data_dir(mypEnv, data_directories);
+        rval = (mypEnv->bdb_DB_ENV->open)(mypEnv->bdb_DB_ENV,
+                                              conf->bdb_home_directory, envflags, priv->dblayer_file_mode);
+        if (rval) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_get_aux_id2entry_ext", "Unable to open new DB_ENV for upgradedb/reindex %d\n", rval);
+            goto err;
+        }
+        *ppEnv = mypEnv->bdb_DB_ENV;
+    }
+    rval = db_create(&dbp, *ppEnv, 0);
+    if (rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "Unable to create id2entry db handler! %d\n", rval);
+        goto err;
+    }
+
+    rval = dbp->set_pagesize(dbp, (conf->bdb_page_size == 0) ? DBLAYER_PAGESIZE : conf->bdb_page_size);
+    if (rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "dbp->set_pagesize(%" PRIu32 " or %" PRIu32 ") failed %d\n",
+                      conf->bdb_page_size, DBLAYER_PAGESIZE, rval);
+        goto err;
+    }
+
+    if (flags & DBLAYER_AUX_ID2ENTRY_TMP) {
+        id2entry_file = slapi_ch_smprintf("%s/%s_tmp%s",
+                                          inst->inst_dir_name, ID2ENTRY, LDBM_FILENAME_SUFFIX);
+        dbflags = DB_CREATE;
+    } else {
+        id2entry_file = slapi_ch_smprintf("%s/%s",
+                                          inst->inst_dir_name, ID2ENTRY LDBM_FILENAME_SUFFIX);
+    }
+
+    PR_ASSERT(dblayer_inst_exists(inst, NULL));
+    DB_OPEN(envflags, dbp, NULL /* txnid */, id2entry_file, subname, DB_BTREE,
+            dbflags, priv->dblayer_file_mode, rval);
+    if (rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_get_aux_id2entry_ext", "dbp->open(\"%s\") failed: %s (%d)\n",
+                      id2entry_file, dblayer_strerror(rval), rval);
+        if (strstr(dblayer_strerror(rval), "Permission denied")) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_get_aux_id2entry_ext", "Instance directory %s may not be writable\n", inst_dirp);
+        }
+        goto err;
+    }
+    *ppDB = dbp;
+    rval = 0; /* to make it sure ... */
+    goto done;
+err:
+    /* tear down whatever was set up so far; env dir is removed too */
+    if (*ppEnv) {
+        (*ppEnv)->close(*ppEnv, 0);
+        *ppEnv = NULL;
+    }
+    if (conf->bdb_home_directory) {
+        ldbm_delete_dirs(conf->bdb_home_directory);
+    }
+done:
+    if (path) {
+        if (0 == rval) { /* return the path only when successful */
+            *path = slapi_ch_smprintf("%s/%s", inst->inst_parent_dir_name,
+                                      id2entry_file);
+        } else {
+            *path = NULL;
+        }
+    }
+    slapi_ch_free_string(&id2entry_file);
+    if (priv) {
+        slapi_ch_free_string(&conf->bdb_home_directory);
+        slapi_ch_free_string(&conf->bdb_log_directory);
+    }
+    /* Don't free priv->bdb_data_directories since priv doesn't own the memory */
+    slapi_ch_free((void **)&conf);
+    slapi_ch_free((void **)&mypEnv);
+    if (inst_dirp != inst_dir)
+        slapi_ch_free_string(&inst_dirp);
+    return rval;
+}
+
+/*
+ * Release the auxiliary id2entry handle obtained from
+ * bdb_get_aux_id2entry[_ext](): close the DB and DB_ENV (best effort)
+ * and remove the temporary "dbenv" directory under the instance dir.
+ * Always returns 0.
+ */
+int
+bdb_release_aux_id2entry(backend *be, DB *pDB, DB_ENV *pEnv)
+{
+    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+    char *envdir = NULL;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+
+    if (inst == NULL) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_release_aux_id2entry", "No instance/env: persistent id2entry is not available\n");
+    } else {
+        inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                              inst_dir, MAXPATHLEN);
+        if (inst_dirp && *inst_dirp) {
+            envdir = slapi_ch_smprintf("%s/dbenv", inst_dirp);
+        }
+    }
+
+    /* close handles even when the instance lookup failed above */
+    if (pDB) {
+        pDB->close(pDB, 0);
+    }
+    if (pEnv) {
+        pEnv->close(pEnv, 0);
+    }
+    if (envdir) {
+        ldbm_delete_dirs(envdir);
+        slapi_ch_free_string(&envdir);
+    }
+    /* free only when the full path was heap-allocated */
+    if (inst_dirp != inst_dir) {
+        slapi_ch_free_string(&inst_dirp);
+    }
+    return 0;
+}
+
+
+/*
+ * Signal the BDB background threads to stop and wait (with a bounded
+ * timeout) for them to exit.  Safe to call more than once: a prior stop
+ * is detected via conf->bdb_stop_threads.  On timeout,
+ * dblayer_bad_stuff_happened is set so the shutdown is treated as
+ * unclean by bdb_post_close().
+ */
+void
+bdb_pre_close(struct ldbminfo *li)
+{
+    dblayer_private *priv = 0;
+    bdb_config *conf;
+    PRInt32 threadcount = 0;
+
+    PR_ASSERT(NULL != li);
+    priv = li->li_dblayer_private;
+    conf = (bdb_config *)li->li_dblayer_config;
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    if (conf->bdb_stop_threads) /* already stopped.  do nothing... */
+        return;
+
+    /* first, see if there are any housekeeping threads running */
+    PR_Lock(pEnv->bdb_thread_count_lock);
+    threadcount = pEnv->bdb_thread_count;
+    PR_Unlock(pEnv->bdb_thread_count_lock);
+
+    if (threadcount) {
+        PRIntervalTime cvwaittime = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 100);
+        int timedout = 0;
+        /* Print handy-dandy log message */
+        slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "Waiting for %d database threads to stop\n",
+                      threadcount);
+        PR_Lock(pEnv->bdb_thread_count_lock);
+        /* Tell them to stop - we wait until the last possible moment to invoke
+           this.  If we do this much sooner than this, we could find ourselves
+           in a situation where the threads see the stop_threads and exit before
+           we can issue the WaitCondVar below, which means the last thread to
+           exit will do a NotifyCondVar that has nothing waiting.  If we do this
+           inside the lock, we will ensure that the threads will block until we
+           issue the WaitCondVar below */
+        conf->bdb_stop_threads = 1;
+        /* Wait for them to exit */
+        while (pEnv->bdb_thread_count > 0) {
+            PRIntervalTime before = PR_IntervalNow();
+            /* There are 3 ways to wake up from this WaitCondVar:
+               1) The last database thread exits and calls NotifyCondVar - thread_count
+               should be 0 in this case
+               2) Timeout - in this case, thread_count will be > 0 - bad
+               3) A bad error occurs - bad - will be reported as a timeout
+            */
+            PR_WaitCondVar(pEnv->bdb_thread_count_cv, cvwaittime);
+            if (pEnv->bdb_thread_count > 0) {
+                /* still at least 1 thread running - see if this is a timeout */
+                if ((PR_IntervalNow() - before) >= cvwaittime) {
+                    threadcount = pEnv->bdb_thread_count;
+                    timedout = 1;
+                    break;
+                }
+                /* else just a spurious interrupt */
+            }
+        }
+        PR_Unlock(pEnv->bdb_thread_count_lock);
+        if (timedout) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_pre_close", "Timeout after [%d] milliseconds; leave %d database thread(s)...\n",
+                          (DBLAYER_SLEEP_INTERVAL * 100), threadcount);
+            /* unclean stop: bdb_post_close() will skip the "good database"
+               commit, forcing recovery on the next startup */
+            priv->dblayer_bad_stuff_happened = 1;
+            goto timeout_escape;
+        }
+    }
+    slapi_log_err(SLAPI_LOG_INFO, "bdb_pre_close", "All database threads now stopped\n");
+timeout_escape:
+    return;
+}
+
+/*
+ * Final stage of closing the BDB layer: stop the performance counters
+ * (normal mode only), close and free the DB_ENV, and - when the close
+ * was clean and no errors were recorded - commit the "good database"
+ * marker so the next startup can trust the on-disk state.
+ * Returns the DB_ENV close result (0 on success, or if the env was
+ * already closed).
+ */
+int
+bdb_post_close(struct ldbminfo *li, int dbmode)
+{
+    bdb_config *conf = 0;
+    int return_value = 0;
+    PR_ASSERT(NULL != li);
+    dblayer_private *priv = li->li_dblayer_private;
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    conf = (bdb_config *)li->li_dblayer_config;
+
+    /* We close all the files we ever opened, and call pEnv->close. */
+    if (NULL == pEnv) /* db env is already closed. do nothing. */
+        return return_value;
+    /* Shutdown the performance counter stuff */
+    if (DBLAYER_NORMAL_MODE & dbmode) {
+        if (conf->perf_private) {
+            perfctrs_terminate(&conf->perf_private, pEnv->bdb_DB_ENV);
+        }
+    }
+
+    /* Now release the db environment */
+    return_value = pEnv->bdb_DB_ENV->close(pEnv->bdb_DB_ENV, 0);
+    bdb_free_env((void **)&pEnv); /* pEnv is now garbage */
+    priv->dblayer_env = NULL;
+
+    /* only a clean, non-archive/export close is recorded as "good" */
+    if (0 == return_value && !((DBLAYER_ARCHIVE_MODE | DBLAYER_EXPORT_MODE) & dbmode) && !priv->dblayer_bad_stuff_happened) {
+        commit_good_database(conf, priv->dblayer_file_mode);
+    }
+    if (conf->bdb_data_directories) {
+        /* bdb_data_directories are set in bdb_make_env via
+         * dblayer_start, which is paired with dblayer_close. */
+        /* no need to release bdb_home_directory,
+         * which is one of bdb_data_directories */
+        charray_free(conf->bdb_data_directories);
+        conf->bdb_data_directories = NULL;
+    }
+    slapi_ch_free_string(&conf->bdb_dbhome_directory);
+    slapi_ch_free_string(&conf->bdb_home_directory);
+
+    return return_value;
+}
+
+/*
+ * This function is called when the server is shutting down, or when the
+ * backend is being disabled (e.g. backup/restore).
+ * This is not safe to call while other threads are calling into the open
+ * databases !!!   So: DON'T !
+ */
+int
+bdb_close(struct ldbminfo *li, int dbmode)
+{
+    int rc = 0;
+    int is_shutdown = g_get_shutdown();
+    Object *obj;
+
+    /* stop the background threads before touching the instances */
+    bdb_pre_close(li);
+
+    /*
+     * dblayer_close_indexes and pDB->close used to be located above loop:
+     *   while(priv->dblayer_thread_count > 0) in pre_close.
+     * This order fixes a bug: shutdown under the stress makes txn_checkpoint
+     * (checkpoint_thread) fail b/c the mpool might have been already closed.
+     */
+    for (obj = objset_first_obj(li->li_instance_set); obj;
+         obj = objset_next_obj(li->li_instance_set, obj)) {
+        ldbm_instance *inst = (ldbm_instance *)object_get_data(obj);
+        backend *be = inst->inst_be;
+
+        if (is_shutdown) {
+            vlv_close(inst);
+        }
+        if (NULL != be->be_instance_info) {
+            rc |= dblayer_instance_close(be);
+        }
+    }
+
+    if (rc != 0) {
+        /* force recovery next startup if any close failed */
+        dblayer_private *priv;
+        PR_ASSERT(NULL != li);
+        priv = li->li_dblayer_private;
+        PR_ASSERT(NULL != priv);
+        priv->dblayer_bad_stuff_happened = 1;
+    }
+
+    rc |= bdb_post_close(li, dbmode);
+
+    return rc;
+}
+
+/* API to remove the environment */
+/*
+ * Remove the BDB environment files (__db.00#) from the database home
+ * directory.  Returns 0 on success, -1 when no ldbm info is supplied,
+ * or a BDB error code on failure.
+ *
+ * Note: DB_ENV->remove() destroys the handle whether it succeeds or not,
+ * so the handle only needs an explicit close on paths where remove()
+ * is never reached.
+ */
+int
+bdb_remove_env(struct ldbminfo *li)
+{
+    DB_ENV *env = NULL;
+    char *home_dir = NULL;
+    int rc;
+
+    /* Validate the argument before allocating the DB_ENV handle so the
+     * handle is not leaked on the error path. */
+    if (NULL == li) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_remove_env", "No ldbm info is given\n");
+        return -1;
+    }
+
+    rc = db_env_create(&env, 0);
+    if (rc) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_remove_env", "Failed to create DB_ENV (returned: %d)\n", rc);
+        return rc;
+    }
+
+    home_dir = bdb_get_home_dir(li, NULL);
+    if (home_dir) {
+        rc = env->remove(env, home_dir, 0);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_remove_env", "Failed to remove DB environment files. "
+                                                "Please remove %s/__db.00# (# is 1 through 6)\n",
+                          home_dir);
+        }
+    } else {
+        /* Nothing to remove; discard the handle to avoid leaking it. */
+        env->close(env, 0);
+    }
+    return rc;
+}
+
+#if !defined(DB_DUPSORT)
+#define DB_DUPSORT 0
+#endif
+
+/*
+ * Configure a freshly created DB handle before it is opened: choose the
+ * page size (smaller pages for new-idl secondary indexes, larger for
+ * id2entry), set the duplicate-key flags and dup compare function for
+ * new-idl / entryrdn indexes, enable DB_RECNUM for VLV indexes, and
+ * install an ordered btree key compare when the attribute defines one.
+ * Returns 0 on success or the first failing BDB setter's error code.
+ */
+static int
+_dblayer_set_db_callbacks(bdb_config *conf, DB *dbp, struct attrinfo *ai)
+{
+    int idl_use_new = 0;
+    int rc = 0;
+
+    /* With the new idl design, the large 8Kbyte pages we use are not
+       optimal. The page pool churns very quickly as we add new IDs under a
+       sustained add load. Smaller pages stop this happening so much and
+       consequently make us spend less time flushing dirty pages on checkpoints.
+       But 8K is still a good page size for id2entry. So we now allow different
+       page sizes for the primary and secondary indices.
+       Filed as bug: 604654
+     */
+    if (idl_get_idl_new()) {
+        rc = dbp->set_pagesize(
+            dbp,
+            (conf->bdb_index_page_size == 0) ? DBLAYER_INDEX_PAGESIZE : conf->bdb_index_page_size);
+    } else {
+        rc = dbp->set_pagesize(
+            dbp,
+            (conf->bdb_page_size == 0) ? DBLAYER_PAGESIZE : conf->bdb_page_size);
+    }
+    if (rc)
+        return rc;
+
+    /*
+     * If using the "new" idl, set the flags and the compare function.
+     * If using the "old" idl, we still need to set the index DB flags
+     * for the attribute "entryRDN".
+     */
+    if (((idl_use_new = idl_get_idl_new()) ||
+         0 == strcasecmp(ai->ai_type, LDBM_ENTRYRDN_STR)) &&
+        !(ai->ai_indexmask & INDEX_VLV)) {
+        /* set the flags */
+        rc = dbp->set_flags(dbp, DB_DUP | DB_DUPSORT);
+        if (rc)
+            return rc;
+        /* set the compare function */
+        if (ai->ai_dup_cmp_fn) {
+            /* If set, use the special dup compare callback */
+            rc = dbp->set_dup_compare(dbp, ai->ai_dup_cmp_fn);
+        } else if (idl_use_new) {
+            rc = dbp->set_dup_compare(dbp, idl_new_compare_dups);
+        }
+        if (rc)
+            return rc;
+    }
+
+    if (ai->ai_indexmask & INDEX_VLV) {
+        /*
+         * Need index with record numbers for
+         * Virtual List View index
+         */
+        rc = dbp->set_flags(dbp, DB_RECNUM);
+        if (rc)
+            return rc;
+    } else if (ai->ai_key_cmp_fn) { /* set in attr_index_config() */
+        /*
+          This is so that we can have ordered keys in the index, so that
+          greater than/less than searches work on indexed attrs.  We had
+          to introduce this when we changed the integer key format from
+          a 32/64 bit value to a normalized string value.  The default
+          bdb key cmp is based on length and lexicographic order, which
+          does not work with integer strings.
+
+          NOTE: If we ever need to use app_private for something else, we
+          will have to create some sort of data structure with different
+          fields for different uses.  We will also need to have a new()
+          function that creates and allocates that structure, and a
+          destroy() function that destroys the structure, and make sure
+          to call it when the DB* is closed and/or freed.
+        */
+        dbp->app_private = (void *)ai->ai_key_cmp_fn;
+        dbp->set_bt_compare(dbp, bdb_bt_compare);
+    }
+    return rc;
+}
+
+/* Routines for opening and closing random files in the DB_ENV.
+   Used by ldif2db merging code currently.
+
+   Opens (optionally creating/truncating) the index file 'indexname' for
+   backend 'be' and returns the handle in *ppDB.  If the instance lives in
+   a secondary data directory and the file does not exist yet, the file is
+   first created with an absolute path, then re-opened relatively.
+
+   Return value:
+       Success: 0
+    Failure: -1
+ */
+int
+bdb_get_db(backend *be, char *indexname, int open_flag, struct attrinfo *ai, DB **ppDB)
+{
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+    int open_flags = 0;
+    char *file_name = NULL;
+    char *rel_path = NULL;
+    bdb_db_env *pENV = 0;
+    bdb_config *conf = NULL;
+    dblayer_private *priv = NULL;
+    int return_value = 0;
+    DB *dbp = NULL;
+    char *subname = NULL;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+
+    PR_ASSERT(NULL != li);
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    if (NULL == inst->inst_dir_name) {
+        if (dblayer_get_instance_data_dir(be) != 0)
+            return -1;
+    }
+
+    /* A relative instance path must be registered in db_data_dir. */
+    if (NULL != inst->inst_parent_dir_name) {
+        if (!charray_utf8_inlist(conf->bdb_data_directories,
+                                 inst->inst_parent_dir_name) &&
+            !is_fullpath(inst->inst_dir_name))
+
+        {
+            /* fixed log tag: this function is bdb_get_db, not dblayer_open_file */
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_get_db", "The instance path %s is not registered for db_data_dir, "
+                                               "although %s is a relative path.\n",
+                          inst->inst_parent_dir_name, inst->inst_dir_name);
+            return -1;
+        }
+    }
+
+    pENV = (bdb_db_env *)priv->dblayer_env;
+    if (inst->inst_db)
+        pENV = (bdb_db_env *)inst->inst_db;
+
+    PR_ASSERT(NULL != pENV);
+    file_name = slapi_ch_smprintf("%s%s", indexname, LDBM_FILENAME_SUFFIX);
+    rel_path = slapi_ch_smprintf("%s/%s", inst->inst_dir_name, file_name);
+
+    open_flags = DB_THREAD;
+    if (open_flag & DBOPEN_CREATE)
+        open_flags |= DB_CREATE;
+    if (open_flag & DBOPEN_TRUNCATE)
+        open_flags |= DB_TRUNCATE;
+
+    if (!ppDB)
+        goto out;
+    return_value = db_create(ppDB, pENV->bdb_DB_ENV, 0);
+    if (0 != return_value)
+        goto out;
+
+    dbp = *ppDB;
+    return_value = _dblayer_set_db_callbacks(conf, dbp, ai);
+    if (return_value)
+        goto out;
+
+    /* The subname argument allows applications to have
+     * subdatabases, i.e., multiple databases inside of a single
+     * physical file. This is useful when the logical databases
+     * are both numerous and reasonably small, in order to
+     * avoid creating a large number of underlying files.
+     */
+    /* If inst_parent_dir_name is not the primary DB dir &&
+     * the index file does not exist */
+    if ((charray_get_index(conf->bdb_data_directories,
+                           inst->inst_parent_dir_name) > 0) &&
+        !dblayer_inst_exists(inst, file_name)) {
+        char *abs_file_name = NULL;
+        /* create a file with abs path, then try again */
+
+        inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+        if (!inst_dirp || !*inst_dirp) {
+            return_value = -1;
+            goto out;
+        }
+        abs_file_name = slapi_ch_smprintf("%s%c%s",
+                                          inst_dirp, get_sep(inst_dirp), file_name);
+        DB_OPEN(pENV->bdb_openflags,
+                dbp, NULL /* txnid */, abs_file_name, subname, DB_BTREE,
+                open_flags, priv->dblayer_file_mode, return_value);
+        dbp->close(dbp, 0);
+        /* Free the absolute path here so it is not leaked when the
+         * handle re-creation or callback setup below fails. */
+        slapi_ch_free_string(&abs_file_name);
+        return_value = db_create(ppDB, pENV->bdb_DB_ENV, 0);
+        if (0 != return_value) {
+            goto out;
+        }
+        dbp = *ppDB;
+        return_value = _dblayer_set_db_callbacks(conf, dbp, ai);
+        if (return_value)
+            goto out;
+    }
+    DB_OPEN(pENV->bdb_openflags,
+            dbp, NULL, /* txnid */ rel_path, subname, DB_BTREE,
+            open_flags, priv->dblayer_file_mode, return_value);
+out:
+    slapi_ch_free((void **)&file_name);
+    slapi_ch_free((void **)&rel_path);
+    if (inst_dirp != inst_dir) {
+        slapi_ch_free_string(&inst_dirp);
+    }
+    /* close the database handle to avoid handle leak */
+    if (dbp && (return_value != 0)) {
+        bdb_close_file(&dbp);
+    }
+    return return_value;
+}
+
+/*
+ * Close a DB handle and clear the caller's pointer.
+ * Returns the DB->close() result, or 1 when there is no handle to close
+ * (db is NULL or *db is NULL - the latter used to crash here).
+ */
+int
+bdb_close_file(DB **db)
+{
+    if (db && *db) {
+        DB *dbp = *db;
+        *db = NULL; /* To avoid to leave stale DB, set NULL before closing. */
+        return dbp->close(dbp, 0);
+    }
+    return 1;
+}
+
+
+/*
+  bdb_db_remove_ex assumptions:
+
+  No environment has the given database open.
+
+  Removes the database file 'path' (or the subdatabase 'dbName' inside it)
+  from the environment 'env'; with a NULL env the file is removed outside
+  any environment.  When use_lock is set, the environment write lock is
+  held for the duration.  Returns 0 on success or a BDB error code.
+*/
+
+static int
+bdb_db_remove_ex(bdb_db_env *env, char const path[], char const dbName[], PRBool use_lock)
+{
+    DB_ENV *db_env = 0;
+    int rc;
+    DB *db;
+
+    if (env) {
+        if (use_lock)
+            slapi_rwlock_wrlock(env->bdb_env_lock); /* We will be causing logging activity */
+        db_env = env->bdb_DB_ENV;
+    }
+
+    rc = db_create(&db, db_env, 0); /* must use new handle to database */
+    if (0 != rc) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_db_remove_ex", "Failed to create db (%d) %s\n",
+                      rc, dblayer_strerror(rc));
+        goto done;
+    }
+    rc = db->remove(db, path, dbName, 0); /* kiss the db goodbye! */
+
+done:
+    if (env) {
+        if (use_lock)
+            slapi_rwlock_unlock(env->bdb_env_lock);
+    }
+
+    return rc;
+}
+
+
+/* Convenience wrapper around bdb_db_remove_ex() that always takes the
+ * environment lock. */
+int
+bdb_db_remove(bdb_db_env *env, char const path[], char const dbName[])
+{
+    return bdb_db_remove_ex(env, path, dbName, PR_TRUE);
+}
+
+#define DBLAYER_CACHE_DELAY PR_MillisecondsToInterval(250)
+/*
+ * Remove the index file for attribute 'a' of backend 'be'.
+ * Optionally forces a checkpoint first, waits until no thread is using
+ * the index handle any more, closes the handle, unlinks it from the
+ * instance handle list, and finally removes the file via
+ * bdb_db_remove_ex().  Returns 0 on success or when there is nothing to
+ * do, -1 (or a BDB error code) on failure.
+ */
+int
+bdb_rm_db_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_checkpoint)
+{
+    struct ldbminfo *li = NULL;
+    dblayer_private *priv;
+    bdb_db_env *pEnv = NULL;
+    ldbm_instance *inst = NULL;
+    dblayer_handle *handle = NULL;
+    char dbName[MAXPATHLEN] = {0};
+    char *dbNamep = NULL;
+    char *p;
+    int dbbasenamelen, dbnamelen;
+    int rc = 0;
+    DB *db = 0;
+
+    /* All of these early returns are benign no-ops: nothing to remove. */
+    if ((NULL == be) || (NULL == be->be_database)) {
+        return rc;
+    }
+    inst = (ldbm_instance *)be->be_instance_info;
+    if (NULL == inst) {
+        return rc;
+    }
+    li = (struct ldbminfo *)be->be_database->plg_private;
+    if (NULL == li) {
+        return rc;
+    }
+    priv = li->li_dblayer_private;
+    if (NULL == priv) {
+        return rc;
+    }
+    pEnv = (bdb_db_env *)priv->dblayer_env;
+    if (NULL == pEnv) { /* db does not exist */
+        return rc;
+    }
+    /* Added for bug 600401. Somehow the checkpoint thread deadlocked on
+     index file with this function, index file couldn't be removed on win2k.
+     Force a checkpoint here to break deadlock.
+  */
+    if (0 == no_force_checkpoint) {
+        dblayer_force_checkpoint(li);
+    }
+
+    if (0 == dblayer_get_index_file(be, a, &db, 0 /* Don't create an index file
+                                                   if it does not exist. */)) {
+        if (use_lock)
+            slapi_rwlock_wrlock(pEnv->bdb_env_lock); /* We will be causing logging activity */
+        /* first, remove the file handle for this index, if we have it open */
+        PR_Lock(inst->inst_handle_list_mutex);
+        if (a->ai_dblayer) {
+            /* there is a handle */
+            handle = (dblayer_handle *)a->ai_dblayer;
+
+            /* when we successfully called dblayer_get_index_file we bumped up
+         the reference count of how many threads are using the index. So we
+         must manually back off the count by one here.... rwagner */
+
+            dblayer_release_index_file(be, a, db);
+
+            /* Busy-wait (dropping the list mutex each round) until every
+             * other user of this index file has released it. */
+            while (slapi_atomic_load_64(&(a->ai_dblayer_count), __ATOMIC_ACQUIRE) > 0) {
+                /* someone is using this index file */
+                /* ASSUMPTION: you have already set the INDEX_OFFLINE flag, because
+                 * you intend to mess with this index.  therefore no new requests
+                 * for this indexfile should happen, so the dblayer_count should
+                 * NEVER increase.
+                 */
+                PR_ASSERT(a->ai_indexmask & INDEX_OFFLINE);
+                PR_Unlock(inst->inst_handle_list_mutex);
+                DS_Sleep(DBLAYER_CACHE_DELAY);
+                PR_Lock(inst->inst_handle_list_mutex);
+            }
+            bdb_close_file(&(handle->dblayer_dbp));
+
+            /* remove handle from handle-list */
+            if (inst->inst_handle_head == handle) {
+                inst->inst_handle_head = handle->dblayer_handle_next;
+                if (inst->inst_handle_tail == handle) {
+                    inst->inst_handle_tail = NULL;
+                }
+            } else {
+                dblayer_handle *hp;
+
+                for (hp = inst->inst_handle_head; hp; hp = hp->dblayer_handle_next) {
+                    if (hp->dblayer_handle_next == handle) {
+                        hp->dblayer_handle_next = handle->dblayer_handle_next;
+                        if (inst->inst_handle_tail == handle) {
+                            inst->inst_handle_tail = hp;
+                        }
+                        break;
+                    }
+                }
+            }
+            /* Build the full file path "<inst dir><sep><attr>.db" and remove it.
+             * Grow the buffer if the combined name exceeds MAXPATHLEN. */
+            dbNamep = dblayer_get_full_inst_dir(li, inst, dbName, MAXPATHLEN);
+            if (dbNamep && *dbNamep) {
+                dbbasenamelen = strlen(dbNamep);
+                dbnamelen = dbbasenamelen + strlen(a->ai_type) + 6;
+                if (dbnamelen > MAXPATHLEN) {
+                    dbNamep = (char *)slapi_ch_realloc(dbNamep, dbnamelen);
+                }
+                p = dbNamep + dbbasenamelen;
+                sprintf(p, "%c%s%s", get_sep(dbNamep), a->ai_type, LDBM_FILENAME_SUFFIX);
+                rc = bdb_db_remove_ex(pEnv, dbNamep, 0, 0);
+                a->ai_dblayer = NULL;
+            } else {
+                rc = -1;
+            }
+            if (dbNamep != dbName) {
+                slapi_ch_free_string(&dbNamep);
+            }
+            slapi_ch_free((void **)&handle);
+        } else {
+            /* no handle to close */
+        }
+        PR_Unlock(inst->inst_handle_list_mutex);
+        if (use_lock)
+            slapi_rwlock_unlock(pEnv->bdb_env_lock);
+    }
+
+    return rc;
+}
+
+
+/*
+ * Transaction stuff. The idea is that the caller doesn't need to
+ * know the transaction mechanism underneath (because the caller is
+ * typically a few calls up the stack from any DB stuff).
+ * Sadly, in slapd there was no handy structure associated with
+ * an LDAP operation, and passed around everywhere, so we had
+ * to invent the back_txn structure.
+ * The lower levels of the back-end look into this structure, and
+ * take out the DB_TXN they need.
+ */
+
+/*
+ * Begin a (possibly nested) BDB transaction.  When use_lock is set, the
+ * environment read lock is acquired and - on success - deliberately HELD
+ * across the return; the matching bdb_txn_commit()/bdb_txn_abort()
+ * releases it.  The new txn becomes the thread's current private txn and
+ * is stored into *txn (when given).  Returns 0 on success (also when
+ * transactions are disabled) or a BDB error code.
+ * NOTE(review): the log tags below still carry the pre-split name
+ * "dblayer_txn_begin_ext".
+ */
+int
+bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock)
+{
+    int return_value = -1;
+    bdb_config *conf = NULL;
+    dblayer_private *priv = NULL;
+    back_txn new_txn = {NULL};
+    PR_ASSERT(NULL != li);
+    /*
+     * When server is shutting down, some components need to
+     * flush some data (e.g. replication to write ruv).
+     * So don't check shutdown signal unless we can't write.
+     */
+    if (g_get_shutdown() == SLAPI_SHUTDOWN_DISKFULL) {
+        return return_value;
+    }
+
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    if (txn) {
+        txn->back_txn_txn = NULL;
+    }
+
+    if (conf->bdb_enable_transactions) {
+        int txn_begin_flags;
+
+        bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+        if (use_lock)
+            slapi_rwlock_rdlock(pEnv->bdb_env_lock);
+        if (!parent_txn) {
+            /* see if we have a stored parent txn */
+            back_txn *par_txn_txn = dblayer_get_pvt_txn();
+            if (par_txn_txn) {
+                parent_txn = par_txn_txn->back_txn_txn;
+            }
+        }
+        if (conf->bdb_txn_wait) {
+            txn_begin_flags = 0;
+        } else {
+            txn_begin_flags = DB_TXN_NOWAIT;
+        }
+        return_value = TXN_BEGIN(pEnv->bdb_DB_ENV,
+                                 (DB_TXN *)parent_txn,
+                                 &new_txn.back_txn_txn,
+                                 txn_begin_flags);
+        if (0 != return_value) {
+            /* txn begin failed: drop the env lock here; on success it is
+             * released by commit/abort instead. */
+            if (use_lock)
+                slapi_rwlock_unlock(pEnv->bdb_env_lock);
+        } else {
+            /* this txn is now our current transaction for current operations
+               and new parent for any nested transactions created */
+            if (use_lock && log_flush_thread) {
+                int txn_id = new_txn.back_txn_txn->id(new_txn.back_txn_txn);
+                PR_Lock(sync_txn_log_flush);
+                txn_in_progress_count++;
+                slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_begin_ext",
+                              "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n",
+                              trans_batch_count, txn_in_progress_count, txn_id);
+                PR_Unlock(sync_txn_log_flush);
+            }
+            dblayer_push_pvt_txn(&new_txn);
+            if (txn) {
+                txn->back_txn_txn = new_txn.back_txn_txn;
+            }
+        }
+    } else {
+        return_value = 0;
+    }
+    if (0 != return_value) {
+        slapi_log_err(SLAPI_LOG_CRIT,
+                      "dblayer_txn_begin_ext", "Serious Error---Failed in dblayer_txn_begin, err=%d (%s)\n",
+                      return_value, dblayer_strerror(return_value));
+    }
+    return return_value;
+}
+
+/*
+ * Commit the given transaction (or, when txn is NULL or empty, the
+ * thread's current private txn).  With durable transactions and use_lock
+ * set, the txn log flush is either delegated to the dedicated flush
+ * thread (batched, waiting for completion) or performed inline; finally
+ * the environment read lock taken by bdb_txn_begin() is released.
+ * Returns 0 on success (or when there is nothing to commit) or a BDB
+ * error code.
+ */
+int
+bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock)
+{
+    int return_value = -1;
+    bdb_config *conf = NULL;
+    dblayer_private *priv = NULL;
+    DB_TXN *db_txn = NULL;
+    back_txn *cur_txn = NULL;
+    int txn_id = 0;
+    int txn_batch_slot = 0;
+
+    PR_ASSERT(NULL != li);
+
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    /* use the transaction we are given - if none, see if there
+       is a transaction in progress */
+    if (txn) {
+        db_txn = txn->back_txn_txn;
+    }
+    cur_txn = dblayer_get_pvt_txn();
+    if (!db_txn) {
+        if (cur_txn) {
+            db_txn = cur_txn->back_txn_txn;
+        }
+    }
+    if (NULL != db_txn &&
+        1 != conf->bdb_stop_threads &&
+        priv->dblayer_env &&
+        conf->bdb_enable_transactions) {
+        bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+        txn_id = db_txn->id(db_txn);
+        return_value = TXN_COMMIT(db_txn, 0);
+        /* if we were given a transaction, and it is the same as the
+           current transaction in progress, pop it off the stack
+           or, if no transaction was given, we must be using the
+           current one - must pop it */
+        if (!txn || (cur_txn && (cur_txn->back_txn_txn == db_txn))) {
+            dblayer_pop_pvt_txn();
+        }
+        if (txn) {
+            /* this handle is no longer valid - set it to NULL */
+            txn->back_txn_txn = NULL;
+        }
+        if ((conf->bdb_durable_transactions) && use_lock) {
+            if (trans_batch_limit > 0 && log_flush_thread) {
+                /* let log_flush thread do the flushing */
+                PR_Lock(sync_txn_log_flush);
+                txn_batch_slot = trans_batch_count++;
+                txn_log_flush_pending[txn_batch_slot] = txn_id;
+                slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before notify): batchcount: %d, "
+                                                                            "txn_in_progress: %d, curr_txn: %x\n",
+                              trans_batch_count,
+                              txn_in_progress_count, txn_id);
+                /*
+                 * The log flush thread will periodically flush the txn log,
+                 * but in two cases it should be notified to do it immediately:
+                 * - the batch limit is passed
+                 * - there is no other outstanding txn
+                 */
+                if (trans_batch_count > trans_batch_limit ||
+                    trans_batch_count == txn_in_progress_count) {
+                    PR_NotifyCondVar(sync_txn_log_do_flush);
+                }
+                /*
+                 * We need to wait until the txn has been flushed before continuing
+                 * and returning success to the client, so as not to violate
+                 * durability.  PR_WaitCondVar releases and reacquires the lock.
+                 */
+                while (txn_log_flush_pending[txn_batch_slot] == txn_id) {
+                    PR_WaitCondVar(sync_txn_log_flush_done, PR_INTERVAL_NO_TIMEOUT);
+                }
+                txn_in_progress_count--;
+                slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_commit_ext", "(before unlock): batchcount: %d, "
+                                                                            "txn_in_progress: %d, curr_txn %x\n",
+                              trans_batch_count,
+                              txn_in_progress_count, txn_id);
+                PR_Unlock(sync_txn_log_flush);
+            } else if (trans_batch_limit == FLUSH_REMOTEOFF) { /* user remotely turned batching off */
+                LOG_FLUSH(pEnv->bdb_DB_ENV, 0);
+            }
+        }
+        /* release the env read lock acquired in bdb_txn_begin() */
+        if (use_lock)
+            slapi_rwlock_unlock(pEnv->bdb_env_lock);
+    } else {
+        return_value = 0;
+    }
+
+    if (0 != return_value) {
+        slapi_log_err(SLAPI_LOG_CRIT,
+                      "dblayer_txn_commit_ext", "Serious Error---Failed in dblayer_txn_commit, err=%d (%s)\n",
+                      return_value, dblayer_strerror(return_value));
+        if (LDBM_OS_ERR_IS_DISKFULL(return_value)) {
+            operation_out_of_disk_space();
+        }
+    }
+    return return_value;
+}
+
+/*
+ * Abort the given transaction (or, when txn is NULL or empty, the
+ * thread's current private txn), update the in-progress counter for the
+ * log flush thread, and release the environment read lock taken by
+ * bdb_txn_begin().  Returns 0 on success (or when there is nothing to
+ * abort) or a BDB error code.
+ */
+int
+bdb_txn_abort(struct ldbminfo *li, back_txn *txn, PRBool use_lock)
+{
+    int return_value = -1;
+    dblayer_private *priv = NULL;
+    DB_TXN *db_txn = NULL;
+    back_txn *cur_txn = NULL;
+
+    PR_ASSERT(NULL != li);
+
+    priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    /* use the transaction we are given - if none, see if there
+       is a transaction in progress */
+    if (txn) {
+        db_txn = txn->back_txn_txn;
+    }
+    cur_txn = dblayer_get_pvt_txn();
+    if (!db_txn) {
+        if (cur_txn) {
+            db_txn = cur_txn->back_txn_txn;
+        }
+    }
+    if (NULL != db_txn &&
+        priv->dblayer_env &&
+        BDB_CONFIG(li)->bdb_enable_transactions) {
+        int txn_id = db_txn->id(db_txn);
+        bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+        if (use_lock && log_flush_thread) {
+            PR_Lock(sync_txn_log_flush);
+            txn_in_progress_count--;
+            PR_Unlock(sync_txn_log_flush);
+            slapi_log_err(SLAPI_LOG_BACKLDBM, "dblayer_txn_abort_ext",
+                          "Batchcount: %d, txn_in_progress: %d, curr_txn: %x\n",
+                          trans_batch_count, txn_in_progress_count, txn_id);
+        }
+        return_value = TXN_ABORT(db_txn);
+        /* if we were given a transaction, and it is the same as the
+           current transaction in progress, pop it off the stack
+           or, if no transaction was given, we must be using the
+           current one - must pop it */
+        if (!txn || (cur_txn && (cur_txn->back_txn_txn == db_txn))) {
+            dblayer_pop_pvt_txn();
+        }
+        if (txn) {
+            /* this handle is no longer valid - set it to NULL */
+            txn->back_txn_txn = NULL;
+        }
+        /* release the env read lock acquired in bdb_txn_begin() */
+        if (use_lock)
+            slapi_rwlock_unlock(pEnv->bdb_env_lock);
+    } else {
+        return_value = 0;
+    }
+
+    if (0 != return_value) {
+        slapi_log_err(SLAPI_LOG_CRIT,
+                      "dblayer_txn_abort_ext", "Serious Error---Failed in dblayer_txn_abort, err=%d (%s)\n",
+                      return_value, dblayer_strerror(return_value));
+        if (LDBM_OS_ERR_IS_DISKFULL(return_value)) {
+            operation_out_of_disk_space();
+        }
+    }
+    return return_value;
+}
+
+/*
+ * Return the optimal I/O block size for this database configuration:
+ * the configured (or default) page size, minus the extension page header
+ * when no IDL divisor is set, otherwise the page size divided by it.
+ */
+uint32_t
+dblayer_get_optimal_block_size(struct ldbminfo *li)
+{
+    uint32_t page_size;
+    uint32_t divisor;
+
+    PR_ASSERT(NULL != li);
+
+    page_size = BDB_CONFIG(li)->bdb_page_size;
+    if (0 == page_size) {
+        page_size = DBLAYER_PAGESIZE;
+    }
+    divisor = li->li_dblayer_private->dblayer_idl_divisor;
+    return (0 == divisor) ? (page_size - DB_EXTN_PAGE_HEADER_SIZE)
+                          : (page_size / divisor);
+}
+
+
+
+/* code which implements checkpointing and log file truncation */
+
+/*
+ * Spawn perf_threadmain as a detached global thread.
+ * Returns 0 on success, -1 if thread creation fails.
+ */
+static int
+bdb_start_perf_thread(struct ldbminfo *li)
+{
+    PRThread *tid = PR_CreateThread(PR_USER_THREAD,
+                                    (VFP)(void *)perf_threadmain, li,
+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                    PR_UNJOINABLE_THREAD,
+                                    SLAPD_DEFAULT_THREAD_STACKSIZE);
+    if (NULL != tid) {
+        return 0;
+    }
+    PRErrorCode prerr = PR_GetError();
+    slapi_log_err(SLAPI_LOG_ERR, "bdb_start_perf_thread",
+                  "Failed to create database perf thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                  prerr, slapd_pr_strerror(prerr));
+    return -1;
+}
+
+/* Performance thread */
+/*
+ * Loops until bdb_stop_threads is set, updating the performance counters
+ * roughly once a second via perfctrs_wait().  Registers itself in the
+ * environment's thread count so shutdown can wait for it.
+ */
+static int
+perf_threadmain(void *param)
+{
+    struct ldbminfo *li = NULL;
+
+    PR_ASSERT(NULL != param);
+    li = (struct ldbminfo *)param;
+
+    dblayer_private *priv = li->li_dblayer_private;
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+    PR_ASSERT(NULL != priv);
+
+    INCR_THREAD_COUNT(pEnv);
+
+    while (!BDB_CONFIG(li)->bdb_stop_threads) {
+        /* sleep for a while, updating perf counters if we need to */
+        perfctrs_wait(1000, BDB_CONFIG(li)->perf_private, pEnv->bdb_DB_ENV);
+    }
+
+    DECR_THREAD_COUNT(pEnv);
+    slapi_log_err(SLAPI_LOG_TRACE, "perf_threadmain", "Leaving perf_threadmain\n");
+    return 0;
+}
+
+/*
+ * Spawn deadlock_threadmain as a detached global thread.
+ * Returns 0 on success, -1 if thread creation fails.
+ */
+static int
+bdb_start_deadlock_thread(struct ldbminfo *li)
+{
+    int rc = 0;
+    PRThread *tid = PR_CreateThread(PR_USER_THREAD,
+                                    (VFP)(void *)deadlock_threadmain, li,
+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                    PR_UNJOINABLE_THREAD,
+                                    SLAPD_DEFAULT_THREAD_STACKSIZE);
+    if (NULL == tid) {
+        PRErrorCode prerr = PR_GetError();
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_start_deadlock_thread",
+                      "Failed to create database deadlock thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                      prerr, slapd_pr_strerror(prerr));
+        rc = -1;
+    }
+    return rc;
+}
+
+static const u_int32_t default_flags = DB_NEXT;
+
+/* this is the loop delay - how long after we release the db pages
+   until we acquire them again */
+#define TXN_TEST_LOOP_WAIT(msecs)                                      \
+    do {                                                               \
+        if (msecs) {                                                   \
+            DS_Sleep(PR_MillisecondsToInterval(slapi_rand() % msecs)); \
+        }                                                              \
+    } while (0)
+
+/* this is how long we hold the pages open until we close the cursors */
+#define TXN_TEST_PAGE_HOLD(msecs)                                      \
+    do {                                                               \
+        if (msecs) {                                                   \
+            DS_Sleep(PR_MillisecondsToInterval(slapi_rand() % msecs)); \
+        }                                                              \
+    } while (0)
+
+/* Cursor-iteration state for one index file walked by the txn test thread. */
+typedef struct txn_test_iter
+{
+    DB *db;           /* index database being walked */
+    DBC *cur;         /* open cursor, or NULL when not positioned */
+    uint64_t cnt;     /* records visited so far */
+    const char *attr; /* attribute/index name (used in logging) */
+    uint32_t flags;   /* cursor get flags (default_flags | extras) */
+    backend *be;      /* owning backend, checked for shutdown state */
+} txn_test_iter;
+
+/* Tunables for the txn test thread, read from TXN_TEST_* env vars. */
+typedef struct txn_test_cfg
+{
+    PRUint32 hold_msec; /* max random time to hold pages open */
+    PRUint32 loop_msec; /* max random delay between iterations */
+    uint32_t flags;     /* extra cursor flags (e.g. DB_RMW) */
+    int use_txn;        /* nonzero: wrap reads in a transaction */
+    char **indexes;     /* index names to walk */
+    int verbose;        /* nonzero: extra logging */
+} txn_test_cfg;
+
+/* Allocate and populate a fresh iterator; the cursor is opened lazily. */
+static txn_test_iter *
+new_txn_test_iter(DB *db, const char *attr, backend *be, uint32_t flags)
+{
+    txn_test_iter *it = (txn_test_iter *)slapi_ch_malloc(sizeof(txn_test_iter));
+    it->db = db;
+    it->attr = attr;
+    it->be = be;
+    it->flags = default_flags | flags;
+    it->cur = NULL;
+    it->cnt = 0;
+    return it;
+}
+
+/*
+ * Reset an iterator: close its cursor if it still looks usable, then
+ * clear the count and flags so the next pass starts from scratch.
+ */
+static void
+init_txn_test_iter(txn_test_iter *tti)
+{
+    if (tti->cur) {
+        /* NOTE(review): 0x58585858 is "XXXX" - presumably the pattern the
+         * DB library scribbles over freed handles; confirm against the
+         * libdb version in use. */
+        if (tti->cur->dbp && (tti->cur->dbp->open_flags == 0x58585858)) {
+            /* already closed? */
+        } else if (tti->be && (tti->be->be_state != BE_STATE_STARTED)) {
+            /* already closed? */
+        } else {
+            tti->cur->c_close(tti->cur);
+        }
+        tti->cur = NULL;
+    }
+    tti->cnt = 0;
+    tti->flags = default_flags;
+}
+
+/* Close the iterator's cursor (via init) and free the iterator itself. */
+static void
+free_txn_test_iter(txn_test_iter *tti)
+{
+    init_txn_test_iter(tti);
+    slapi_ch_free((void **)&tti);
+}
+
+/*
+ * Free every iterator in the list and then the list itself; *tticnt is
+ * decremented to 0 as entries are released and *ttilist ends up NULL.
+ */
+static void
+free_ttilist(txn_test_iter ***ttilist, uint64_t *tticnt)
+{
+    if (!ttilist || !*ttilist || !**ttilist) {
+        return;
+    }
+    while (*tticnt > 0) {
+        (*tticnt)--;
+        free_txn_test_iter((*ttilist)[*tticnt]);
+    }
+    /* NOTE(review): frees *ttilist and NULLs it; the cast arguably should
+     * read (void **) to match slapi_ch_free's signature. */
+    slapi_ch_free((void *)ttilist);
+}
+
+/* Reset every iterator in the list (no-op for a NULL/empty list). */
+static void
+init_ttilist(txn_test_iter **ttilist, uint64_t tticnt)
+{
+    uint64_t i;
+
+    if (NULL == ttilist || NULL == *ttilist) {
+        return;
+    }
+    for (i = 0; i < tticnt; i++) {
+        init_txn_test_iter(ttilist[i]);
+    }
+}
+
+/* Log the per-attribute visit counters, last list entry first. */
+static void
+print_ttilist(txn_test_iter **ttilist, uint64_t tticnt)
+{
+    for (; tticnt > 0; tticnt--) {
+        txn_test_iter *tti = ttilist[tticnt - 1];
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "txn_test_threadmain", "attr [%s] cnt [%" PRIu64 "]\n",
+                      tti->attr, tti->cnt);
+    }
+}
+
+#define TXN_TEST_IDX_OK_IF_NULL "nscpEntryDN"
+
+/*
+ * Populate the txn test configuration from the TXN_TEST_* environment
+ * variables, falling back to built-in defaults.  Each multi-use variable
+ * is read once into a local instead of calling getenv() repeatedly.
+ */
+static void
+txn_test_init_cfg(txn_test_cfg *cfg)
+{
+    static char *indexlist = "aci,entryrdn,numsubordinates,uid,ancestorid,objectclass,uniquemember,cn,parentid,nsuniqueid,sn,id2entry," TXN_TEST_IDX_OK_IF_NULL;
+    char *indexlist_copy = NULL;
+    const char *hold_msec_str = getenv(TXN_TEST_HOLD_MSEC);
+    const char *loop_msec_str = getenv(TXN_TEST_LOOP_MSEC);
+    const char *indexes_str = getenv(TXN_TEST_INDEXES);
+
+    cfg->hold_msec = hold_msec_str ? atoi(hold_msec_str) : 200;
+    cfg->loop_msec = loop_msec_str ? atoi(loop_msec_str) : 10;
+    cfg->flags = getenv(TXN_TEST_USE_RMW) ? DB_RMW : 0;
+    cfg->use_txn = getenv(TXN_TEST_USE_TXN) ? 1 : 0;
+    /* slapi_str2charray modifies its input, so work on a copy */
+    indexlist_copy = slapi_ch_strdup(indexes_str ? indexes_str : indexlist);
+    cfg->indexes = slapi_str2charray(indexlist_copy, ",");
+    slapi_ch_free_string(&indexlist_copy);
+    cfg->verbose = getenv(TXN_TEST_VERBOSE) ? 1 : 0;
+
+    slapi_log_err(SLAPI_LOG_ERR, "txn_test_init_cfg",
+                  "Config hold_msec [%d] loop_msec [%d] rmw [%d] txn [%d] indexes [%s]\n",
+                  cfg->hold_msec, cfg->loop_msec, cfg->flags, cfg->use_txn,
+                  indexes_str ? indexes_str : indexlist);
+}
+
+/*
+ * Main function of the transaction stress-test thread (debug aid).
+ *
+ * Opens a cursor on every configured index (cfg.indexes) of every backend
+ * instance and walks all cursors in lock-step, optionally inside a single
+ * transaction (TXN_TEST_USE_TXN), to exercise the deadlock detection and
+ * retry paths.  It restarts from scratch (wait_for_init) whenever a
+ * backend or DB handle goes away, and exits when bdb_stop_threads is set.
+ */
+static int
+txn_test_threadmain(void *param)
+{
+    struct ldbminfo *li = NULL;
+    Object *inst_obj;
+    int rc = 0;
+    txn_test_iter **ttilist = NULL;
+    uint64_t tticnt = 0;
+    DB_TXN *txn = NULL;
+    txn_test_cfg cfg = {0};
+    uint64_t counter = 0;
+    char keybuf[8192];
+    char databuf[8192];
+    int dbattempts = 0;
+    int dbmaxretries = 3;
+
+    PR_ASSERT(NULL != param);
+    li = (struct ldbminfo *)param;
+
+    dblayer_private *priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    INCR_THREAD_COUNT(pEnv);
+
+    txn_test_init_cfg(&cfg);
+
+    /* This test exercises transactions/locking; without transactions there
+     * is nothing to test.  Bug fix: the condition was inverted (it bailed
+     * out when transactions were ENABLED), which disabled the stress test
+     * in exactly the configuration it is meant for -- all sibling threads
+     * in this file do their work when bdb_enable_transactions is set. */
+    if (!BDB_CONFIG(li)->bdb_enable_transactions) {
+        goto end;
+    }
+
+wait_for_init:
+    /* throw away any stale iterators and give the backends time to start */
+    free_ttilist(&ttilist, &tticnt);
+    DS_Sleep(PR_MillisecondsToInterval(1000));
+    if (BDB_CONFIG(li)->bdb_stop_threads) {
+        goto end;
+    }
+    dbattempts++;
+    for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+        char **idx = NULL;
+        ldbm_instance *inst = (ldbm_instance *)object_get_data(inst_obj);
+        backend *be = inst->inst_be;
+
+        if (be->be_state != BE_STATE_STARTED) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "txn_test_threadmain", "Backend not started, retrying\n");
+            object_release(inst_obj);
+            goto wait_for_init;
+        }
+
+        /* build one iterator per (instance, index) pair */
+        for (idx = cfg.indexes; idx && *idx; ++idx) {
+            DB *db = NULL;
+            if (be->be_state != BE_STATE_STARTED) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "txn_test_threadmain", "Backend not started, retrying\n");
+                object_release(inst_obj);
+                goto wait_for_init;
+            }
+
+            if (!strcmp(*idx, "id2entry")) {
+                dblayer_get_id2entry(be, &db);
+                if (db == NULL) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "txn_test_threadmain", "id2entry database not found or not ready yet, retrying\n");
+                    object_release(inst_obj);
+                    goto wait_for_init;
+                }
+            } else {
+                struct attrinfo *ai = NULL;
+                ainfo_get(be, *idx, &ai);
+                if (NULL == ai) {
+                    /* after dbmaxretries full passes, give up on this index
+                     * instead of looping forever */
+                    if (dbattempts >= dbmaxretries) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "txn_test_threadmain", "Index [%s] not found or not ready yet, skipping\n",
+                                      *idx);
+                        continue;
+                    } else {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "txn_test_threadmain", "Index [%s] not found or not ready yet, retrying\n",
+                                      *idx);
+                        object_release(inst_obj);
+                        goto wait_for_init;
+                    }
+                }
+                if (dblayer_get_index_file(be, ai, &db, 0) || (NULL == db)) {
+                    if ((NULL == db) && strcasecmp(*idx, TXN_TEST_IDX_OK_IF_NULL)) {
+                        if (dbattempts >= dbmaxretries) {
+                            slapi_log_err(SLAPI_LOG_ERR,
+                                          "txn_test_threadmain", "Database file for index [%s] not found or not ready yet, skipping\n",
+                                          *idx);
+                            continue;
+                        } else {
+                            slapi_log_err(SLAPI_LOG_ERR,
+                                          "txn_test_threadmain", "Database file for index [%s] not found or not ready yet, retrying\n",
+                                          *idx);
+                            object_release(inst_obj);
+                            goto wait_for_init;
+                        }
+                    }
+                }
+            }
+            if (db) {
+                ttilist = (txn_test_iter **)slapi_ch_realloc((char *)ttilist, sizeof(txn_test_iter *) * (tticnt + 1));
+                ttilist[tticnt++] = new_txn_test_iter(db, *idx, be, cfg.flags);
+            }
+        }
+    }
+
+    slapi_log_err(SLAPI_LOG_ERR, "txn_test_threadmain", "Starting main txn stress loop\n");
+    print_ttilist(ttilist, tticnt);
+
+    while (!BDB_CONFIG(li)->bdb_stop_threads) {
+    retry_txn:
+        init_ttilist(ttilist, tticnt);
+        if (txn) {
+            TXN_ABORT(txn);
+            txn = NULL;
+        }
+        if (cfg.use_txn) {
+            rc = TXN_BEGIN(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV, NULL, &txn, 0);
+            if (rc || !txn) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "txn_test_threadmain", "Failed to create a new transaction, err=%d (%s)\n",
+                              rc, dblayer_strerror(rc));
+            }
+        } else {
+            rc = 0;
+        }
+        if (!rc) {
+            DBT key;
+            DBT data;
+            uint64_t ii;
+            uint64_t donecnt = 0;
+            uint64_t cnt = 0;
+
+            /* phase 1 - open a cursor to each db */
+            if (cfg.verbose) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "txn_test_threadmain", "Starting [%" PRIu64 "] indexes\n", tticnt);
+            }
+            for (ii = 0; ii < tticnt; ++ii) {
+                txn_test_iter *tti = ttilist[ii];
+
+            retry_cursor:
+                if (BDB_CONFIG(li)->bdb_stop_threads) {
+                    goto end;
+                }
+                if (tti->be->be_state != BE_STATE_STARTED) {
+                    if (txn) {
+                        TXN_ABORT(txn);
+                        txn = NULL;
+                    }
+                    goto wait_for_init;
+                }
+                /* 0xdbdbdbdb presumably marks a freed/closed DB handle --
+                 * bail out and rebuild the iterator list (TODO confirm) */
+                if (tti->db->open_flags == 0xdbdbdbdb) {
+                    if (txn) {
+                        TXN_ABORT(txn);
+                        txn = NULL;
+                    }
+                    goto wait_for_init;
+                }
+                rc = tti->db->cursor(tti->db, txn, &tti->cur, 0);
+                if (DB_LOCK_DEADLOCK == rc) {
+                    if (cfg.verbose) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "txn_test_threadmain", "Cursor create deadlock - retry\n");
+                    }
+                    /* with a txn the whole transaction must be restarted,
+                     * without one just retry the cursor open */
+                    if (cfg.use_txn) {
+                        goto retry_txn;
+                    } else {
+                        goto retry_cursor;
+                    }
+                } else if (rc) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "txn_test_threadmain", "Failed to create a new cursor, err=%d (%s)\n",
+                                  rc, dblayer_strerror(rc));
+                }
+            }
+
+            memset(&key, 0, sizeof(key));
+            key.flags = DB_DBT_USERMEM;
+            key.data = keybuf;
+            key.ulen = sizeof(keybuf);
+            memset(&data, 0, sizeof(data));
+            data.flags = DB_DBT_USERMEM;
+            data.data = databuf;
+            data.ulen = sizeof(databuf);
+            /* phase 2 - iterate over each cursor at the same time until
+               1) get error
+               2) get deadlock
+               3) all cursors are exhausted
+            */
+            while (donecnt < tticnt) {
+                for (ii = 0; ii < tticnt; ++ii) {
+                    txn_test_iter *tti = ttilist[ii];
+                    if (tti->cur) {
+                    retry_get:
+                        if (BDB_CONFIG(li)->bdb_stop_threads) {
+                            goto end;
+                        }
+                        if (tti->be->be_state != BE_STATE_STARTED) {
+                            if (txn) {
+                                TXN_ABORT(txn);
+                                txn = NULL;
+                            }
+                            goto wait_for_init;
+                        }
+                        if (tti->db->open_flags == 0xdbdbdbdb) {
+                            if (txn) {
+                                TXN_ABORT(txn);
+                                txn = NULL;
+                            }
+                            goto wait_for_init;
+                        }
+                        rc = tti->cur->c_get(tti->cur, &key, &data, tti->flags);
+                        if (DB_LOCK_DEADLOCK == rc) {
+                            if (cfg.verbose) {
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "txn_test_threadmain", "Cursor get deadlock - retry\n");
+                            }
+                            if (cfg.use_txn) {
+                                goto retry_txn;
+                            } else {
+                                goto retry_get;
+                            }
+                        } else if (DB_NOTFOUND == rc) {
+                            donecnt++;                         /* ran out of this one */
+                            tti->flags = DB_FIRST | cfg.flags; /* start over until all indexes are done */
+                        } else if (rc) {
+                            if ((DB_BUFFER_SMALL != rc) || cfg.verbose) {
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "txn_test_threadmain", "Failed to read a cursor, err=%d (%s)\n",
+                                              rc, dblayer_strerror(rc));
+                            }
+                            tti->cur->c_close(tti->cur);
+                            tti->cur = NULL;
+                            donecnt++;
+                        } else {
+                            tti->cnt++;
+                            tti->flags = default_flags | cfg.flags;
+                            cnt++;
+                        }
+                    }
+                }
+            }
+            TXN_TEST_PAGE_HOLD(cfg.hold_msec);
+            /*print_ttilist(ttilist, tticnt);*/
+            init_ttilist(ttilist, tticnt);
+            if (cfg.verbose) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "txn_test_threadmain", "Finished [%" PRIu64 "] indexes [%" PRIu64 "] records\n", tticnt, cnt);
+            }
+            TXN_TEST_LOOP_WAIT(cfg.loop_msec);
+        } else {
+            TXN_TEST_LOOP_WAIT(cfg.loop_msec);
+        }
+        counter++;
+        if (!(counter % 40)) {
+            /* some operations get completely stuck - so every once in a while,
+               pause to allow those ops to go through */
+            DS_Sleep(PR_SecondsToInterval(1));
+        }
+    }
+
+end:
+    slapi_ch_array_free(cfg.indexes);
+    free_ttilist(&ttilist, &tticnt);
+    if (txn) {
+        TXN_ABORT(txn);
+    }
+    DECR_THREAD_COUNT(pEnv);
+    return 0;
+}
+
+/*
+ * Spawn the detached NSPR thread that runs txn_test_threadmain()
+ * for transaction deadlock testing.
+ *
+ * Returns 0 on success, -1 if the thread could not be created.
+ */
+static int
+bdb_start_txn_test_thread(struct ldbminfo *li)
+{
+    PRThread *thr = PR_CreateThread(PR_USER_THREAD,
+                                    (VFP)(void *)txn_test_threadmain, li,
+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                    PR_UNJOINABLE_THREAD,
+                                    SLAPD_DEFAULT_THREAD_STACKSIZE);
+
+    if (thr != NULL) {
+        return 0;
+    }
+    PRErrorCode prerr = PR_GetError();
+    slapi_log_err(SLAPI_LOG_ERR, "bdb_start_txn_test_thread",
+                  "Failed to create txn test thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                  prerr, slapd_pr_strerror(prerr));
+    return -1;
+}
+
+/*
+ * Deadlock detection thread main function.
+ *
+ * Wakes every 100ms and, while transactions are enabled and the DB
+ * environment uses locking, runs Berkeley DB lock_detect() with the
+ * configured deadlock policy (bdb_deadlock_policy) so that deadlocked
+ * lock requests are rejected.  Exits when bdb_stop_threads is set.
+ */
+
+static int
+deadlock_threadmain(void *param)
+{
+    int rval = -1;
+    struct ldbminfo *li = NULL;
+    PRIntervalTime interval; /*NSPR timeout stuffy*/
+    u_int32_t flags = 0;
+
+    PR_ASSERT(NULL != param);
+    li = (struct ldbminfo *)param;
+
+    dblayer_private *priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    INCR_THREAD_COUNT(pEnv);
+
+    interval = PR_MillisecondsToInterval(100);
+    while (!BDB_CONFIG(li)->bdb_stop_threads) {
+        if (BDB_CONFIG(li)->bdb_enable_transactions) {
+            DB_ENV *db_env = ((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV;
+            u_int32_t deadlock_policy = BDB_CONFIG(li)->bdb_deadlock_policy;
+
+            /* only run the detector when a policy beyond DB_LOCK_NORUN is set */
+            if (dblayer_db_uses_locking(db_env) && (deadlock_policy > DB_LOCK_NORUN)) {
+                int rejected = 0;
+
+                rval = db_env->lock_detect(db_env, flags, deadlock_policy, &rejected);
+                if (rval != 0) {
+                    slapi_log_err(SLAPI_LOG_CRIT,
+                                  "deadlock_threadmain", "Serious Error---Failed in deadlock detect (aborted at 0x%x), err=%d (%s)\n",
+                                  rejected, rval, dblayer_strerror(rval));
+                } else if (rejected) {
+                    slapi_log_err(SLAPI_LOG_TRACE, "deadlock_threadmain", "Found and rejected %d lock requests\n", rejected);
+                }
+            }
+        }
+        DS_Sleep(interval);
+    }
+
+    DECR_THREAD_COUNT(pEnv);
+    slapi_log_err(SLAPI_LOG_TRACE, "deadlock_threadmain", "Leaving deadlock_threadmain\n");
+    return 0;
+}
+
+/* Emit a DEBUG-level "CHECKPOINT" log line, but only when the debug flag
+ * passed as the first argument is non-zero. */
+#define checkpoint_debug_message(debug, ...)                       \
+    if (debug) {                                                   \
+        slapi_log_err(SLAPI_LOG_DEBUG, "CHECKPOINT", __VA_ARGS__); \
+    }
+
+/* Start the transaction-log flush thread (log_flush_threadmain), which:
+    1. catches a group of transactions that are pending, allowing a worker
+       thread to work
+    2. flushes any left-over transactions (a single transaction, for example)
+*/
+
+static int
+bdb_start_log_flush_thread(struct ldbminfo *li)
+{
+    int return_value = 0;
+    /* one pending-flush slot is allocated per worker thread */
+    int max_threads = config_get_threadnumber();
+
+    /* the flush thread is only useful with durable transactions enabled
+     * and txn batching configured (trans_batch_limit > 0) */
+    if ((BDB_CONFIG(li)->bdb_durable_transactions) &&
+        (BDB_CONFIG(li)->bdb_enable_transactions) && (trans_batch_limit > 0)) {
+        /* initialize the synchronization objects for the log_flush and worker threads */
+        sync_txn_log_flush = PR_NewLock();
+        sync_txn_log_flush_done = PR_NewCondVar(sync_txn_log_flush);
+        sync_txn_log_do_flush = PR_NewCondVar(sync_txn_log_flush);
+        txn_log_flush_pending = (int *)slapi_ch_malloc(max_threads * sizeof(int));
+        log_flush_thread = PR_TRUE;
+        if (NULL == PR_CreateThread(PR_USER_THREAD,
+                                    (VFP)(void *)log_flush_threadmain, li,
+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                    PR_UNJOINABLE_THREAD,
+                                    SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+            PRErrorCode prerr = PR_GetError();
+            /* NOTE(review): on failure the lock/condvars stay allocated and
+             * log_flush_thread remains PR_TRUE -- confirm whether cleanup
+             * is expected here. */
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_start_log_flush_thread", "Failed to create database log flush thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                          prerr, slapd_pr_strerror(prerr));
+            return_value = -1;
+        }
+    }
+    return return_value;
+}
+
+/* Transaction-log flush thread main function.  It tries to do two things:
+    1. catch a group of transactions that are pending allowing a worker thread
+       to work
+    2. flush any left over transactions ( a single transaction for example)
+   Workers signal sync_txn_log_do_flush; this thread flushes the txn log in
+   one go for the whole batch and wakes the waiting workers through
+   sync_txn_log_flush_done.
+*/
+
+static int
+log_flush_threadmain(void *param)
+{
+    PRIntervalTime interval_wait, interval_flush, interval_def;
+    PRIntervalTime last_flush = 0;
+    int i;
+    int do_flush = 0;
+
+    PR_ASSERT(NULL != param);
+    struct ldbminfo *li = (struct ldbminfo *)param;
+    dblayer_private *priv = li->li_dblayer_private;
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    INCR_THREAD_COUNT(pEnv);
+
+    /* min sleep bounds how long a batch may accumulate before a forced
+     * flush; max sleep bounds each condvar wait */
+    interval_flush = PR_MillisecondsToInterval(trans_batch_txn_min_sleep);
+    interval_wait = PR_MillisecondsToInterval(trans_batch_txn_max_sleep);
+    interval_def = PR_MillisecondsToInterval(300); /*used while no txn or txn batching */
+    /* LK this is only needed if online change of
+     * of txn config is supported ???
+     */
+    while ((!BDB_CONFIG(li)->bdb_stop_threads) && (log_flush_thread)) {
+        if (BDB_CONFIG(li)->bdb_enable_transactions) {
+            if (trans_batch_limit > 0) {
+                /* synchronize flushing thread with workers */
+                PR_Lock(sync_txn_log_flush);
+                if (!log_flush_thread) {
+                    /* batch transactions was disabled while waiting for the lock */
+                    PR_Unlock(sync_txn_log_flush);
+                    break;
+                }
+                slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(in loop): batchcount: %d, "
+                                                                          "txn_in_progress: %d\n",
+                              trans_batch_count, txn_in_progress_count);
+                /*
+                 * if here, do flush the txn logs if any of the following conditions are met
+                 * - batch limit exceeded
+                 * - no more active transaction, no need to wait
+                 * - do_flush indicate that the max waiting interval is exceeded
+                 */
+                if (trans_batch_count >= trans_batch_limit || trans_batch_count >= txn_in_progress_count || do_flush) {
+                    slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(working): batchcount: %d, "
+                                                                              "txn_in_progress: %d\n",
+                                  trans_batch_count, txn_in_progress_count);
+                    LOG_FLUSH(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV, 0);
+                    /* clear the per-worker pending flags for the flushed batch */
+                    for (i = 0; i < trans_batch_count; i++) {
+                        txn_log_flush_pending[i] = 0;
+                    }
+                    trans_batch_count = 0;
+                    last_flush = PR_IntervalNow();
+                    do_flush = 0;
+                    slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(before notify): batchcount: %d, "
+                                                                              "txn_in_progress: %d\n",
+                                  trans_batch_count, txn_in_progress_count);
+                    PR_NotifyAllCondVar(sync_txn_log_flush_done);
+                }
+                /* wait until flushing conditions are met */
+                while ((trans_batch_count == 0) ||
+                       (trans_batch_count < trans_batch_limit && trans_batch_count < txn_in_progress_count)) {
+                    if (BDB_CONFIG(li)->bdb_stop_threads)
+                        break;
+                    /* don't let a partial batch wait longer than interval_flush */
+                    if (PR_IntervalNow() - last_flush > interval_flush) {
+                        do_flush = 1;
+                        break;
+                    }
+                    PR_WaitCondVar(sync_txn_log_do_flush, interval_wait);
+                }
+                PR_Unlock(sync_txn_log_flush);
+                slapi_log_err(SLAPI_LOG_BACKLDBM, "log_flush_threadmain", "(wakeup): batchcount: %d, "
+                                                                          "txn_in_progress: %d\n",
+                              trans_batch_count, txn_in_progress_count);
+            } else {
+                DS_Sleep(interval_def);
+            }
+        } else {
+            DS_Sleep(interval_def);
+        }
+    }
+
+    DECR_THREAD_COUNT(pEnv);
+    slapi_log_err(SLAPI_LOG_TRACE, "log_flush_threadmain", "Leaving log_flush_threadmain\n");
+    return 0;
+}
+
+/*
+ * Spawn the detached NSPR thread that runs checkpoint_threadmain().
+ *
+ * Returns 0 on success, -1 if the thread could not be created.
+ */
+static int
+bdb_start_checkpoint_thread(struct ldbminfo *li)
+{
+    PRThread *thr = PR_CreateThread(PR_USER_THREAD,
+                                    (VFP)(void *)checkpoint_threadmain, li,
+                                    PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                    PR_UNJOINABLE_THREAD,
+                                    SLAPD_DEFAULT_THREAD_STACKSIZE);
+
+    if (thr != NULL) {
+        return 0;
+    }
+    PRErrorCode prerr = PR_GetError();
+    slapi_log_err(SLAPI_LOG_ERR,
+                  "bdb_start_checkpoint_thread", "Failed to create database checkpoint thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                  prerr, slapd_pr_strerror(prerr));
+    return -1;
+}
+
+/*
+ * Checkpoint thread main function -- borrows its timing loop for
+ * compacting id2entry, as well.
+ *
+ * Periodically (bdb_checkpoint_interval) checkpoints the DB environment
+ * and archives/trims old transaction log files; on a separate timer
+ * (bdb_compactdb_interval) it compacts each instance's id2entry DB.
+ * Runs until bdb_stop_threads is set, then performs a final checkpoint.
+ */
+static int
+checkpoint_threadmain(void *param)
+{
+    PRIntervalTime interval;
+    int rval = -1;
+    struct ldbminfo *li = NULL;
+    int debug_checkpointing = 0;
+    char *home_dir = NULL;
+    char **list = NULL;
+    char **listp = NULL;
+    bdb_db_env *penv = NULL;
+    struct timespec checkpoint_expire;
+    struct timespec compactdb_expire;
+    time_t compactdb_interval_update = 0;
+    time_t checkpoint_interval_update = 0;
+    time_t compactdb_interval = 0;
+    time_t checkpoint_interval = 0;
+    back_txn txn;
+
+    PR_ASSERT(NULL != param);
+    li = (struct ldbminfo *)param;
+
+    dblayer_private *priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    INCR_THREAD_COUNT(pEnv);
+
+    interval = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL * 10);
+    home_dir = bdb_get_home_dir(li, NULL);
+    if (NULL == home_dir || '\0' == *home_dir) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "checkpoint_threadmain", "Failed due to missing db home directory info\n");
+        goto error_return;
+    }
+
+    /* work around a problem with newly created environments */
+    dblayer_force_checkpoint(li);
+
+    /* snapshot the intervals under the config mutex; they are re-read
+     * on every loop iteration so online changes take effect */
+    PR_Lock(li->li_config_mutex);
+    checkpoint_interval = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval;
+    compactdb_interval = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval;
+    penv = (bdb_db_env *)priv->dblayer_env;
+    debug_checkpointing = BDB_CONFIG(li)->bdb_debug_checkpointing;
+    PR_Unlock(li->li_config_mutex);
+
+    /* assumes dblayer_force_checkpoint worked */
+    /*
+     * Importantly, the use of this api is not affected by backwards time steps
+     * and the like. Because this use relative system time, rather than utc,
+     * it makes it much more reliable to run.
+     */
+    slapi_timespec_expire_at(compactdb_interval, &compactdb_expire);
+    slapi_timespec_expire_at(checkpoint_interval, &checkpoint_expire);
+
+    while (!BDB_CONFIG(li)->bdb_stop_threads) {
+        /* sleep for a while */
+        /* why aren't we sleeping exactly the right amount of time ? */
+        /* answer---because the interval might be changed after the server
+         * starts up */
+
+        DS_Sleep(interval);
+
+        if (0 == BDB_CONFIG(li)->bdb_enable_transactions) {
+            continue;
+        }
+
+        PR_Lock(li->li_config_mutex);
+        checkpoint_interval_update = (time_t)BDB_CONFIG(li)->bdb_checkpoint_interval;
+        compactdb_interval_update = (time_t)BDB_CONFIG(li)->bdb_compactdb_interval;
+        PR_Unlock(li->li_config_mutex);
+
+        /* If the checkpoint has been updated OR we have expired */
+        if (checkpoint_interval != checkpoint_interval_update ||
+            slapi_timespec_expire_check(&checkpoint_expire) == TIMER_EXPIRED) {
+
+            /* If our interval has changed, update it. */
+            checkpoint_interval = checkpoint_interval_update;
+
+            if (!dblayer_db_uses_transactions(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV)) {
+                continue;
+            }
+
+            /* now checkpoint */
+            checkpoint_debug_message(debug_checkpointing,
+                                     "checkpoint_threadmain - Starting checkpoint\n");
+            rval = dblayer_txn_checkpoint(li, (bdb_db_env *)priv->dblayer_env,
+                                          PR_TRUE, PR_FALSE);
+            checkpoint_debug_message(debug_checkpointing,
+                                     "checkpoint_threadmain - Checkpoint Done\n");
+            if (rval != 0) {
+                /* bad error */
+                slapi_log_err(SLAPI_LOG_CRIT,
+                              "checkpoint_threadmain", "Serious Error---Failed to checkpoint database, "
+                                                       "err=%d (%s)\n",
+                              rval, dblayer_strerror(rval));
+                if (LDBM_OS_ERR_IS_DISKFULL(rval)) {
+                    operation_out_of_disk_space();
+                    goto error_return;
+                }
+            }
+
+            /* list the no-longer-needed txn log files; with circular
+             * logging they are deleted, otherwise renamed to *.old */
+            rval = LOG_ARCHIVE(penv->bdb_DB_ENV, &list,
+                               DB_ARCH_ABS, (void *)slapi_ch_malloc);
+            if (rval) {
+                slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain",
+                              "log archive failed - %s (%d)\n",
+                              dblayer_strerror(rval), rval);
+            } else {
+                for (listp = list; listp && *listp != NULL; ++listp) {
+                    if (BDB_CONFIG(li)->bdb_circular_logging) {
+                        checkpoint_debug_message(debug_checkpointing,
+                                                 "Deleting %s\n", *listp);
+                        unlink(*listp);
+                    } else {
+                        char new_filename[MAXPATHLEN];
+                        PR_snprintf(new_filename, sizeof(new_filename),
+                                    "%s.old", *listp);
+                        checkpoint_debug_message(debug_checkpointing,
+                                                 "Renaming %s -> %s\n", *listp, new_filename);
+                        if (rename(*listp, new_filename) != 0) {
+                            slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "Failed to rename log (%s) to (%s)\n",
+                                          *listp, new_filename);
+                            rval = -1;
+                            goto error_return;
+                        }
+                    }
+                }
+                slapi_ch_free((void **)&list);
+                /* Note: references inside the returned memory need not be
+                 * individually freed. */
+            }
+            slapi_timespec_expire_at(checkpoint_interval, &checkpoint_expire);
+        }
+
+        /* Compacting DB borrowing the timing of the log flush */
+
+        /*
+         * Remember that if compactdb_interval is 0, timer_expired can
+         * never occur unless the value in compctdb_interval changes.
+         *
+         * this could have been a bug infact, where compactdb_interval
+         * was 0, if you change while running it would never take effect ....
+         */
+        if (compactdb_interval_update != compactdb_interval ||
+            slapi_timespec_expire_check(&compactdb_expire) == TIMER_EXPIRED) {
+            int rc = 0;
+            Object *inst_obj;
+            ldbm_instance *inst;
+            DB *db = NULL;
+            DB_COMPACT c_data = {0};
+
+            /* NOTE(review): the `break` exits below leave the current
+             * inst_obj reference held -- confirm whether an
+             * object_release() is required on those paths. */
+            for (inst_obj = objset_first_obj(li->li_instance_set);
+                 inst_obj;
+                 inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+                inst = (ldbm_instance *)object_get_data(inst_obj);
+                rc = dblayer_get_id2entry(inst->inst_be, &db);
+                if (!db || rc) {
+                    continue;
+                }
+                slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain", "Compacting DB start: %s\n",
+                              inst->inst_name);
+
+                /*
+                 * It's possible for this to heap us after free because when we access db
+                 * *just* as the server shut's down, we don't know it. So we should probably
+                 * do something like wrapping access to the db var in a rwlock, and have "read"
+                 * to access, and take writes to change the state. This would prevent the issue.
+                 */
+                DBTYPE type;
+                rc = db->get_type(db, &type);
+                if (rc) {
+                    slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain",
+                                  "compactdb: failed to determine db type for %s: db error - %d %s\n",
+                                  inst->inst_name, rc, db_strerror(rc));
+                    continue;
+                }
+
+                rc = dblayer_txn_begin(inst->inst_be, NULL, &txn);
+                if (rc) {
+                    slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: transaction begin failed: %d\n", rc);
+                    break;
+                }
+                /*
+                 * https://docs.oracle.com/cd/E17275_01/html/api_reference/C/BDB-C_APIReference.pdf
+                 * "DB_FREELIST_ONLY
+                 * Do no page compaction, only returning pages to the filesystem that are already free and at the end
+                 * of the file. This flag must be set if the database is a Hash access method database."
+                 *
+                 */
+
+                uint32_t compact_flags = DB_FREE_SPACE;
+                if (type == DB_HASH) {
+                    compact_flags |= DB_FREELIST_ONLY;
+                }
+                rc = db->compact(db, txn.back_txn_txn, NULL /*start*/, NULL /*stop*/,
+                                 &c_data, compact_flags, NULL /*end*/);
+                if (rc) {
+                    slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain",
+                                  "compactdb: failed to compact %s; db error - %d %s\n",
+                                  inst->inst_name, rc, db_strerror(rc));
+                    if ((rc = dblayer_txn_abort(inst->inst_be, &txn))) {
+                        slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to abort txn (%s) db error - %d %s\n",
+                                      inst->inst_name, rc, db_strerror(rc));
+                        break;
+                    }
+                } else {
+                    slapi_log_err(SLAPI_LOG_NOTICE, "checkpoint_threadmain",
+                                  "compactdb: compact %s - %d pages freed\n",
+                                  inst->inst_name, c_data.compact_pages_free);
+                    if ((rc = dblayer_txn_commit(inst->inst_be, &txn))) {
+                        slapi_log_err(SLAPI_LOG_ERR, "checkpoint_threadmain", "compactdb: failed to commit txn (%s) db error - %d %s\n",
+                                      inst->inst_name, rc, db_strerror(rc));
+                        break;
+                    }
+                }
+            }
+            compactdb_interval = compactdb_interval_update;
+            slapi_timespec_expire_at(compactdb_interval, &compactdb_expire);
+        }
+    }
+    slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Check point before leaving\n");
+    rval = dblayer_force_checkpoint(li);
+error_return:
+
+    DECR_THREAD_COUNT(pEnv);
+    slapi_log_err(SLAPI_LOG_TRACE, "checkpoint_threadmain", "Leaving checkpoint_threadmain\n");
+    return rval;
+}
+
+/*
+ * create a thread for trickle_threadmain
+ */
+static int
+bdb_start_trickle_thread(struct ldbminfo *li)
+{
+    int return_value = 0;
+    bdb_config *priv = (bdb_config *)li->li_dblayer_config;
+
+    if (priv->bdb_trickle_percentage == 0)
+        return return_value;
+
+    if (NULL == PR_CreateThread(PR_USER_THREAD,
+                                (VFP)(void *)trickle_threadmain, li,
+                                PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+                                PR_UNJOINABLE_THREAD,
+                                SLAPD_DEFAULT_THREAD_STACKSIZE)) {
+        PRErrorCode prerr = PR_GetError();
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_start_trickle_thread",
+                      "Failed to create database trickle thread, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                      prerr, slapd_pr_strerror(prerr));
+        return_value = -1;
+    }
+    return return_value;
+}
+
/*
 * Background thread body: while the backend is running, periodically ask
 * the BDB memory pool to "trickle" dirty pages out to disk so page
 * allocation does not stall on synchronous flushes.
 *
 * param - the struct ldbminfo * for this backend (cast from void *).
 * Loops until bdb_stop_threads is set; always returns 0.
 */
static int
trickle_threadmain(void *param)
{
    PRIntervalTime interval; /* NSPR sleep interval between trickle passes */
    int rval = -1;
    dblayer_private *priv = NULL;
    struct ldbminfo *li = NULL;
    int debug_checkpointing = 0;

    PR_ASSERT(NULL != param);
    li = (struct ldbminfo *)param;

    priv = li->li_dblayer_private;
    PR_ASSERT(NULL != priv);
    bdb_db_env *pEnv = (bdb_db_env *)priv->dblayer_env;

    /* Register with the env so shutdown can wait for this thread to exit. */
    INCR_THREAD_COUNT(pEnv);

    interval = PR_MillisecondsToInterval(DBLAYER_SLEEP_INTERVAL);
    debug_checkpointing = BDB_CONFIG(li)->bdb_debug_checkpointing;
    while (!BDB_CONFIG(li)->bdb_stop_threads) {
        DS_Sleep(interval); /* 622855: wait for other threads fully started */
        if (BDB_CONFIG(li)->bdb_enable_transactions) {
            /* Only trickle when the env uses an mpool cache and a non-zero
             * trickle percentage is configured. */
            if (dblayer_db_uses_mpool(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV) &&
                (0 != BDB_CONFIG(li)->bdb_trickle_percentage)) {
                int pages_written = 0;
                if ((rval = MEMP_TRICKLE(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV,
                                         BDB_CONFIG(li)->bdb_trickle_percentage,
                                         &pages_written)) != 0) {
                    slapi_log_err(SLAPI_LOG_ERR, "trickle_threadmain", "Serious Error---Failed to trickle, err=%d (%s)\n",
                                  rval, dblayer_strerror(rval));
                }
                if (pages_written > 0) {
                    checkpoint_debug_message(debug_checkpointing, "trickle_threadmain - Trickle thread wrote %d pages\n",
                                             pages_written);
                }
            }
        }
    }

    DECR_THREAD_COUNT(pEnv);
    slapi_log_err(SLAPI_LOG_TRACE, "trickle_threadmain", "Leaving trickle_threadmain priv\n");
    return 0;
}
+
+
+
+/* Helper function for monitor stuff */
+int
+bdb_memp_stat(struct ldbminfo *li, DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp)
+{
+    DB_ENV *env = NULL;
+
+    PR_ASSERT(NULL != li);
+
+    dblayer_private *priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    env = ((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV;
+    PR_ASSERT(NULL != env);
+
+    return MEMP_STAT(env, gsp, fsp, 0, (void *)slapi_ch_malloc);
+}
+
+/* import wants this one */
+int
+bdb_memp_stat_instance(ldbm_instance *inst, DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp)
+{
+    DB_ENV *env = NULL;
+
+    PR_ASSERT(NULL != inst);
+
+    if (((bdb_db_env *)inst->inst_db)->bdb_DB_ENV) {
+        env = ((bdb_db_env *)inst->inst_db)->bdb_DB_ENV;
+    } else {
+        dblayer_private *priv = inst->inst_li->li_dblayer_private;
+        PR_ASSERT(NULL != priv);
+        env = ((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV;
+    }
+    PR_ASSERT(NULL != env);
+
+    return MEMP_STAT(env, gsp, fsp, 0, (void *)slapi_ch_malloc);
+}
+
+/* Helper functions for recovery */
+
+#define DB_LINE_LENGTH 80
+
+static int
+commit_good_database(bdb_config *conf, int mode)
+{
+    /* Write out the guard file */
+    char filename[MAXPATHLEN];
+    char line[DB_LINE_LENGTH * 2];
+    PRFileDesc *prfd;
+    int return_value = 0;
+    int num_bytes;
+
+    PR_snprintf(filename, sizeof(filename), "%s/guardian", conf->bdb_home_directory);
+
+    prfd = PR_Open(filename, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, mode);
+    if (NULL == prfd) {
+        slapi_log_err(SLAPI_LOG_CRIT, "commit_good_database", "Failed to write guardian file %s, database corruption possible" SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n",
+                      filename, PR_GetError(), slapd_pr_strerror(PR_GetError()));
+        return -1;
+    }
+    PR_snprintf(line, sizeof(line), "cachesize:%lu\nncache:%d\nversion:%d\nlocks:%d\n",
+                (long unsigned int)conf->bdb_cachesize, conf->bdb_ncache, DB_VERSION_MAJOR, conf->bdb_lock_config);
+    num_bytes = strlen(line);
+    return_value = slapi_write_buffer(prfd, line, num_bytes);
+    if (return_value != num_bytes) {
+        goto error;
+    }
+    return_value = PR_Close(prfd);
+    if (PR_SUCCESS == return_value) {
+        return 0;
+    } else {
+        slapi_log_err(SLAPI_LOG_CRIT, "commit_good_database",
+                      "Failed to write guardian file, database corruption possible\n");
+        (void)PR_Delete(filename);
+        return -1;
+    }
+error:
+    (void)PR_Close(prfd);
+    (void)PR_Delete(filename);
+    return -1;
+}
+
/*
 * Read (and then delete) the guardian file written by
 * commit_good_database() at the last clean shutdown.  If the file is
 * missing or empty while database files exist, flag the environment as
 * needing recovery.  Otherwise parse the recorded cachesize / ncache /
 * locks values into conf->bdb_previous_* so the caller can detect
 * configuration changes.
 *
 * Returns 0 on success (including the "recovery required" case), or the
 * PR_Delete() status if the guardian file could not be removed.
 */
static int
read_metadata(struct ldbminfo *li)
{
    char filename[MAXPATHLEN];
    char *buf;
    char *thisline;
    char *nextline;
    char **dirp;
    PRFileDesc *prfd;
    PRFileInfo64 prfinfo;
    int return_value = 0;
    PRInt32 byte_count = 0;
    char attribute[513];
    char value[129], delimiter;
    int number = 0;
    bdb_config *conf = (bdb_config *)li->li_dblayer_config;
    dblayer_private *priv = li->li_dblayer_private;

    /* bdb_recovery_required is initialized in dblayer_init;
     * and might be set 1 in check_db_version;
     * we don't want to override it
     * priv->bdb_recovery_required = 0; */
    conf->bdb_previous_cachesize = 0;
    conf->bdb_previous_ncache = 0;
    conf->bdb_previous_lock_config = 0;
    /* Open the guard file and read stuff, then delete it */
    PR_snprintf(filename, sizeof(filename), "%s/guardian", conf->bdb_home_directory);

    memset(&prfinfo, '\0', sizeof(PRFileInfo64));
    (void)PR_GetFileInfo64(filename, &prfinfo);

    prfd = PR_Open(filename, PR_RDONLY, priv->dblayer_file_mode);
    if (NULL == prfd || 0 == prfinfo.size) {
        /* File empty or not present -- means the database needs recovery,
         * but only if any database files actually exist. */
        int count = 0;
        for (dirp = conf->bdb_data_directories; dirp && *dirp; dirp++) {
            count_dbfiles_in_dir(*dirp, &count, 1 /* recurse */);
            if (count > 0) {
                conf->bdb_recovery_required = 1;
                return 0;
            }
        }
        return 0; /* no files found; no need to run recover start */
    }
    /* So, we opened the file, now let's read the cache size and version stuff
     */
    buf = slapi_ch_calloc(1, prfinfo.size + 1);
    byte_count = slapi_read_buffer(prfd, buf, prfinfo.size);
    if (byte_count < 0) {
        /* something bad happened while reading */
        conf->bdb_recovery_required = 1;
    } else {
        buf[byte_count] = '\0';
        thisline = buf;
        /* Parse "attribute:value" lines, one per loop iteration. */
        while (1) {
            /* Find the end of the line */
            nextline = strchr(thisline, '\n');
            if (NULL != nextline) {
                *nextline++ = '\0';
                while ('\n' == *nextline) {
                    nextline++;
                }
            }
            sscanf(thisline, "%512[a-z]%c%128s", attribute, &delimiter, value);
            if (0 == strcmp("cachesize", attribute)) {
                conf->bdb_previous_cachesize = strtoul(value, NULL, 10);
            } else if (0 == strcmp("ncache", attribute)) {
                number = atoi(value);
                conf->bdb_previous_ncache = number;
            } else if (0 == strcmp("version", attribute)) {
                /* version line is recognized but intentionally ignored */
            } else if (0 == strcmp("locks", attribute)) {
                number = atoi(value);
                conf->bdb_previous_lock_config = number;
            }
            if (NULL == nextline || '\0' == *nextline) {
                /* Nothing more to read */
                break;
            }
            thisline = nextline;
        }
    }
    slapi_ch_free((void **)&buf);
    (void)PR_Close(prfd);
    return_value = PR_Delete(filename); /* very important that this happen ! */
    if (PR_SUCCESS != return_value) {
        slapi_log_err(SLAPI_LOG_CRIT,
                      "read_metadata", "Failed to delete guardian file, "
                                       "database corruption possible\n");
    }
    return return_value;
}
+
+/* handy routine for checkpointing the db */
+static int
+dblayer_force_checkpoint(struct ldbminfo *li)
+{
+    int ret = 0, i;
+    dblayer_private *priv = li->li_dblayer_private;
+    bdb_db_env *pEnv;
+
+    if (NULL == priv || NULL == priv->dblayer_env) {
+        /* already terminated.  nothing to do */
+        return -1;
+    }
+
+    pEnv = (bdb_db_env *)priv->dblayer_env;
+
+    if (BDB_CONFIG(li)->bdb_enable_transactions) {
+
+        slapi_log_err(SLAPI_LOG_TRACE, "dblayer_force_checkpoint", "Checkpointing database ...\n");
+
+        /*
+     * DB workaround. Newly created environments do not know what the
+     * previous checkpoint LSN is. The default LSN of [0][0] would
+     * cause us to read all log files from very beginning during a
+     * later recovery. Taking two checkpoints solves the problem.
+     */
+
+        for (i = 0; i < 2; i++) {
+            ret = dblayer_txn_checkpoint(li, pEnv, PR_FALSE, PR_TRUE);
+            if (ret != 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "dblayer_force_checkpoint", "Checkpoint FAILED, error %s (%d)\n",
+                              dblayer_strerror(ret), ret);
+                break;
+            }
+        }
+    }
+
+    return ret;
+}
+
/*
 * Remove an auxiliary database directory (e.g. a changelog directory).
 * Database files (LDBM_FILENAME_SUFFIX) are removed through the DB
 * environment (bdb_db_remove_ex) so the transaction log stays consistent;
 * everything else is deleted recursively.  The directory itself is
 * removed last.
 *
 * Returns the status of the last removal, 0 if the directory does not
 * exist, or -1 on invalid arguments.
 *
 * NOTE(review): if the directory exists but is empty, rc stays -1 even
 * though the directory is removed -- confirm callers tolerate this.
 */
static int
_dblayer_delete_aux_dir(struct ldbminfo *li, char *path)
{
    PRDir *dirhandle = NULL;
    PRDirEntry *direntry = NULL;
    char filename[MAXPATHLEN];
    dblayer_private *priv = NULL;
    bdb_db_env *pEnv = NULL;
    int rc = -1;

    if (NULL == li || NULL == path) {
        slapi_log_err(SLAPI_LOG_ERR,
                      "_dblayer_delete_aux_dir", "Invalid LDBM info (0x%p) "
                                                 "or path (0x%p)\n",
                      li, path);
        return rc;
    }
    priv = li->li_dblayer_private;
    if (priv) {
        pEnv = (bdb_db_env *)priv->dblayer_env;
    }
    dirhandle = PR_OpenDir(path);
    if (!dirhandle) {
        return 0; /* The dir does not exist. */
    }
    while (NULL != (direntry = PR_ReadDir(dirhandle,
                                          PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
        if (!direntry->name)
            break;
        PR_snprintf(filename, sizeof(filename), "%s/%s", path, direntry->name);
        /* Only .db files go through the environment; strrchr may return
         * NULL for extension-less names -- PL_strcmp takes NULL arg. */
        if (pEnv &&
            (PL_strcmp(LDBM_FILENAME_SUFFIX, strrchr(direntry->name, '.')) == 0)) {
            rc = bdb_db_remove_ex(pEnv, filename, 0, PR_TRUE);
        } else {
            rc = ldbm_delete_dirs(filename);
        }
    }
    PR_CloseDir(dirhandle);
    PR_RmDir(path);
    return rc;
}
+
+/* TEL:  Added startdb flag.  If set (1), the DB environment will be started so
+ * that bdb_db_remove_ex will be used to remove the database files instead
+ * of simply deleting them.  That is important when doing a selective restoration
+ * of a single backend (FRI).  If not set (0), the traditional remove is used.
+ */
+static int
+_dblayer_delete_instance_dir(ldbm_instance *inst, int startdb)
+{
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    char filename[MAXPATHLEN];
+    struct ldbminfo *li = inst->inst_li;
+    dblayer_private *priv = NULL;
+    bdb_db_env *pEnv = NULL;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+    int rval = 0;
+
+    if (NULL == li) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "_dblayer_delete_instance_dir", "NULL LDBM info\n");
+        rval = -1;
+        goto done;
+    }
+
+    if (startdb) {
+        /* close immediately; no need to run db threads */
+        rval = bdb_start(li, DBLAYER_NORMAL_MODE | DBLAYER_NO_DBTHREADS_MODE);
+        if (rval) {
+            slapi_log_err(SLAPI_LOG_ERR, "_dblayer_delete_instance_dir", "bdb_start failed! %s (%d)\n",
+                          dblayer_strerror(rval), rval);
+            goto done;
+        }
+    }
+
+    priv = li->li_dblayer_private;
+    if (NULL != priv) {
+        pEnv = (bdb_db_env *)priv->dblayer_env;
+    }
+
+    if (inst->inst_dir_name == NULL)
+        dblayer_get_instance_data_dir(inst->inst_be);
+
+    inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+    if (inst_dirp && *inst_dirp) {
+        dirhandle = PR_OpenDir(inst_dirp);
+    }
+    if (!dirhandle) {
+        if (PR_GetError() == PR_FILE_NOT_FOUND_ERROR) {
+            /* the directory does not exist... that's not an error */
+            rval = 0;
+            goto done;
+        }
+        if (inst_dirp && *inst_dirp) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "_dblayer_delete_instance_dir", "inst_dir is NULL\n");
+        } else {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "_dblayer_delete_instance_dir", "PR_OpenDir(%s) failed (%d): %s\n",
+                          inst_dirp, PR_GetError(), slapd_pr_strerror(PR_GetError()));
+        }
+        rval = -1;
+        goto done;
+    }
+
+    /*
+        Note the use of PR_Delete here as opposed to using
+        sleepycat to "remove" the file. Reason: One should
+        not expect logging to be able to recover the wholesale
+        removal of a complete directory... a directory that includes
+        files outside the scope of sleepycat's logging. rwagner
+
+        ADDITIONAL COMMENT:
+        libdb41 is more strict on the transaction log control.
+        Even if checkpoint is forced before this delete function,
+        no log regarding the file deleted found in the log file,
+        following checkpoint repeatedly complains with these error messages:
+        libdb: <path>/mail.db4: cannot sync: No such file or directory
+        libdb: txn_checkpoint: failed to flush the buffer cache
+                               No such file or directory
+    */
+
+    while (NULL != (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT |
+                                                         PR_SKIP_DOT_DOT))) {
+        if (!direntry->name)
+            break;
+        PR_snprintf(filename, MAXPATHLEN, "%s/%s", inst_dirp, direntry->name);
+        if (pEnv &&
+            /* PL_strcmp takes NULL arg */
+            (PL_strcmp(LDBM_FILENAME_SUFFIX, strrchr(direntry->name, '.')) == 0)) {
+            rval = bdb_db_remove_ex(pEnv, filename, 0, PR_TRUE);
+        } else {
+            rval = ldbm_delete_dirs(filename);
+        }
+    }
+    PR_CloseDir(dirhandle);
+    if (pEnv && startdb) {
+        rval = dblayer_close(li, DBLAYER_NORMAL_MODE);
+        if (rval) {
+            slapi_log_err(SLAPI_LOG_ERR, "_dblayer_delete_instance_dir", "dblayer_close failed! %s (%d)\n",
+                          dblayer_strerror(rval), rval);
+        }
+    }
+done:
+    /* remove the directory itself too */
+    if (0 == rval)
+        PR_RmDir(inst_dirp);
+    if (inst_dirp != inst_dir)
+        slapi_ch_free_string(&inst_dirp);
+    return rval;
+}
+
+/* delete the db3 files in a specific backend instance --
+ * this is probably only used for import.
+ * assumption: dblayer is open, but the instance has been closed.
+ */
+int
+dblayer_delete_instance_dir(backend *be)
+{
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    int ret = dblayer_force_checkpoint(li);
+
+    if (ret != 0) {
+        return ret;
+    } else {
+        ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+        return _dblayer_delete_instance_dir(inst, 0);
+    }
+}
+
+
/*
 * Delete the entire database: every backend instance directory, the
 * optional changelog directory (cldir), all remaining files in the DB
 * home directory, and finally the transaction logs.  Instance
 * directories are removed the "traditional" way (plain deletion, no DB
 * environment started).
 *
 * Returns 0 on success, non-zero on the first failure.
 */
static int
bdb_delete_database_ex(struct ldbminfo *li, char *cldir)
{
    dblayer_private *priv = NULL;
    Object *inst_obj;
    PRDir *dirhandle = NULL;
    PRDirEntry *direntry = NULL;
    PRFileInfo64 fileinfo;
    char filename[MAXPATHLEN];
    char *log_dir;
    int ret;

    PR_ASSERT(NULL != li);
    priv = (dblayer_private *)li->li_dblayer_private;
    PR_ASSERT(NULL != priv);

    /* delete each instance */
    for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
        ldbm_instance *inst = (ldbm_instance *)object_get_data(inst_obj);

        if (inst->inst_be->be_instance_info != NULL) {
            ret = _dblayer_delete_instance_dir(inst, 0 /* Do not start DB environment: traditional */);
            if (ret != 0) {
                slapi_log_err(SLAPI_LOG_ERR,
                              "bdb_delete_database_ex", "Failed (%d)\n", ret);
                return ret;
            }
        }
    }

    /* changelog path is given; delete it, too. */
    if (cldir) {
        ret = _dblayer_delete_aux_dir(li, cldir);
        if (ret) {
            slapi_log_err(SLAPI_LOG_ERR,
                          "bdb_delete_database_ex", "Failed to delete \"%s\"\n",
                          cldir);
            return ret;
        }
    }

    /* now smash everything else in the db/ dir */
    if (BDB_CONFIG(li)->bdb_home_directory == NULL){
        slapi_log_err(SLAPI_LOG_ERR, "bdb_delete_database_ex",
            "bdb_home_directory is NULL, can not proceed\n");
        return -1;
    }
    dirhandle = PR_OpenDir(BDB_CONFIG(li)->bdb_home_directory);
    if (!dirhandle) {
        slapi_log_err(SLAPI_LOG_ERR, "bdb_delete_database_ex", "PR_OpenDir (%s) failed (%d): %s\n",
                      BDB_CONFIG(li)->bdb_home_directory,
                      PR_GetError(), slapd_pr_strerror(PR_GetError()));
        return -1;
    }
    while (NULL != (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT |
                                                         PR_SKIP_DOT_DOT))) {
        int rval_tmp = 0;
        if (!direntry->name)
            break;

        PR_snprintf(filename, MAXPATHLEN, "%s/%s", BDB_CONFIG(li)->bdb_home_directory,
                    direntry->name);

        /* Do not call PR_Delete on the instance directories if they exist.
         * It would not work, but we still should not do it. */
        rval_tmp = PR_GetFileInfo64(filename, &fileinfo);
        if (rval_tmp == PR_SUCCESS && fileinfo.type != PR_FILE_DIRECTORY) {
            /* Skip deleting log files; that should be handled below.
             * (Note, we don't want to use "filename," because that is qualified and would
             * not be compatible with what dblayer_is_logfilename expects.) */
            if (!dblayer_is_logfilename(direntry->name)) {
                PR_Delete(filename);
            }
        }
    }

    PR_CloseDir(dirhandle);
    /* remove transaction logs; they may live in a dedicated log directory
     * or fall back to the DB home directory */
    if ((NULL != BDB_CONFIG(li)->bdb_log_directory) &&
        (0 != strlen(BDB_CONFIG(li)->bdb_log_directory))) {
        log_dir = BDB_CONFIG(li)->bdb_log_directory;
    } else {
        log_dir = bdb_get_home_dir(li, NULL);
    }
    if (log_dir && *log_dir) {
        ret = dblayer_delete_transaction_logs(log_dir);
        if (ret) {
            slapi_log_err(SLAPI_LOG_ERR,
                          "bdb_delete_database_ex", "dblayer_delete_transaction_logs failed (%d)\n", ret);
            return -1;
        }
    }
    return 0;
}
+
/* Delete an entire db/ directory, including all instances under it!
 * This is used mostly for restores.
 * dblayer is assumed to be closed.
 *
 * Convenience wrapper around bdb_delete_database_ex() with no changelog
 * directory to remove.
 */
int
bdb_delete_db(struct ldbminfo *li)
{
    return bdb_delete_database_ex(li, NULL);
}
+
+
+/*
+ * Return the size of the database (in kilobytes).  XXXggood returning
+ * the size in units of kb is really a hack, and is done because we
+ * didn't have NSPR support for 64-bit file offsets originally (now we do)
+ * Caveats:
+ * - We can still return incorrect results if an individual file is
+ *   larger than fit in a PRUint32.
+ * - PR_GetFileInfo64 doesn't do any special processing for symlinks,
+ *   nor does it inform us if the file is a symlink.  Nice.  So if
+ *   a file in the db directory is a symlink, the size we return
+ *   will probably be way too small.
+ */
+int
+dblayer_database_size(struct ldbminfo *li, unsigned int *size)
+{
+    bdb_config *priv = NULL;
+    int return_value = 0;
+    char filename[MAXPATHLEN];
+    PRDir *dirhandle = NULL;
+    unsigned int cumulative_size = 0;
+    unsigned int remainder = 0;
+    PRFileInfo64 info;
+
+    PR_ASSERT(NULL != li);
+    priv = (bdb_config *)li->li_dblayer_config;
+    PR_ASSERT(NULL != priv);
+
+    dirhandle = PR_OpenDir(priv->bdb_home_directory);
+    if (NULL != dirhandle) {
+        PRDirEntry *direntry = NULL;
+        while (NULL != (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+            if (NULL == direntry->name) {
+                break;
+            }
+            PR_snprintf(filename, MAXPATHLEN, "%s/%s", priv->bdb_home_directory, direntry->name);
+            return_value = PR_GetFileInfo64(filename, &info);
+            if (PR_SUCCESS == return_value) {
+                cumulative_size += (info.size / 1024);
+                remainder += (info.size % 1024);
+            } else {
+                cumulative_size = (PRUint32)0;
+                return_value = -1;
+                break;
+            }
+        }
+        PR_CloseDir(dirhandle);
+    } else {
+        return_value = -1;
+    }
+
+    *size = cumulative_size + (remainder / 1024);
+    return return_value;
+}
+
+
+static int
+count_dbfiles_in_dir(char *directory, int *count, int recurse)
+{
+    /* The new recurse argument was added to help with multiple backend
+     * instances.  When recurse is true, this function will also look through
+     * the directories in the given directory for .db3 files. */
+    int return_value = 0;
+    PRDir *dirhandle = NULL;
+
+    if (!recurse) {
+        /* It is really the callers responsibility to set count to 0 before
+         * calling.  However, if recurse isn't true, we can make sure it is
+         * set to 0. */
+        *count = 0;
+    }
+    dirhandle = PR_OpenDir(directory);
+    if (NULL != dirhandle) {
+        PRDirEntry *direntry = NULL;
+        char *direntry_name;
+        PRFileInfo64 info;
+
+        while (NULL != (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+            if (NULL == direntry->name) {
+                break;
+            }
+            direntry_name = PR_smprintf("%s/%s", directory, direntry->name);
+            if ((PR_GetFileInfo64(direntry_name, &info) == PR_SUCCESS) &&
+                (PR_FILE_DIRECTORY == info.type) && recurse) {
+                /* Recurse into this directory but not any further.  This is
+                 * because each instance gets its own directory, but in those
+                 * directories there should be only .db3 files.  There should
+                 * not be any more directories in an instance directory. */
+                count_dbfiles_in_dir(direntry_name, count, 0 /* don't recurse */);
+            }
+            if (direntry_name) {
+                PR_smprintf_free(direntry_name);
+            }
+            /* PL_strcmp takes NULL arg */
+            if (PL_strcmp(LDBM_FILENAME_SUFFIX, strrchr(direntry->name, '.')) == 0) {
+                (*count)++;
+            }
+        }
+        PR_CloseDir(dirhandle);
+    } else {
+        return_value = -1;
+    }
+
+    return return_value;
+}
+
+/* And finally... Tubular Bells.
+ * Well, no, actually backup and restore...
+ */
+
+/* Backup works like this:
+ * the slapd executable is run like for ldif2ldbm and so on.
+ * this means that the front-end gets the back-end loaded, and then calls
+ * into the back-end backup entry point. This then gets us down to here.
+ *
+ * So, we need to copy the data files to the backup point.
+ * While we are doing that, we need to make sure that the logfile
+ * truncator in slapd doesn't delete our files. To do this we need
+ * some way to signal to it that it should cease its work, or we need
+ * to do something like start a long-lived transaction so that the
+ * log files look like they're needed.
+ *
+ * When we've copied the data files, we can then copy the log files
+ * too.
+ *
+ * Finally, we tell the log file truncator to go back about its business in peace
+ *
+ */
+
+int
+dblayer_copyfile(char *source, char *destination, int overwrite __attribute__((unused)), int mode)
+{
+#ifdef DB_USE_64LFS
+#define OPEN_FUNCTION dblayer_open_large
+#else
+#define OPEN_FUNCTION open
+#endif
+    int source_fd = -1;
+    int dest_fd = -1;
+    char *buffer = NULL;
+    int return_value = -1;
+    int bytes_to_write = 0;
+
+    /* malloc the buffer */
+    buffer = slapi_ch_malloc(64 * 1024);
+    if (NULL == buffer) {
+        goto error;
+    }
+    /* Open source file */
+    source_fd = OPEN_FUNCTION(source, O_RDONLY, 0);
+    if (-1 == source_fd) {
+        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copyfile", "Failed to open source file %s by \"%s\"\n",
+                      source, strerror(errno));
+        goto error;
+    }
+    /* Open destination file */
+    dest_fd = OPEN_FUNCTION(destination, O_CREAT | O_WRONLY, mode);
+    if (-1 == dest_fd) {
+        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copyfile", "Failed to open dest file %s by \"%s\"\n",
+                      destination, strerror(errno));
+        goto error;
+    }
+    slapi_log_err(SLAPI_LOG_INFO,
+                  "dblayer_copyfile", "Copying %s to %s\n", source, destination);
+    /* Loop round reading data and writing it */
+    while (1) {
+        int i;
+        char *ptr = NULL;
+        return_value = read(source_fd, buffer, 64 * 1024);
+        if (return_value <= 0) {
+            /* means error or EOF */
+            if (return_value < 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "dblayer_copyfile", "Failed to read by \"%s\": rval = %d\n",
+                              strerror(errno), return_value);
+            }
+            break;
+        }
+        bytes_to_write = return_value;
+        ptr = buffer;
+#define CPRETRY 4
+        for (i = 0; i < CPRETRY; i++) { /* retry twice */
+            return_value = write(dest_fd, ptr, bytes_to_write);
+            if (return_value == bytes_to_write) {
+                break;
+            } else {
+                /* means error */
+                slapi_log_err(SLAPI_LOG_ERR, "dblayer_copyfile", "Failed to write by \"%s\"; real: %d bytes, exp: %d bytes\n",
+                              strerror(errno), return_value, bytes_to_write);
+                if (return_value > 0) {
+                    bytes_to_write -= return_value;
+                    ptr += return_value;
+                    slapi_log_err(SLAPI_LOG_NOTICE, "dblayer_copyfile", "Retrying to write %d bytes\n", bytes_to_write);
+                } else {
+                    break;
+                }
+            }
+        }
+        if ((CPRETRY == i) || (return_value < 0)) {
+            return_value = -1;
+            break;
+        }
+    }
+error:
+    if (source_fd != -1) {
+        close(source_fd);
+    }
+    if (dest_fd != -1) {
+        close(dest_fd);
+    }
+    slapi_ch_free((void **)&buffer);
+    return return_value;
+}
+
+/*
+ * Copies all the .db# files in instance_dir to a directory with the same name
+ * in destination_dir.  Both instance_dir and destination_dir are absolute
+ * paths.
+ * (#604921: added indexonly flag for the use in convindices
+ *           -- backup/restore indices)
+ *
+ * If the argument restore is true,
+ *        logging messages will be about "Restoring" files.
+ * If the argument restore is false,
+ *        logging messages will be about "Backing up" files.
+ * The argument cnt is used to count the number of files that were copied.
+ *
+ * This function is used during db2bak and bak2db.
+ */
+int
+bdb_copy_directory(struct ldbminfo *li,
+                       Slapi_Task *task,
+                       char *src_dir,
+                       char *dest_dir,
+                       int restore,
+                       int *cnt,
+                       int indexonly,
+                       int is_changelog)
+{
+    dblayer_private *priv = NULL;
+    char *new_src_dir = NULL;
+    char *new_dest_dir = NULL;
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    char *compare_piece = NULL;
+    char *filename1;
+    char *filename2;
+    int return_value = -1;
+    char *relative_instance_name = NULL;
+    char *inst_dirp = NULL;
+    char inst_dir[MAXPATHLEN];
+    char sep;
+    int src_is_fullpath = 0;
+    ldbm_instance *inst = NULL;
+
+    if (!src_dir || '\0' == *src_dir) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_copy_directory", "src_dir is empty\n");
+        return return_value;
+    }
+    if (!dest_dir || '\0' == *dest_dir) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_copy_directory", "dest_dir is empty\n");
+        return return_value;
+    }
+
+    priv = li->li_dblayer_private;
+
+    /* get the backend instance name */
+    sep = get_sep(src_dir);
+    if ((relative_instance_name = strrchr(src_dir, sep)) == NULL)
+        relative_instance_name = src_dir;
+    else
+        relative_instance_name++;
+
+    if (is_fullpath(src_dir)) {
+        src_is_fullpath = 1;
+    }
+    if (is_changelog) {
+        if (!src_is_fullpath) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_copy_directory", "Changelogdir \"%s\" is not full path; "
+                                                                   "Skipping it.\n",
+                          src_dir);
+            return 0;
+        }
+    } else {
+        inst = ldbm_instance_find_by_name(li, relative_instance_name);
+        if (NULL == inst) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_copy_directory", "Backend instance \"%s\" does not exist; "
+                                                                   "Instance path %s could be invalid.\n",
+                          relative_instance_name, src_dir);
+            return return_value;
+        }
+    }
+
+    if (src_is_fullpath) {
+        new_src_dir = src_dir;
+    } else {
+        int len;
+
+        inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                              inst_dir, MAXPATHLEN);
+        if (!inst_dirp || !*inst_dirp) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_copy_directory", "Instance dir is NULL.\n");
+            if (inst_dirp != inst_dir) {
+                slapi_ch_free_string(&inst_dirp);
+            }
+            return return_value;
+        }
+        len = strlen(inst_dirp);
+        sep = get_sep(inst_dirp);
+        if (*(inst_dirp + len - 1) == sep)
+            sep = '\0';
+        new_src_dir = inst_dirp;
+    }
+
+    dirhandle = PR_OpenDir(new_src_dir);
+    if (NULL == dirhandle) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_copy_directory", "Failed to open dir %s\n",
+                      new_src_dir);
+
+        return return_value;
+    }
+
+    while (NULL != (direntry =
+                        PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        if (NULL == direntry->name) {
+            /* NSPR doesn't behave like the docs say it should */
+            break;
+        }
+        if (indexonly &&
+            0 == strcmp(direntry->name, ID2ENTRY LDBM_FILENAME_SUFFIX)) {
+            continue;
+        }
+
+        compare_piece = PL_strrchr((char *)direntry->name, '.');
+        if (NULL == compare_piece) {
+            compare_piece = (char *)direntry->name;
+        }
+        /* rename .db3 -> .db4 or .db4 -> .db */
+        if (0 == strcmp(compare_piece, LDBM_FILENAME_SUFFIX) ||
+            0 == strcmp(compare_piece, LDBM_SUFFIX_OLD) ||
+            0 == strcmp(direntry->name, DBVERSION_FILENAME)) {
+            /* Found a database file.  Copy it. */
+
+            if (NULL == new_dest_dir) {
+                /* Need to create the new directory where the files will be
+                 * copied to. */
+                PRFileInfo64 info;
+                char *prefix = "";
+                char mysep = 0;
+
+                if (!is_fullpath(dest_dir)) {
+                    prefix = bdb_get_home_dir(li, NULL);
+                    if (!prefix || !*prefix) {
+                        continue;
+                    }
+                    mysep = get_sep(prefix);
+                }
+
+                if (mysep)
+                    new_dest_dir = slapi_ch_smprintf("%s%c%s%c%s",
+                                                     prefix, mysep, dest_dir, mysep, relative_instance_name);
+                else
+                    new_dest_dir = slapi_ch_smprintf("%s/%s",
+                                                     dest_dir, relative_instance_name);
+                /* } */
+                if (PR_SUCCESS == PR_GetFileInfo64(new_dest_dir, &info)) {
+                    ldbm_delete_dirs(new_dest_dir);
+                }
+                if (mkdir_p(new_dest_dir, 0700) != PR_SUCCESS) {
+                    slapi_log_err(SLAPI_LOG_ERR, "bdb_copy_directory", "Can't create new directory %s, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)\n",
+                                  new_dest_dir, PR_GetError(),
+                                  slapd_pr_strerror(PR_GetError()));
+                    goto out;
+                }
+            }
+
+            filename1 = slapi_ch_smprintf("%s/%s", new_src_dir, direntry->name);
+            filename2 = slapi_ch_smprintf("%s/%s", new_dest_dir, direntry->name);
+
+            if (restore) {
+                slapi_log_err(SLAPI_LOG_INFO, "bdb_copy_directory", "Restoring file %d (%s)\n",
+                              *cnt, filename2);
+                if (task) {
+                    slapi_task_log_notice(task,
+                                          "Restoring file %d (%s)", *cnt, filename2);
+                    slapi_task_log_status(task,
+                                          "Restoring file %d (%s)", *cnt, filename2);
+                }
+            } else {
+                slapi_log_err(SLAPI_LOG_INFO, "bdb_copy_directory", "Backing up file %d (%s)\n",
+                              *cnt, filename2);
+                if (task) {
+                    slapi_task_log_notice(task,
+                                          "Backing up file %d (%s)", *cnt, filename2);
+                    slapi_task_log_status(task,
+                                          "Backing up file %d (%s)", *cnt, filename2);
+                }
+            }
+
+            /* copy filename1 to filename2 */
+            /* PL_strcmp takes NULL arg */
+            return_value = dblayer_copyfile(filename1, filename2,
+                                                0, priv->dblayer_file_mode);
+            if (return_value < 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_copy_directory", "Failed to copy file %s to %s\n",
+                              filename1, filename2);
+                slapi_ch_free((void **)&filename1);
+                slapi_ch_free((void **)&filename2);
+                break;
+            }
+            slapi_ch_free((void **)&filename1);
+            slapi_ch_free((void **)&filename2);
+
+            (*cnt)++;
+        }
+    }
+out:
+    PR_CloseDir(dirhandle);
+    slapi_ch_free_string(&new_dest_dir);
+    if ((new_src_dir != src_dir) && (new_src_dir != inst_dir)) {
+        slapi_ch_free_string(&new_src_dir);
+    }
+    return return_value;
+}
+
+/*
+ * Get changelogdir from cn=changelog5,cn=config
+ * The value does not have trailing spaces nor slashes.
+ * The changelogdir value must be a fullpath.
+ *
+ * On success *changelogdir is either NULL (no changelog configured) or a
+ * freshly allocated, normalized path the caller must free.
+ * Returns an LDAP result code, or -1 on invalid arguments.
+ */
+static int
+_dblayer_get_changelogdir(struct ldbminfo *li, char **changelogdir)
+{
+    Slapi_PBlock *search_pb = NULL;
+    Slapi_Entry **results = NULL;
+    Slapi_Attr *dir_attr = NULL;
+    Slapi_Value *dir_val = NULL;
+    const char *dir_str = NULL;
+    char *attrlist[2];
+    int rc = -1;
+
+    /* Both the context and the output pointer are required. */
+    if (NULL == li || NULL == changelogdir) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "_dblayer_get_changelogdir", "Invalid arg: "
+                                                   "li: 0x%p, changelogdir: 0x%p\n",
+                      li, changelogdir);
+        return rc;
+    }
+    *changelogdir = NULL;
+
+    /* Internal base-scope search against the changelog config entry,
+     * requesting only the changelog-directory attribute. */
+    attrlist[0] = CHANGELOGDIRATTR;
+    attrlist[1] = NULL;
+    search_pb = slapi_pblock_new();
+    slapi_search_internal_set_pb(search_pb, CHANGELOGENTRY,
+                                 LDAP_SCOPE_BASE, "cn=*", attrlist, 0, NULL, NULL,
+                                 li->li_identity, 0);
+    slapi_search_internal_pb(search_pb);
+    slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_RESULT, &rc);
+
+    if (LDAP_NO_SUCH_OBJECT == rc) {
+        /* No changelog; Most likely standalone or not a master. */
+        rc = LDAP_SUCCESS;
+        goto done;
+    }
+    if (LDAP_SUCCESS != rc) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "_dblayer_get_changelogdir", "Failed to search \"%s\"\n", CHANGELOGENTRY);
+        goto done;
+    }
+    /* Search succeeded; there should be at most one matching entry. */
+    slapi_pblock_get(search_pb, SLAPI_PLUGIN_INTOP_SEARCH_ENTRIES, &results);
+    if (NULL == results) {
+        /* No changelog */
+        goto done;
+    }
+    rc = slapi_entry_attr_find(results[0], CHANGELOGDIRATTR, &dir_attr);
+    if (rc || NULL == dir_attr) {
+        /* No changelog dir */
+        rc = LDAP_SUCCESS;
+        goto done;
+    }
+    rc = slapi_attr_first_value(dir_attr, &dir_val);
+    if (rc || NULL == dir_val) {
+        /* No changelog dir */
+        rc = LDAP_SUCCESS;
+        goto done;
+    }
+    rc = LDAP_SUCCESS;
+    dir_str = slapi_value_get_string(dir_val);
+    if (NULL == dir_str) {
+        /* No changelog dir */
+        goto done;
+    }
+    *changelogdir = slapi_ch_strdup(dir_str);
+    /* Strip any trailing spaces and '/' characters in place. */
+    normalize_dir(*changelogdir);
+done:
+    slapi_free_search_results_internal(search_pb);
+    slapi_pblock_destroy(search_pb);
+    return rc;
+}
+
+/*
+ * bdb_backup - back up the BDB database to dest_dir.
+ * Destination Directory is an absolute pathname.
+ *
+ * Copies, per the recipe in the comment below: every backend instance's
+ * database files, the replication changelog (if cn=changelog5,cn=config
+ * defines one), the transaction logfiles (retrying until a consistent
+ * set is captured), the DBVERSION file, and finally the index (dse)
+ * configuration.  Progress is reported through the optional Slapi_Task.
+ * Returns 0 on success, nonzero/negative on failure.
+ */
+int
+bdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task)
+{
+    dblayer_private *priv = NULL;
+    bdb_config *conf = NULL;
+    char **listA = NULL, **listB = NULL, **listi, **listj, *prefix;
+    char *home_dir = NULL;
+    int return_value = -1;
+    char *pathname1;
+    char *pathname2;
+    back_txn txn;
+    int cnt = 1, ok = 0;
+    Object *inst_obj;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+    char *changelogdir = NULL;
+
+    /* NOTE(review): the log messages in this function still use the
+     * legacy "dblayer_backup" component tag rather than "bdb_backup". */
+    PR_ASSERT(NULL != li);
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+    home_dir = bdb_get_home_dir(li, NULL);
+    if (NULL == home_dir || '\0' == *home_dir) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_backup", "Missing db home directory info\n");
+        return return_value;
+    }
+
+    /*
+     * What are we doing here ?
+     * We want to copy into the backup directory:
+     * All the backend instance dir / database files;
+     * All the logfiles
+     * The version file
+     */
+
+    /* changed in may 1999 for political correctness.
+     * 1. take checkpoint
+     * 2. open transaction
+     * 3. get list of logfiles (A)
+     * 4. copy the db# files
+     * 5. get list of logfiles (B)
+     * 6. if !(A in B), goto 3
+     *    (logfiles were flushed during our backup)
+     * 7. copy logfiles from list B
+     * 8. abort transaction
+     * 9. backup index config info
+     */
+
+    /* Order of checkpointing and txn creation reversed to work
+     * around DB problem. If we don't do it this way around DB
+     * thinks all old transaction logs are required for recovery
+     * when the DB environment has been newly created (such as
+     * after an import).
+     */
+
+    /* do a quick checkpoint */
+    dblayer_force_checkpoint(li);
+    dblayer_txn_init(li, &txn);
+    return_value = dblayer_txn_begin_all(li, NULL, &txn);
+    if (return_value) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_backup", "Transaction error\n");
+        return return_value;
+    }
+
+    if (g_get_shutdown() || c_get_shutdown()) {
+        slapi_log_err(SLAPI_LOG_WARNING, "dblayer_backup", "Server shutting down, backup aborted\n");
+        return_value = -1;
+        goto bail;
+    }
+
+    /* repeat this until the logfile sets match... */
+    do {
+        /* get the list of logfiles currently existing */
+        if (conf->bdb_enable_transactions) {
+            return_value = LOG_ARCHIVE(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV,
+                                       &listA, DB_ARCH_LOG, (void *)slapi_ch_malloc);
+            if (return_value || (listA == NULL)) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "dblayer_backup", "Log archive error\n");
+                if (task) {
+                    slapi_task_log_notice(task, "Backup: log archive error\n");
+                }
+                return_value = -1;
+                goto bail;
+            }
+        } else {
+            /* Transactions disabled: no logfiles to reconcile, one pass. */
+            ok = 1;
+        }
+        if (g_get_shutdown() || c_get_shutdown()) {
+            slapi_log_err(SLAPI_LOG_ERR, "dblayer_backup", "Server shutting down, backup aborted\n");
+            return_value = -1;
+            goto bail;
+        }
+
+        /* Copy each backend instance's database directory. */
+        for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+             inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+            ldbm_instance *inst = (ldbm_instance *)object_get_data(inst_obj);
+            inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                                  inst_dir, MAXPATHLEN);
+            if ((NULL == inst_dirp) || ('\0' == *inst_dirp)) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "dblayer_backup", "Instance dir is empty\n");
+                if (task) {
+                    slapi_task_log_notice(task,
+                                          "Backup: Instance dir is empty\n");
+                }
+                if (inst_dirp != inst_dir) {
+                    slapi_ch_free_string(&inst_dirp);
+                }
+                return_value = -1;
+                goto bail;
+            }
+            return_value = bdb_copy_directory(li, task, inst_dirp,
+                                                  dest_dir, 0 /* backup */,
+                                                  &cnt, 0, 0);
+            if (return_value) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "dblayer_backup", "Error in copying directory "
+                                                "(%s -> %s): err=%d\n",
+                              inst_dirp, dest_dir, return_value);
+                if (task) {
+                    slapi_task_log_notice(task,
+                                          "Backup: error in copying directory "
+                                          "(%s -> %s): err=%d\n",
+                                          inst_dirp, dest_dir, return_value);
+                }
+                if (inst_dirp != inst_dir) {
+                    slapi_ch_free_string(&inst_dirp);
+                }
+                goto bail;
+            }
+            if (inst_dirp != inst_dir)
+                slapi_ch_free_string(&inst_dirp);
+        }
+        /* Get changelogdir, if any */
+        _dblayer_get_changelogdir(li, &changelogdir);
+        if (changelogdir) {
+            /* dest dir for changelog: dest_dir/repl_changelog_backup  */
+            char *changelog_destdir = slapi_ch_smprintf("%s/%s",
+                                                        dest_dir, CHANGELOG_BACKUPDIR);
+            return_value = bdb_copy_directory(li, task, changelogdir,
+                                                  changelog_destdir,
+                                                  0 /* backup */,
+                                                  &cnt, 0, 1);
+            if (return_value) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "dblayer_backup", "Error in copying directory "
+                                                "(%s -> %s): err=%d\n",
+                              changelogdir, changelog_destdir, return_value);
+                if (task) {
+                    slapi_task_log_notice(task,
+                                          "Backup: error in copying directory "
+                                          "(%s -> %s): err=%d\n",
+                                          changelogdir, changelog_destdir, return_value);
+                }
+                slapi_ch_free_string(&changelog_destdir);
+                goto bail;
+            }
+            /* Copy DBVERSION */
+            pathname1 = slapi_ch_smprintf("%s/%s",
+                                          changelogdir, DBVERSION_FILENAME);
+            pathname2 = slapi_ch_smprintf("%s/%s",
+                                          changelog_destdir, DBVERSION_FILENAME);
+            return_value = dblayer_copyfile(pathname1, pathname2,
+                                            0, priv->dblayer_file_mode);
+            slapi_ch_free_string(&pathname2);
+            slapi_ch_free_string(&changelog_destdir);
+            if (0 > return_value) {
+                slapi_log_err(SLAPI_LOG_ERR, "dblayer_backup", "Failed to copy file %s\n", pathname1);
+                slapi_ch_free_string(&pathname1);
+                goto bail;
+            }
+            slapi_ch_free_string(&pathname1);
+        }
+        if (conf->bdb_enable_transactions) {
+            /* now, get the list of logfiles that still exist */
+            return_value = LOG_ARCHIVE(((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV,
+                                       &listB, DB_ARCH_LOG, (void *)slapi_ch_malloc);
+            if (return_value || (listB == NULL)) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "dblayer_backup", "Can't get list of logs\n");
+                goto bail;
+            }
+
+            /* compare: make sure everything in list A is still in list B */
+            ok = 1;
+            for (listi = listA; listi && *listi && ok; listi++) {
+                int found = 0;
+                for (listj = listB; listj && *listj && !found; listj++) {
+                    if (strcmp(*listi, *listj) == 0) {
+                        found = 1;
+                        break;
+                    }
+                }
+                if (!found) {
+                    ok = 0; /* missing log: start over */
+                    slapi_log_err(SLAPI_LOG_WARNING,
+                                  "dblayer_backup", "Log %s has been swiped "
+                                                    "out from under me! (retrying)\n",
+                                  *listi);
+                    if (task) {
+                        slapi_task_log_notice(task,
+                                              "WARNING: Log %s has been swiped out from under me! "
+                                              "(retrying)",
+                                              *listi);
+                    }
+                }
+            }
+
+            if (g_get_shutdown() || c_get_shutdown()) {
+                slapi_log_err(SLAPI_LOG_ERR, "dblayer_backup", "Server shutting down, backup aborted\n");
+                return_value = -1;
+                goto bail;
+            }
+
+            if (ok) {
+                size_t p1len, p2len;
+                char **listptr;
+
+                /* Logfiles live in the configured log directory if set,
+                 * otherwise in the db home directory. */
+                prefix = NULL;
+                if ((NULL != conf->bdb_log_directory) &&
+                    (0 != strlen(conf->bdb_log_directory))) {
+                    prefix = conf->bdb_log_directory;
+                } else {
+                    prefix = home_dir;
+                }
+                /* log files have the same filename len(100 is a safety net:) */
+                p1len = strlen(prefix) + strlen(*listB) + 100;
+                pathname1 = (char *)slapi_ch_malloc(p1len);
+                p2len = strlen(dest_dir) + strlen(*listB) + 100;
+                pathname2 = (char *)slapi_ch_malloc(p2len);
+                /* We copy those over */
+                for (listptr = listB; listptr && *listptr && ok; ++listptr) {
+                    PR_snprintf(pathname1, p1len, "%s/%s", prefix, *listptr);
+                    PR_snprintf(pathname2, p2len, "%s/%s", dest_dir, *listptr);
+                    slapi_log_err(SLAPI_LOG_INFO, "dblayer_backup", "Backing up file %d (%s)\n",
+                                  cnt, pathname2);
+                    if (task) {
+                        slapi_task_log_notice(task,
+                                              "Backing up file %d (%s)", cnt, pathname2);
+                        slapi_task_log_status(task,
+                                              "Backing up file %d (%s)", cnt, pathname2);
+                    }
+                    return_value = dblayer_copyfile(pathname1, pathname2,
+                                                    0, priv->dblayer_file_mode);
+                    if (0 > return_value) {
+                        slapi_log_err(SLAPI_LOG_ERR, "dblayer_backup", "Error in copying file '%s' (err=%d)\n",
+                                      pathname1, return_value);
+                        if (task) {
+                            slapi_task_log_notice(task, "Error copying file '%s' (err=%d)",
+                                                  pathname1, return_value);
+                        }
+                        slapi_ch_free((void **)&pathname1);
+                        slapi_ch_free((void **)&pathname2);
+                        goto bail;
+                    }
+                    if (g_get_shutdown() || c_get_shutdown()) {
+                        slapi_log_err(SLAPI_LOG_ERR, "dblayer_backup", "Server shutting down, backup aborted\n");
+                        return_value = -1;
+                        slapi_ch_free((void **)&pathname1);
+                        slapi_ch_free((void **)&pathname2);
+                        goto bail;
+                    }
+                    cnt++;
+                }
+                slapi_ch_free((void **)&pathname1);
+                slapi_ch_free((void **)&pathname2);
+            }
+
+            slapi_ch_free((void **)&listA);
+            slapi_ch_free((void **)&listB);
+        }
+    } while (!ok);
+
+    /* now copy the version file */
+    pathname1 = slapi_ch_smprintf("%s/%s", home_dir, DBVERSION_FILENAME);
+    pathname2 = slapi_ch_smprintf("%s/%s", dest_dir, DBVERSION_FILENAME);
+    slapi_log_err(SLAPI_LOG_INFO, "dblayer_backup", "Backing up file %d (%s)\n", cnt, pathname2);
+    if (task) {
+        slapi_task_log_notice(task, "Backing up file %d (%s)", cnt, pathname2);
+        slapi_task_log_status(task, "Backing up file %d (%s)", cnt, pathname2);
+    }
+    return_value = dblayer_copyfile(pathname1, pathname2, 0, priv->dblayer_file_mode);
+    if (0 > return_value) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_backup", "Error in copying version file "
+                                        "(%s -> %s): err=%d\n",
+                      pathname1, pathname2, return_value);
+        if (task) {
+            slapi_task_log_notice(task,
+                                  "Backup: error in copying version file "
+                                  "(%s -> %s): err=%d\n",
+                                  pathname1, pathname2, return_value);
+        }
+    }
+    slapi_ch_free((void **)&pathname1);
+    slapi_ch_free((void **)&pathname2);
+
+    /* Lastly we tell log file truncation to start again */
+
+    if (0 == return_value) /* if everything went well, backup the index conf */
+        return_value = dse_conf_backup(li, dest_dir);
+bail:
+    /* Common exit: free any log lists and end the backup transaction. */
+    slapi_ch_free((void **)&listA);
+    slapi_ch_free((void **)&listB);
+    dblayer_txn_abort_all(li, &txn);
+    slapi_ch_free_string(&changelogdir);
+    return return_value;
+}
+
+
+/*
+ * Restore is pretty easy.
+ * We delete the current database.
+ * We then copy all the files over from the backup point.
+ * We then leave them there for the slapd process to pick up and do the recovery
+ * (which it will do as it sees no guard file).
+ */
+
+/* Helper function first */
+
+/*
+ * Return 1 if the name looks like a BDB transaction log file
+ * ("log." prefix and not ending in the database file suffix),
+ * 0 otherwise.
+ */
+static int
+dblayer_is_logfilename(const char *path)
+{
+    size_t namelen = strlen(path);
+
+    /* Anything shorter than the "log." prefix cannot qualify. */
+    if (namelen < 4) {
+        return 0;
+    }
+    /* Must start with "log." ... */
+    if (strncmp(path, "log.", 4) != 0) {
+        return 0;
+    }
+    /* ... and must NOT end with the .db# suffix. */
+    if (strcmp(path + (namelen - 4), LDBM_FILENAME_SUFFIX) == 0) {
+        return 0;
+    }
+    return 1;
+}
+
+/* remove log.xxx from log directory*/
+/* Returns 0 on success (including a missing directory), 1 if the
+ * directory exists but cannot be opened.  Subdirectories and
+ * non-logfile entries are left untouched. */
+static int
+dblayer_delete_transaction_logs(const char *log_dir)
+{
+    char path[MAXPATHLEN];
+    PRDir *dir = PR_OpenDir(log_dir);
+
+    if (NULL == dir) {
+        /* A missing directory simply means there is nothing to delete. */
+        if (PR_FILE_NOT_FOUND_ERROR == PR_GetError()) {
+            return 0;
+        }
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_delete_transaction_logs", "PR_OpenDir(%s) failed (%d): %s\n",
+                      log_dir, PR_GetError(), slapd_pr_strerror(PR_GetError()));
+        return 1;
+    }
+
+    for (;;) {
+        PRDirEntry *entry = PR_ReadDir(dir, PR_SKIP_DOT | PR_SKIP_DOT_DOT);
+        PRFileInfo64 finfo;
+
+        if (NULL == entry) {
+            break;
+        }
+        if (NULL == entry->name) {
+            /* NSPR doesn't behave like the docs say it should */
+            slapi_log_err(SLAPI_LOG_ERR, "dblayer_delete_transaction_logs", "PR_ReadDir failed (%d): %s\n",
+                          PR_GetError(), slapd_pr_strerror(PR_GetError()));
+            break;
+        }
+        PR_snprintf(path, MAXPATHLEN, "%s/%s", log_dir, entry->name);
+        /* Skip subdirectories; only plain files can be logfiles. */
+        if (PR_GetFileInfo64(path, &finfo) == PR_SUCCESS &&
+            PR_FILE_DIRECTORY == finfo.type) {
+            continue;
+        }
+        if (dblayer_is_logfilename(entry->name) &&
+            (NULL != log_dir) && (0 != strlen(log_dir))) {
+            slapi_log_err(SLAPI_LOG_INFO, "dblayer_delete_transaction_logs", "Deleting log file: (%s)\n",
+                          path);
+            unlink(path);
+        }
+    }
+    PR_CloseDir(dir);
+    return 0;
+}
+
+/* Suffixes of files that must not be treated as database files
+ * during backup/restore directory scans. */
+const char *skip_list[] =
+    {
+        ".ldif",
+        NULL};
+
+/*
+ * Return 1 if filename ends with one of the suffixes in skip_list,
+ * 0 otherwise.
+ */
+static int
+doskip(const char *filename)
+{
+    const char **p;
+    int len = strlen(filename);
+
+    for (p = skip_list; p && *p; p++) {
+        int n = strlen(*p);
+        /* Guard len >= n: without it, filename + len - n points before
+         * the start of the string for names shorter than the suffix,
+         * which is an out-of-bounds read (undefined behavior). */
+        if (len >= n && 0 == strncmp(filename + len - n, *p, n))
+            return 1;
+    }
+    return 0;
+}
+
+int
+bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task)
+{
+    bdb_config *conf = NULL;
+    dblayer_private *priv = NULL;
+    int return_value = 0;
+    int tmp_rval;
+    char filename1[MAXPATHLEN];
+    char filename2[MAXPATHLEN];
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    PRFileInfo64 info;
+    ldbm_instance *inst = NULL;
+    int seen_logfiles = 0; /* Tells us if we restored any logfiles */
+    int is_a_logfile = 0;
+    int dbmode;
+    int action = 0;
+    char *home_dir = NULL;
+    char *real_src_dir = NULL;
+    struct stat sbuf;
+    char *changelogdir = NULL;
+    char *restore_dir = NULL;
+    char *prefix = NULL;
+    int cnt = 1;
+
+    PR_ASSERT(NULL != li);
+    conf = (bdb_config *)li->li_dblayer_config;
+    priv = li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    /* DBDB this is a hack, take out later */
+    PR_Lock(li->li_config_mutex);
+    /* bdb_home_directory is freed in bdb_post_close.
+     * li_directory needs to live beyond dblayer. */
+    slapi_ch_free_string(&conf->bdb_home_directory);
+    conf->bdb_home_directory = slapi_ch_strdup(li->li_directory);
+    conf->bdb_cachesize = li->li_dbcachesize;
+    conf->bdb_lock_config = li->li_dblock;
+    conf->bdb_ncache = li->li_dbncache;
+    priv->dblayer_file_mode = li->li_mode;
+    PR_Unlock(li->li_config_mutex);
+
+    home_dir = bdb_get_home_dir(li, NULL);
+
+    if (NULL == home_dir || '\0' == *home_dir) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_restore",
+                      "Missing db home directory info\n");
+        return -1;
+    }
+
+    /* We find out if slapd is running */
+    /* If it is, we fail */
+    /* We check on the source staging area, no point in going further if it
+     * isn't there */
+    if (stat(src_dir, &sbuf) < 0) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_restore", "Backup directory %s does not "
+                                                        "exist.\n",
+                      src_dir);
+        if (task) {
+            slapi_task_log_notice(task, "Restore: backup directory %s does not "
+                                        "exist.\n",
+                                  src_dir);
+        }
+        return LDAP_UNWILLING_TO_PERFORM;
+    } else if (!S_ISDIR(sbuf.st_mode)) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_restore", "Backup directory %s is not "
+                                                        "a directory.\n",
+                      src_dir);
+        if (task) {
+            slapi_task_log_notice(task, "Restore: backup directory %s is not "
+                                        "a directory.\n",
+                                  src_dir);
+        }
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+    if (!bdb_version_exists(li, src_dir)) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_restore", "Backup directory %s does not "
+                                                        "contain a complete backup\n",
+                      src_dir);
+        if (task) {
+            slapi_task_log_notice(task, "Restore: backup directory %s does not "
+                                        "contain a complete backup",
+                                  src_dir);
+        }
+        return LDAP_UNWILLING_TO_PERFORM;
+    }
+
+    /*
+     * Check if the target is a superset of the backup.
+     * If not don't restore any db at all, otherwise
+     * the target will be crippled.
+     */
+    dirhandle = PR_OpenDir(src_dir);
+    if (NULL != dirhandle) {
+        while ((direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT)) && direntry->name) {
+            PR_snprintf(filename1, sizeof(filename1), "%s/%s",
+                        src_dir, direntry->name);
+            {
+                tmp_rval = PR_GetFileInfo64(filename1, &info);
+                if (tmp_rval == PR_SUCCESS && PR_FILE_DIRECTORY == info.type) {
+                    /* Is it CHANGELOG_BACKUPDIR? */
+                    if (0 == strcmp(CHANGELOG_BACKUPDIR, direntry->name)) {
+                        /* Yes, this is a changelog backup. */
+                        /* Get the changelog path */
+                        _dblayer_get_changelogdir(li, &changelogdir);
+                        continue;
+                    }
+                    inst = ldbm_instance_find_by_name(li, (char *)direntry->name);
+                    if (inst == NULL) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_restore", "Target server has no %s configured\n",
+                                      direntry->name);
+                        if (task) {
+                            slapi_task_log_notice(task,
+                                                  "bdb_restore - Target server has no %s configured\n",
+                                                  direntry->name);
+                        }
+                        PR_CloseDir(dirhandle);
+                        return_value = LDAP_UNWILLING_TO_PERFORM;
+                        goto error_out;
+                    }
+
+                    if (slapd_comp_path(src_dir, inst->inst_parent_dir_name) == 0) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_restore", "Backup dir %s and target dir %s are identical\n",
+                                      src_dir, inst->inst_parent_dir_name);
+                        if (task) {
+                            slapi_task_log_notice(task,
+                                                  "Restore: backup dir %s and target dir %s are identical\n",
+                                                  src_dir, inst->inst_parent_dir_name);
+                        }
+                        PR_CloseDir(dirhandle);
+                        return_value = LDAP_UNWILLING_TO_PERFORM;
+                        goto error_out;
+                    }
+                }
+            }
+        }
+        PR_CloseDir(dirhandle);
+    }
+
+    /* We delete the existing database */
+    /* changelogdir is taken care only when it's not NULL. */
+    return_value = bdb_delete_database_ex(li, changelogdir);
+    if (return_value) {
+        goto error_out;
+    }
+
+    {
+        /* Otherwise use the src_dir from the caller */
+        real_src_dir = src_dir;
+    }
+
+    /* We copy the files over from the staging area */
+    /* We want to treat the logfiles specially: if there's
+     * a log file directory configured, copy the logfiles there
+     * rather than to the db dirctory */
+    dirhandle = PR_OpenDir(real_src_dir);
+    if (NULL == dirhandle) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_restore", "Failed to open the directory \"%s\"\n", real_src_dir);
+        if (task) {
+            slapi_task_log_notice(task,
+                                  "Restore: failed to open the directory \"%s\"\n", real_src_dir);
+        }
+        return_value = -1;
+        goto error_out;
+    }
+
+    while (NULL !=
+           (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        if (NULL == direntry->name) {
+            /* NSPR doesn't behave like the docs say it should */
+            break;
+        }
+
+        /* Is this entry a directory? */
+        PR_snprintf(filename1, sizeof(filename1), "%s/%s",
+                    real_src_dir, direntry->name);
+        tmp_rval = PR_GetFileInfo64(filename1, &info);
+        if (tmp_rval == PR_SUCCESS && PR_FILE_DIRECTORY == info.type) {
+            /* This is an instance directory. It contains the *.db#
+             * files for the backend instance.
+             * restore directory is supposed to be where the backend
+             * directory is located.
+             */
+            if (0 == strcmp(CHANGELOG_BACKUPDIR, direntry->name)) {
+                if (changelogdir) {
+                    char *cldirname = PL_strrchr(changelogdir, '/');
+                    char *p = filename1 + strlen(filename1);
+                    if (NULL == cldirname) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_restore", "Broken changelog dir path %s\n",
+                                      changelogdir);
+                        if (task) {
+                            slapi_task_log_notice(task,
+                                                  "Restore: broken changelog dir path %s\n",
+                                                  changelogdir);
+                        }
+                        goto error_out;
+                    }
+                    PR_snprintf(p, sizeof(filename1) - (p - filename1),
+                                "/%s", cldirname + 1);
+                    /* Get the parent dir of changelogdir */
+                    *cldirname = '\0';
+                    return_value = bdb_copy_directory(li, task, filename1,
+                                                          changelogdir, 1 /* restore */,
+                                                          &cnt, 0, 1);
+                    *cldirname = '/';
+                    if (return_value) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_restore", "Failed to copy directory %s\n",
+                                      filename1);
+                        if (task) {
+                            slapi_task_log_notice(task,
+                                                  "Restore: failed to copy directory %s",
+                                                  filename1);
+                        }
+                        goto error_out;
+                    }
+                    /* Copy DBVERSION */
+                    p = filename1 + strlen(filename1);
+                    PR_snprintf(p, sizeof(filename1) - (p - filename1),
+                                "/%s", DBVERSION_FILENAME);
+                    PR_snprintf(filename2, sizeof(filename2), "%s/%s",
+                                changelogdir, DBVERSION_FILENAME);
+                    return_value = dblayer_copyfile(filename1, filename2,
+                                                    0, priv->dblayer_file_mode);
+                    if (0 > return_value) {
+                        slapi_log_err(SLAPI_LOG_ERR, "bdb_restore", "Failed to copy file %s\n", filename1);
+                        goto error_out;
+                    }
+                }
+                continue;
+            }
+
+            inst = ldbm_instance_find_by_name(li, (char *)direntry->name);
+            if (inst == NULL)
+                continue;
+
+            restore_dir = inst->inst_parent_dir_name;
+            /* If we're doing a partial restore, we need to reset the LSNs on the data files */
+            if (bdb_copy_directory(li, task, filename1,
+                                       restore_dir, 1 /* restore */, &cnt, 0, 0) == 0)
+                continue;
+            else {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_restore", "Failed to copy directory %s\n",
+                              filename1);
+                if (task) {
+                    slapi_task_log_notice(task,
+                                          "bdb_restore - Failed to copy directory %s", filename1);
+                }
+                goto error_out;
+            }
+        }
+
+        if (doskip(direntry->name))
+            continue;
+
+        /* Is this a log file ? */
+        /* Log files have names of the form "log.xxxxx" */
+        /* We detect these by looking for the prefix "log." and
+         * the lack of the ".db#" suffix */
+        is_a_logfile = dblayer_is_logfilename(direntry->name);
+        if (is_a_logfile) {
+            seen_logfiles = 1;
+        }
+        if (is_a_logfile && (NULL != BDB_CONFIG(li)->bdb_log_directory) &&
+            (0 != strlen(BDB_CONFIG(li)->bdb_log_directory))) {
+            prefix = BDB_CONFIG(li)->bdb_log_directory;
+        } else {
+            prefix = home_dir;
+        }
+        mkdir_p(prefix, 0700);
+        PR_snprintf(filename1, sizeof(filename1), "%s/%s",
+                    real_src_dir, direntry->name);
+        PR_snprintf(filename2, sizeof(filename2), "%s/%s",
+                    prefix, direntry->name);
+        slapi_log_err(SLAPI_LOG_INFO, "bdb_restore", "Restoring file %d (%s)\n",
+                      cnt, filename2);
+        if (task) {
+            slapi_task_log_notice(task, "Restoring file %d (%s)",
+                                  cnt, filename2);
+            slapi_task_log_status(task, "Restoring file %d (%s)",
+                                  cnt, filename2);
+        }
+        return_value = dblayer_copyfile(filename1, filename2, 0,
+                                        priv->dblayer_file_mode);
+        if (0 > return_value) {
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_restore", "Failed to copy file %s\n", filename1);
+            goto error_out;
+        }
+        cnt++;
+    }
+    PR_CloseDir(dirhandle);
+
+    /* We're done ! */
+
+    /* [605024] check the DBVERSION and reset idl-switch if needed */
+    if (bdb_version_exists(li, home_dir)) {
+        char *ldbmversion = NULL;
+        char *dataversion = NULL;
+
+        if (bdb_version_read(li, home_dir, &ldbmversion, &dataversion) != 0) {
+            slapi_log_err(SLAPI_LOG_WARNING, "bdb_restore", "Unable to read dbversion "
+                                                                "file in %s\n",
+                          home_dir);
+        } else {
+            adjust_idl_switch(ldbmversion, li);
+            slapi_ch_free_string(&ldbmversion);
+            slapi_ch_free_string(&dataversion);
+        }
+    }
+
+    return_value = check_db_version(li, &action);
+    if (action &
+        (DBVERSION_UPGRADE_3_4 | DBVERSION_UPGRADE_4_4 | DBVERSION_UPGRADE_4_5)) {
+        dbmode = DBLAYER_CLEAN_RECOVER_MODE; /* upgrade: remove logs & recover */
+    } else if (seen_logfiles) {
+        dbmode = DBLAYER_RESTORE_MODE;
+    } else if (action & DBVERSION_NEED_DN2RDN) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_restore", "%s is on, while the instance %s is in the DN format. "
+                                         "Please run dn2rdn to convert the database format.\n",
+                      CONFIG_ENTRYRDN_SWITCH, inst->inst_name);
+        return_value = -1;
+        goto error_out;
+    } else if (action & DBVERSION_NEED_RDN2DN) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_restore", "%s is off, while the instance %s is in the RDN format. "
+                                         "Please change the value to on in dse.ldif.\n",
+                      CONFIG_ENTRYRDN_SWITCH, inst->inst_name);
+        return_value = -1;
+        goto error_out;
+    } else {
+        dbmode = DBLAYER_RESTORE_NO_RECOVERY_MODE;
+    }
+
+    /* now start the database code up, to prevent recovery next time the
+     * server starts;
+     * dse_conf_verify may need to have db started, as well. */
+    /* If no logfiles were stored, then fatal recovery isn't required */
+
+    if (li->li_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE) {
+        /* command line mode; no need to run db threads */
+        dbmode |= DBLAYER_NO_DBTHREADS_MODE;
+    } else /* on-line mode */
+    {
+        allinstance_set_not_busy(li);
+    }
+
+    tmp_rval = bdb_start(li, dbmode);
+    if (0 != tmp_rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_restore", "Failed to init database\n");
+        if (task) {
+            slapi_task_log_notice(task, "bdb_restore - Failed to init database");
+        }
+        return_value = tmp_rval;
+        goto error_out;
+    }
+
+    if (0 == return_value) { /* only when the copyfile succeeded */
+        /* check the DSE_* files, if any */
+        tmp_rval = dse_conf_verify(li, real_src_dir);
+        if (0 != tmp_rval)
+            slapi_log_err(SLAPI_LOG_WARNING,
+                          "bdb_restore", "Unable to verify the index configuration\n");
+    }
+
+    if (li->li_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE) {
+        /* command line: close the database down again */
+        tmp_rval = dblayer_close(li, dbmode);
+        if (0 != tmp_rval) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_restore", "Failed to close database\n");
+        }
+    } else {
+        allinstance_set_busy(li); /* on-line mode */
+    }
+
+    return_value = tmp_rval ? tmp_rval : return_value;
+
+error_out:
+    /* Free the restore src dir, but only if we allocated it above */
+    if (real_src_dir && (real_src_dir != src_dir)) {
+        /* If this was an FRI restore and the staging area exists, go ahead and remove it */
+        slapi_ch_free_string(&real_src_dir);
+    }
+    slapi_ch_free_string(&changelogdir);
+    return return_value;
+}
+
+/*
+ * Build the path of the hidden ".import_<instance>" marker file used to
+ * record import progress; it lives in the instance's parent directory.
+ * Caller must free the returned string with slapi_ch_free_string().
+ */
+static char *
+bdb__import_file_name(ldbm_instance *inst)
+{
+    char *fname = slapi_ch_smprintf("%s/.import_%s",
+                                    inst->inst_parent_dir_name,
+                                    inst->inst_dir_name);
+    return fname;
+}
+
+/*
+ * Build the path of the hidden ".restore" marker file, placed one level
+ * above li->li_directory.  Caller must free the returned string with
+ * slapi_ch_free_string().
+ */
+static char *
+bdb_restore_file_name(struct ldbminfo *li)
+{
+    char *fname = slapi_ch_smprintf("%s/../.restore", li->li_directory);
+
+    return fname;
+}
+
+/*
+ * Thin wrapper around PR_Open(): open fname with the given NSPR flags
+ * and mode, returning the descriptor through *prfd (NULL on failure).
+ * Returns 0 on success, otherwise the PR error code; every failure
+ * except PR_FILE_NOT_FOUND_ERROR is logged.
+ */
+static int
+bdb_file_open(char *fname, int flags, int mode, PRFileDesc **prfd)
+{
+    int rc = 0;
+    *prfd = PR_Open(fname, flags, mode);
+
+    if (NULL == *prfd)
+        rc = PR_GetError();
+    if (rc && rc != PR_FILE_NOT_FOUND_ERROR) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_file_open", "Failed to open file: %s, error: (%d) %s\n",
+                      fname, rc, slapd_pr_strerror(rc));
+    }
+    return rc;
+}
+
+/*
+ * Create (or truncate) the empty import marker file for the instance.
+ * An empty marker that is still present later signals a failed import
+ * (see bdb_file_check).  Returns 0 on success, non-zero otherwise.
+ */
+int
+dblayer_import_file_init(ldbm_instance *inst)
+{
+    int rc = -1;
+    PRFileDesc *prfd = NULL;
+    char *fname = bdb__import_file_name(inst);
+    rc = bdb_file_open(fname, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, inst->inst_li->li_mode, &prfd);
+    if (prfd) {
+        PR_Close(prfd);
+        rc = 0;
+    }
+    slapi_ch_free_string(&fname);
+    return rc;
+}
+
+/*
+ * Create (or truncate) the empty restore marker file.  An empty marker
+ * that is still present later signals a failed restore (see
+ * bdb_file_check).  Returns 0 on success, non-zero otherwise.
+ */
+int
+dblayer_restore_file_init(struct ldbminfo *li)
+{
+    int rc = -1;
+    PRFileDesc *prfd;
+    char *fname = bdb_restore_file_name(li);
+    rc = bdb_file_open(fname, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, li->li_mode, &prfd);
+    if (prfd) {
+        PR_Close(prfd);
+        rc = 0;
+    }
+    slapi_ch_free_string(&fname);
+    return rc;
+}
+/*
+ * Write a success message into the import marker file, turning the
+ * empty "in progress" marker into a non-empty "succeeded" one
+ * (bdb_file_check treats an empty marker as a failed import).
+ * Open failures are already logged by bdb_file_open and are ignored.
+ */
+void
+dblayer_import_file_update(ldbm_instance *inst)
+{
+    PRFileDesc *prfd;
+    char *fname = bdb__import_file_name(inst);
+    bdb_file_open(fname, PR_RDWR, inst->inst_li->li_mode, &prfd);
+
+    if (prfd) {
+        char *line = slapi_ch_smprintf("import of %s succeeded", inst->inst_dir_name);
+        slapi_write_buffer(prfd, line, strlen(line));
+        slapi_ch_free_string(&line);
+        PR_Close(prfd);
+    }
+    slapi_ch_free_string(&fname);
+}
+
+/*
+ * Check for a leftover import/restore marker file.  Returns 0 when the
+ * file does not exist, 1 when it exists (including exists-but-unopenable);
+ * in either non-zero case the marker is deleted so the check is one-shot.
+ * An existing but empty marker means the previous import/restore never
+ * completed, which is logged as an error.
+ */
+int
+bdb_file_check(char *fname, int mode)
+{
+    int rc = 0;
+    int err;
+    PRFileDesc *prfd;
+    err = bdb_file_open(fname, PR_RDWR, mode, &prfd);
+
+    if (prfd) {
+        /* file exists, additional check on size */
+        PRFileInfo64 prfinfo;
+        rc = 1;
+        /* read it */
+        err = PR_GetOpenFileInfo64(prfd, &prfinfo);
+        if (err == PR_SUCCESS && 0 == prfinfo.size) {
+            /* it is empty: the previous restore or import has failed */
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_file_check", "Previous import or restore failed, file: %s is empty\n", fname);
+        }
+        PR_Close(prfd);
+        PR_Delete(fname);
+    } else {
+        if (PR_FILE_NOT_FOUND_ERROR == err) {
+            rc = 0;
+        } else {
+            /* file exists, but we cannot open it */
+            rc = 1;
+            /* error is already logged; try to delete it */
+            PR_Delete(fname);
+        }
+    }
+
+    return rc;
+}
+/*
+ * Check (and consume) the import marker file of an instance.
+ * Returns 0 if no marker exists, 1 if one does (see bdb_file_check).
+ */
+int
+dblayer_import_file_check(ldbm_instance *inst)
+{
+    int rc;
+    char *fname = bdb__import_file_name(inst);
+    rc = bdb_file_check(fname, inst->inst_li->li_mode);
+    slapi_ch_free_string(&fname);
+    return rc;
+}
+
+/*
+ * Check (and consume) the restore marker file.  Returns 0 if no marker
+ * exists, 1 if one does (see bdb_file_check).
+ * NOTE(review): static with no caller visible in this chunk -- presumably
+ * called from the startup/restore path elsewhere in this file; confirm.
+ */
+static int
+bdb_restore_file_check(struct ldbminfo *li)
+{
+    int rc;
+    char *fname = bdb_restore_file_name(li);
+    rc = bdb_file_check(fname, li->li_mode);
+    slapi_ch_free_string(&fname);
+    return rc;
+}
+
+/*
+ * Write a success message into the restore marker file, turning the
+ * empty "in progress" marker into a non-empty "succeeded" one
+ * (bdb_file_check treats an empty marker as a failed restore).
+ * Open failures are already logged by bdb_file_open and are ignored.
+ */
+void
+dblayer_restore_file_update(struct ldbminfo *li, char *directory)
+{
+    PRFileDesc *prfd;
+    char *fname = bdb_restore_file_name(li);
+    bdb_file_open(fname, PR_RDWR, li->li_mode, &prfd);
+    slapi_ch_free_string(&fname);
+    if (prfd) {
+        char *line = slapi_ch_smprintf("restore of %s succeeded", directory);
+        slapi_write_buffer(prfd, line, strlen(line));
+        slapi_ch_free_string(&line);
+        PR_Close(prfd);
+    }
+}
+
+
+/*
+ * Change the db file extension (e.g., .db3 -> .db4) of every index file
+ * and of id2entry for one backend instance, using DB->rename().
+ * Returns 0 on success, non-zero (a libdb error or -1) on failure.
+ */
+int
+dblayer_update_db_ext(ldbm_instance *inst, char *oldext, char *newext)
+{
+    struct attrinfo *a = NULL;
+    struct ldbminfo *li = NULL;
+    dblayer_private *priv = NULL;
+    DB *thisdb = NULL;
+    int rval = 0;
+    char *ofile = NULL;
+    char *nfile = NULL;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp;
+
+    if (NULL == inst) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_update_db_ext", "Null instance is passed\n");
+        return -1; /* non zero */
+    }
+    li = inst->inst_li;
+    priv = li->li_dblayer_private;
+    inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+    if (NULL == inst_dirp || '\0' == *inst_dirp) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_update_db_ext", "Instance dir is NULL\n");
+        if (inst_dirp != inst_dir) {
+            slapi_ch_free_string(&inst_dirp);
+        }
+        return -1; /* non zero */
+    }
+    /* Walk every configured index of the instance */
+    for (a = (struct attrinfo *)avl_getfirst(inst->inst_attrs);
+         NULL != a;
+         a = (struct attrinfo *)avl_getnext()) {
+        PRFileInfo64 info;
+        ofile = slapi_ch_smprintf("%s/%s%s", inst_dirp, a->ai_type, oldext);
+
+        /* Skip indexes whose old-extension file does not exist */
+        if (PR_GetFileInfo64(ofile, &info) != PR_SUCCESS) {
+            slapi_ch_free_string(&ofile);
+            continue;
+        }
+
+        /* db->rename disable DB in it; we need to create for each */
+        rval = db_create(&thisdb, ((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV, 0);
+        if (0 != rval) {
+            slapi_log_err(SLAPI_LOG_ERR, "dblayer_update_db_ext", "db_create returned %d (%s)\n",
+                          rval, dblayer_strerror(rval));
+            goto done;
+        }
+        nfile = slapi_ch_smprintf("%s/%s%s", inst_dirp, a->ai_type, newext);
+        slapi_log_err(SLAPI_LOG_TRACE, "dblayer_update_db_ext", "Rename %s -> %s\n",
+                      ofile, nfile);
+
+        rval = thisdb->rename(thisdb, (const char *)ofile, NULL /* subdb */,
+                              (const char *)nfile, 0);
+        if (0 != rval) {
+            slapi_log_err(SLAPI_LOG_ERR, "dblayer_update_db_ext", "Rename returned %d (%s)\n",
+                          rval, dblayer_strerror(rval));
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "dblayer_update_db_ext", "Index (%s) Failed to update index %s -> %s\n",
+                          inst->inst_name, ofile, nfile);
+            goto done;
+        }
+        slapi_ch_free_string(&ofile);
+        slapi_ch_free_string(&nfile);
+    }
+
+    /* Finally rename the id2entry file itself */
+    rval = db_create(&thisdb, ((bdb_db_env *)priv->dblayer_env)->bdb_DB_ENV, 0);
+    if (0 != rval) {
+        slapi_log_err(SLAPI_LOG_ERR, "dblayer_update_db_ext", "db_create returned %d (%s)\n",
+                      rval, dblayer_strerror(rval));
+        goto done;
+    }
+    ofile = slapi_ch_smprintf("%s/%s%s", inst_dirp, ID2ENTRY, oldext);
+    nfile = slapi_ch_smprintf("%s/%s%s", inst_dirp, ID2ENTRY, newext);
+    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_update_db_ext", "Rename %s -> %s\n",
+                  ofile, nfile);
+    rval = thisdb->rename(thisdb, (const char *)ofile, NULL /* subdb */,
+                          (const char *)nfile, 0);
+    if (0 != rval) {
+        slapi_log_err(SLAPI_LOG_ERR, "dblayer_update_db_ext", "Rename returned %d (%s)\n",
+                      rval, dblayer_strerror(rval));
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_update_db_ext", "Index (%s) Failed to update index %s -> %s\n",
+                      inst->inst_name, ofile, nfile);
+    }
+done:
+    slapi_ch_free_string(&ofile);
+    slapi_ch_free_string(&nfile);
+    if (inst_dirp != inst_dir) {
+        slapi_ch_free_string(&inst_dirp);
+    }
+
+    return rval;
+}
+
+/*
+ * Delete the index files belonging to the instance.
+ * Returns 0 on success; a negative value if inst is NULL, otherwise the
+ * sum of the bdb_rm_db_file() results (non-zero means some removal failed).
+ */
+int
+dblayer_delete_indices(ldbm_instance *inst)
+{
+    int rval = -1;
+    struct attrinfo *a = NULL;
+    int i;
+
+    if (NULL == inst) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "dblayer_delete_indices", "NULL instance is passed\n");
+        return rval;
+    }
+    rval = 0;
+    /* i is 0 only on the first iteration, letting bdb_rm_db_file force
+     * a checkpoint just once */
+    for (a = (struct attrinfo *)avl_getfirst(inst->inst_attrs), i = 0;
+         NULL != a;
+         a = (struct attrinfo *)avl_getnext(), i++) {
+        rval += bdb_rm_db_file(inst->inst_be, a, PR_TRUE, i /* chkpt; 1st time only */);
+    }
+    return rval;
+}
+
+/*
+ * Flag the BDB configuration so that database recovery is required
+ * (bdb_recovery_required = 1).  Safe no-op with an error log if the
+ * dblayer config is not set up yet.
+ */
+void
+bdb_set_recovery_required(struct ldbminfo *li)
+{
+    if (NULL == li || NULL == li->li_dblayer_config) {
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_set_recovery_required", "No dblayer info\n");
+        return;
+    }
+    BDB_CONFIG(li)->bdb_recovery_required = 1;
+}
+
+/*
+ * Backend "get info" entry point: fill *info according to cmd
+ * (DB_ENV handle, open flags, page sizes, directories, entryrdn
+ * switch, suffix index key).  Returns 0 on success, -1 when info is
+ * NULL, the requested data is unavailable, or cmd is unknown.
+ */
+int
+bdb_get_info(Slapi_Backend *be, int cmd, void **info)
+{
+    int rc = -1;
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    dblayer_private *prv = NULL;
+    bdb_db_env *penv = NULL;
+
+    if ( !info) {
+        return rc;
+    }
+
+    if (li) {
+        prv = li->li_dblayer_private;
+        if (prv) {
+            penv = (bdb_db_env *)prv->dblayer_env;
+        }
+    }
+
+    switch (cmd) {
+    case BACK_INFO_DBENV: {
+            if (penv && penv->bdb_DB_ENV) {
+                *(DB_ENV **)info = penv->bdb_DB_ENV;
+                rc = 0;
+            }
+        break;
+    }
+    case BACK_INFO_DBENV_OPENFLAGS: {
+            if (penv) {
+                *(int *)info = penv->bdb_openflags;
+                rc = 0;
+            }
+        break;
+    }
+    case BACK_INFO_DB_PAGESIZE: {
+            /* configured page size, or the compile-time default */
+            if (li && BDB_CONFIG(li)->bdb_page_size) {
+                *(uint32_t *)info = BDB_CONFIG(li)->bdb_page_size;
+            } else {
+                *(uint32_t *)info = DBLAYER_PAGESIZE;
+            }
+            rc = 0;
+        break;
+    }
+    case BACK_INFO_INDEXPAGESIZE: {
+            if (li && BDB_CONFIG(li)->bdb_index_page_size) {
+                *(uint32_t *)info = BDB_CONFIG(li)->bdb_index_page_size;
+            } else {
+                *(uint32_t *)info = DBLAYER_INDEX_PAGESIZE;
+            }
+            rc = 0;
+        break;
+    }
+    case BACK_INFO_DIRECTORY: {
+        if (li) {
+            *(char **)info = li->li_directory;
+            rc = 0;
+        }
+        break;
+    }
+    case BACK_INFO_DBHOME_DIRECTORY: {
+        /* prefer the explicit dbhome dir, fall back to the home dir */
+        if (li) {
+            if (BDB_CONFIG(li)->bdb_dbhome_directory &&
+                BDB_CONFIG(li)->bdb_dbhome_directory[0] != '\0') {
+                *(char **)info = BDB_CONFIG(li)->bdb_dbhome_directory;
+            } else {
+                *(char **)info = BDB_CONFIG(li)->bdb_home_directory;
+            }
+            rc = 0;
+        }
+        break;
+    }
+    case BACK_INFO_LOG_DIRECTORY: {
+        if (li) {
+            *(char **)info = bdb_config_db_logdirectory_get_ext((void *)li);
+            rc = 0;
+        }
+        break;
+    }
+    case BACK_INFO_IS_ENTRYRDN: {
+        *(int *)info = entryrdn_get_switch();
+        /* NOTE(review): rc stays -1 here although *info is filled in --
+         * confirm callers ignore the return value for this cmd. */
+        break;
+    }
+    case BACK_INFO_INDEX_KEY : {
+        rc = get_suffix_key(be, (struct _back_info_index_key *)info);
+        break;
+    }
+    default:
+        break;
+    }
+
+    return rc;
+}
+
+/*
+ * Backend "set info" entry point.  Only BACK_INFO_INDEX_KEY is
+ * supported, delegating to set_suffix_key(); returns its result, or
+ * -1 for any other command.
+ */
+int
+bdb_set_info(Slapi_Backend *be, int cmd, void **info)
+{
+    int rc = -1;
+
+    switch (cmd) {
+    case BACK_INFO_INDEX_KEY : {
+        rc = set_suffix_key(be, (struct _back_info_index_key *)info);
+        break;
+    }
+    default:
+        break;
+    }
+
+    return rc;
+}
+
+/*
+ * Backend control entry point: dispatch attribute-encryption controls
+ * (crypt init / encrypt value / decrypt value) to the back_crypt_*
+ * helpers.  Returns the helper's result, or -1 when be/info is NULL
+ * or the command is unknown.
+ */
+int
+bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info)
+{
+    int rc = -1;
+    if (!be || !info) {
+        return rc;
+    }
+
+    switch (cmd) {
+    case BACK_INFO_CRYPT_INIT: {
+        back_info_crypt_init *crypt_init = (back_info_crypt_init *)info;
+        rc = back_crypt_init(crypt_init->be, crypt_init->dn,
+                             crypt_init->encryptionAlgorithm,
+                             &(crypt_init->state_priv));
+        break;
+    }
+    case BACK_INFO_CRYPT_ENCRYPT_VALUE: {
+        back_info_crypt_value *crypt_value = (back_info_crypt_value *)info;
+        rc = back_crypt_encrypt_value(crypt_value->state_priv, crypt_value->in,
+                                      &(crypt_value->out));
+        break;
+    }
+    case BACK_INFO_CRYPT_DECRYPT_VALUE: {
+        back_info_crypt_value *crypt_value = (back_info_crypt_value *)info;
+        rc = back_crypt_decrypt_value(crypt_value->state_priv, crypt_value->in,
+                                      &(crypt_value->out));
+        break;
+    }
+    default:
+        break;
+    }
+
+    return rc;
+}

+ 163 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_layer.h

@@ -0,0 +1,163 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+
+#include "../back-ldbm.h"
+#include "../dblayer.h"
+
+#define BDB_CONFIG(li) ((bdb_config *)(li)->li_dblayer_config)
+
+typedef struct bdb_db_env
+{
+    DB_ENV *bdb_DB_ENV;
+    Slapi_RWLock *bdb_env_lock;
+    int bdb_openflags;
+    int bdb_priv_flags;
+    PRLock *bdb_thread_count_lock;      /* lock for thread_count_cv */
+    PRCondVar *bdb_thread_count_cv;     /* condition variable for housekeeping thread shutdown */
+    PRInt32 bdb_thread_count;           /* Tells us how many threads are running,
+                                         * used to figure out when they're all stopped */
+} bdb_db_env;
+
+/* structure which holds our stuff */
+typedef struct bdb_config
+{
+    char *bdb_home_directory;
+    char *bdb_log_directory;
+    char *bdb_dbhome_directory;  /* default path for relative inst paths */
+    char **bdb_data_directories; /* passed to set_data_dir
+                                      * including bdb_dbhome_directory */
+    char **bdb_db_config;
+    int bdb_ncache;
+    int bdb_previous_ncache;
+    int bdb_tx_max;
+    uint64_t bdb_cachesize;
+    uint64_t bdb_previous_cachesize; /* Cache size when we last shut down--
+                                        * used to determine if we delete
+                                        * the mpool */
+    int bdb_recovery_required;
+    int bdb_txn_wait; /* Default is "off" (DB_TXN_NOWAIT) but for
+                                     * support purpose it could be helpful to set
+                                     * "on" so that backend hang on deadlock */
+    int bdb_enable_transactions;
+    int bdb_durable_transactions;
+    int bdb_checkpoint_interval;
+    int bdb_circular_logging;
+    uint32_t bdb_page_size;       /* db page size if configured,
+                                     * otherwise default to DBLAYER_PAGESIZE */
+    uint32_t bdb_index_page_size; /* db index page size if configured,
+                                     * otherwise default to
+                                     * DBLAYER_INDEX_PAGESIZE */
+    uint64_t bdb_logfile_size;    /* How large can one logfile be ? */
+    uint64_t bdb_logbuf_size;     /* how large log buffer can be */
+    int bdb_trickle_percentage;
+    int bdb_cache_config; /* Special cache configurations
+                                     * e.g. force file-based mpool */
+    int bdb_lib_version;
+    int bdb_spin_count;         /* DB Mutex spin count, 0 == use default */
+    int bdb_named_regions;      /* Should the regions be named sections,
+                                     * or backed by files ? */
+    int bdb_private_mem;        /* private memory will be used for
+                                     * allocation of regions and mutexes */
+    int bdb_private_import_mem; /* private memory will be used for
+                                     * allocation of regions and mutexes for
+                                     * import */
+    long bdb_shm_key;           /* base segment ID for named regions */
+    int bdb_debug;                /* Will libdb emit debugging info into our log ? */
+    int bdb_debug_verbose;              /* Get libdb to exhale debugging info */
+    int bdb_debug_checkpointing;     /* Enable debugging messages from checkpointing */
+    perfctrs_private *perf_private; /* Private data for performance counters code */
+    int bdb_stop_threads;       /* Used to signal to threads that they should stop ASAP */
+    int bdb_lockdown;           /* use DB_LOCKDOWN */
+#define BDB_LOCK_NB_MIN 10000
+    int bdb_lock_config;
+    int bdb_previous_lock_config;  /* Max lock count when we last shut down--
+                                      * used to determine if we delete the mpool */
+    u_int32_t bdb_deadlock_policy; /* i.e. the atype to DB_ENV->lock_detect in deadlock_threadmain */
+    int bdb_compactdb_interval;    /* interval to execute compact id2entry dbs */
+} bdb_config;
+
+int bdb_init(struct ldbminfo *li, config_info *config_array);
+
+int bdb_close(struct ldbminfo *li, int flags);
+int bdb_start(struct ldbminfo *li, int flags);
+int bdb_instance_start(backend *be, int flags);
+int bdb_backup(struct ldbminfo *li, char *dest_dir, Slapi_Task *task);
+int bdb_verify(Slapi_PBlock *pb);
+int bdb_db2ldif(Slapi_PBlock *pb);
+int bdb_db2index(Slapi_PBlock *pb);
+int bdb_ldif2db(Slapi_PBlock *pb);
+int bdb_db_size(Slapi_PBlock *pb);
+int bdb_upgradedb(Slapi_PBlock *pb);
+int bdb_upgradednformat(Slapi_PBlock *pb);
+int bdb_upgradeddformat(Slapi_PBlock *pb);
+int bdb_restore(struct ldbminfo *li, char *src_dir, Slapi_Task *task);
+int bdb_cleanup(struct ldbminfo *li);
+int bdb_txn_begin(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
+int bdb_txn_commit(struct ldbminfo *li, back_txn *txn, PRBool use_lock);
+int bdb_txn_abort(struct ldbminfo *li, back_txn *txn, PRBool use_lock);
+int bdb_get_db(backend *be, char *indexname, int open_flag, struct attrinfo *ai, DB **ppDB);
+int bdb_rm_db_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
+int bdb_delete_db(struct ldbminfo *li);
+int bdb_import_main(void *arg);
+int bdb_get_info(Slapi_Backend *be, int cmd, void **info);
+int bdb_set_info(Slapi_Backend *be, int cmd, void **info);
+int bdb_back_ctrl(Slapi_Backend *be, int cmd, void *info);
+int bdb_config_load_dse_info(struct ldbminfo *li);
+int bdb_config_internal_set(struct ldbminfo *li, char *attrname, char *value);
+void bdb_public_config_get(struct ldbminfo *li, char *attrname, char *value);
+int bdb_public_config_set(struct ldbminfo *li, char *attrname, int apply_mod, int mod_op, int phase, char *value);
+
+/* instance functions */
+int bdb_instance_cleanup(struct ldbm_instance *inst);
+int bdb_instance_config_set(ldbm_instance *inst, char *attrname, int mod_apply, int mod_op, int phase, struct berval *value);
+int bdb_instance_create(struct ldbm_instance *inst);
+int bdb_instance_search_callback(Slapi_Entry *e, int *returncode, char *returntext, ldbm_instance *inst);
+
+/* function for autotuning */
+int bdb_start_autotune(struct ldbminfo *li);
+
+/* helper functions */
+int bdb_get_aux_id2entry(backend *be, DB **ppDB, DB_ENV **ppEnv, char **path);
+int bdb_get_aux_id2entry_ext(backend *be, DB **ppDB, DB_ENV **ppEnv, char **path, int flags);
+int bdb_release_aux_id2entry(backend *be, DB *pDB, DB_ENV *pEnv);
+char *bdb_get_home_dir(struct ldbminfo *li, int *dbhome);
+int bdb_copy_directory(struct ldbminfo *li, Slapi_Task *task, char *src_dir, char *dest_dir, int restore, int *cnt, int indexonly, int is_changelog);
+int bdb_remove_env(struct ldbminfo *li);
+int bdb_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2);
+int bdb_open_huge_file(const char *path, int oflag, int mode);
+int bdb_check_and_set_import_cache(struct ldbminfo *li);
+int bdb_close_file(DB **db);
+int bdb_post_close(struct ldbminfo *li, int dbmode);
+int bdb_config_set(void *arg, char *attr_name, config_info *config_array, struct berval *bval, char *err_buf, int phase, int apply_mod, int mod_op);
+void bdb_config_get(void *arg, config_info *config, char *buf);
+int add_op_attrs(Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *ep, int *status);
+int bdb_back_ldif2db(Slapi_PBlock *pb);
+void bdb_set_recovery_required(struct ldbminfo *li);
+void *bdb_config_db_logdirectory_get_ext(void *arg);
+int bdb_db_remove(bdb_db_env *env, char const path[], char const dbName[]);
+int bdb_memp_stat(struct ldbminfo *li, DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp);
+int bdb_memp_stat_instance(ldbm_instance *inst, DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp);
+void bdb_set_env_debugging(DB_ENV *pEnv, bdb_config *conf);
+void bdb_back_free_incl_excl(char **include, char **exclude);
+int bdb_back_ok_to_dump(const char *dn, char **include, char **exclude);
+int bdb_back_fetch_incl_excl(Slapi_PBlock *pb, char ***include, char ***exclude);
+PRUint64 bdb_get_id2entry_size(ldbm_instance *inst);
+
+
+/* bdb version functions */
+int bdb_version_write(struct ldbminfo *li, const char *directory, const char *dataversion, PRUint32 flags);
+int bdb_version_read(struct ldbminfo *li, const char *directory, char **ldbmversion, char **dataversion);
+int bdb_version_exists(struct ldbminfo *li, const char *directory);
+
+/* config functions */
+int bdb_instance_delete_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst);
+int bdb_instance_post_delete_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst);
+int bdb_instance_add_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst);
+int bdb_instance_postadd_instance_entry_callback(struct ldbminfo *li, struct ldbm_instance *inst);
+void bdb_config_setup_default(struct ldbminfo *li);

+ 3311 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_ldif2db.c

@@ -0,0 +1,3311 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+/* bdb_ldif2db.c
+ *
+ * common functions for import (old and new) and export
+ * the export code (db2ldif)
+ * code for db2index (is this still in use?)
+ */
+
+#include "bdb_layer.h"
+#include "../vlv_srch.h"
+#include "../import.h"
+
+#define DB2INDEX_ANCESTORID 0x1   /* index ancestorid */
+#define DB2INDEX_ENTRYRDN 0x2     /* index entryrdn */
+#define DB2LDIF_ENTRYRDN 0x4      /* export entryrdn */
+#define DB2INDEX_OBJECTCLASS 0x10 /* for reindexing "objectclass: nstombstone" */
+
+#define LDIF2LDBM_EXTBITS(x) ((x)&0xf)
+
+/* State threaded through the export (db2ldif) helpers for one pass
+ * over a backend instance. */
+typedef struct _export_args
+{
+    struct backentry *ep;     /* entry currently being exported */
+    int decrypt;              /* nonzero: decrypt encrypted attrs in place */
+    int options;              /* SLAPI_DUMP_* option bits */
+    int printkey;             /* EXPORT_* flag bits (see bdb_db2ldif) */
+    IDList *idl;              /* optional ID list when exporting a subset */
+    NIDS idindex;             /* current position within idl */
+    ID lastid;                /* highest ID in id2entry (for progress %) */
+    int fd;                   /* output file descriptor */
+    Slapi_Task *task;         /* online task for progress reporting, may be NULL */
+    char **include_suffix;    /* suffixes to include, NULL-terminated */
+    char **exclude_suffix;    /* suffixes to exclude, NULL-terminated */
+    int *cnt;                 /* running count of exported entries */
+    int *lastcnt;             /* count at last progress report */
+    IDList *pre_exported_idl; /* exported IDList, which ID is larger than
+                                 its children's ID.  It happens when an entry
+                                 is added and existing entries are moved under
+                                 the newly added entry. */
+} export_args;
+
+/* static functions */
+
+static int ldbm_exclude_attr_from_export(struct ldbminfo *li,
+                                         const char *attr,
+                                         int dump_uniqueid);
+
+static int _get_and_add_parent_rdns(backend *be, DB *db, back_txn *txn, ID id, Slapi_RDN *srdn, ID *pid, int index_ext, int run_from_cmdline, export_args *eargs);
+static int _export_or_index_parents(ldbm_instance *inst, DB *db, back_txn *txn, ID currentid, char *rdn, ID id, ID pid, int run_from_cmdline, struct _export_args *eargs, int type, Slapi_RDN *psrdn);
+
+/**********  common routines for classic/deluxe import code **********/
+
+/* Bytes of index buffering used by import; settable at runtime via
+ * import_configure_index_buffer_size(). */
+static size_t import_config_index_buffer_size = DEFAULT_IMPORT_INDEX_BUFFER_SIZE;
+
+/* Set the index buffer size (in bytes) used by subsequent imports. */
+void
+import_configure_index_buffer_size(size_t size)
+{
+    import_config_index_buffer_size = size;
+}
+
+/* Return the currently configured import index buffer size (in bytes). */
+size_t
+import_get_index_buffer_size(void)
+{
+    /* (void) parameter list: an empty () in C declares an unspecified
+     * argument list, not a zero-argument function. */
+    return import_config_index_buffer_size;
+}
+
+
+/* Release the include/exclude suffix lists previously built by
+ * bdb_back_fetch_incl_excl().  Either list may be NULL. */
+void
+bdb_back_free_incl_excl(char **include, char **exclude)
+{
+    if (include != NULL) {
+        charray_free(include);
+    }
+    if (exclude != NULL) {
+        charray_free(exclude);
+    }
+}
+
+
+/*
+ * add_op_attrs - add the parentid, entryid, dncomp,
+ * and entrydn operational attributes to an entry.
+ * Also---new improved washes whiter than white version
+ * now removes any bogus operational attributes you're not
+ * allowed to specify yourself on entries.
+ * Currently the list of these is: numSubordinates, hasSubordinates
+ *
+ * status (in/out, may be NULL): on entry, IMPORT_ADD_OP_ATTRS_SAVE_OLD_PID
+ * requests that the current parentid be preserved in e_aux_attrs; on exit
+ * it is IMPORT_ADD_OP_ATTRS_OK or IMPORT_ADD_OP_ATTRS_NO_PARENT.
+ * Returns 0 on success, -1 on database error.
+ */
+int
+add_op_attrs(Slapi_PBlock *pb, struct ldbminfo *li __attribute__((unused)), struct backentry *ep, int *status)
+{
+    backend *be;
+    char *pdn;
+    ID pid = 0;
+    int save_old_pid = 0;
+    int is_tombstone = 0;
+
+    slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+
+    /*
+     * add the parentid and entryid operational attributes
+     */
+
+    if (NULL != status) {
+        if (IMPORT_ADD_OP_ATTRS_SAVE_OLD_PID == *status) {
+            save_old_pid = 1;
+        }
+        *status = IMPORT_ADD_OP_ATTRS_OK;
+    }
+
+    is_tombstone = slapi_entry_flag_is_set(ep->ep_entry,
+                                           SLAPI_ENTRY_FLAG_TOMBSTONE);
+    /* parentid */
+    if ((pdn = slapi_dn_parent_ext(backentry_get_ndn(ep), is_tombstone)) != NULL) {
+        int err = 0;
+
+        /*
+         * read the entrydn/entryrdn index to get the id of the parent
+         * If this entry's parent is not present in the index,
+         * we'll get a DB_NOTFOUND error here.
+         * In olden times, we just ignored this, but now...
+         * we see this as meaning that the entry is either a
+         * suffix entry, or it's erroneous. So, we signal this to the
+         * caller via the status parameter.
+         */
+        if (entryrdn_get_switch()) { /* subtree-rename: on */
+            Slapi_DN sdn;
+            slapi_sdn_init(&sdn);
+            slapi_sdn_set_dn_byval(&sdn, pdn);
+            err = entryrdn_index_read_ext(be, &sdn, &pid,
+                                          TOMBSTONE_INCLUDED, NULL);
+            slapi_sdn_done(&sdn);
+            if (DB_NOTFOUND == err) {
+                /*
+                 * Could be a tombstone. E.g.,
+                 * nsuniqueid=042d8081-..-ca8fe9f7,uid=tuser,o=abc,com
+                 * If so, need to get the grandparent of the leaf.
+                 */
+                if (slapi_entry_flag_is_set(ep->ep_entry,
+                                            SLAPI_ENTRY_FLAG_TOMBSTONE) &&
+                    (0 == strncasecmp(pdn, SLAPI_ATTR_UNIQUEID,
+                                      sizeof(SLAPI_ATTR_UNIQUEID) - 1))) {
+                    char *ppdn = slapi_dn_parent(pdn);
+                    slapi_ch_free_string(&pdn);
+                    if (NULL == ppdn) {
+                        if (NULL != status) {
+                            *status = IMPORT_ADD_OP_ATTRS_NO_PARENT;
+                            goto next;
+                        }
+                        /* NOTE(review): if status is NULL here, pdn stays
+                         * NULL (freed above) and is handed to
+                         * slapi_sdn_set_dn_byval below -- confirm intended. */
+                    }
+                    pdn = ppdn;
+                    /* sdn was reset by slapi_sdn_done above; set_dn_byval
+                     * re-populates it for the grandparent lookup. */
+                    slapi_sdn_set_dn_byval(&sdn, pdn);
+                    err = entryrdn_index_read(be, &sdn, &pid, NULL);
+                    slapi_sdn_done(&sdn);
+                }
+            }
+            if (err) {
+                /* err == 1 appears to be a benign "not found" style return
+                 * from entryrdn_index_read* -- treated like DB_NOTFOUND. */
+                if (DB_NOTFOUND != err && 1 != err) {
+                    slapi_log_err(SLAPI_LOG_ERR, "add_op_attrs", "database error %d\n", err);
+                    slapi_ch_free_string(&pdn);
+                    return (-1);
+                }
+                if (NULL != status) {
+                    *status = IMPORT_ADD_OP_ATTRS_NO_PARENT;
+                }
+            }
+        } else {
+            /* subtree-rename: off -- use the legacy entrydn equality index */
+            struct berval bv;
+            IDList *idl = NULL;
+            bv.bv_val = pdn;
+            bv.bv_len = strlen(pdn);
+            if ((idl = index_read(be, LDBM_ENTRYDN_STR, indextype_EQUALITY,
+                                  &bv, NULL, &err)) != NULL) {
+                pid = idl_firstid(idl);
+                idl_free(&idl);
+            } else {
+                /* empty idl */
+                if (0 != err && DB_NOTFOUND != err) {
+                    slapi_log_err(SLAPI_LOG_ERR, "add_op_attrs", "database error %d\n", err);
+                    slapi_ch_free_string(&pdn);
+                    return (-1);
+                }
+                if (NULL != status) {
+                    *status = IMPORT_ADD_OP_ATTRS_NO_PARENT;
+                }
+            }
+        }
+        slapi_ch_free_string(&pdn);
+    } else {
+        /* no parent DN at all: this is a suffix entry */
+        if (NULL != status) {
+            *status = IMPORT_ADD_OP_ATTRS_NO_PARENT;
+        }
+    }
+next:
+    /* Get rid of attributes you're not allowed to specify yourself */
+    slapi_entry_delete_values(ep->ep_entry, hassubordinates, NULL);
+    slapi_entry_delete_values(ep->ep_entry, numsubordinates, NULL);
+
+    /* Upgrade DN format only */
+    /* Set current parentid to e_aux_attrs to remove it from the index file. */
+    if (save_old_pid) {
+        Slapi_Attr *pid_attr = NULL;
+        pid_attr = attrlist_remove(&ep->ep_entry->e_attrs, "parentid");
+        if (pid_attr) {
+            attrlist_add(&ep->ep_entry->e_aux_attrs, pid_attr);
+        }
+    }
+
+    /* Add the entryid, parentid and entrydn operational attributes */
+    /* Note: This function is provided by the Add code */
+    add_update_entry_operational_attributes(ep, pid);
+
+    return (0);
+}
+
+
+/**********  ldif2db entry point  **********/
+
+/*
+    Some notes about this stuff:
+
+    The front-end does call our init routine before calling us here.
+    So, we get the regular chance to parse the config file etc.
+    However, it does _NOT_ call our start routine, so we need to
+    do whatever work that did and which we need for this work , here.
+    Furthermore, the front-end simply exits after calling us, so we need
+    to do any cleanup work here also.
+ */
+
+/*
+ * bdb_ldif2db - backend routine to convert an ldif file to
+ * a database.
+ *
+ * pb carries SLAPI_PLUGIN_PRIVATE (the ldbminfo), the backend instance
+ * name, the task flags, and optionally a Slapi_Task for status logging.
+ * Returns 0 on success, nonzero on failure.  On failure the backend is
+ * left offline and the instance busy flag is released via "fail:".
+ */
+int
+bdb_ldif2db(Slapi_PBlock *pb)
+{
+    struct ldbminfo *li;
+    ldbm_instance *inst = NULL;
+    char *instance_name;
+    Slapi_Task *task = NULL;
+    int ret, task_flags;
+
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name);
+    slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
+    slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &task);
+
+    /* BEGIN complex dependencies of various initializations. */
+    /* hopefully this will go away once import is not run standalone... */
+
+
+    /* Find the instance that the ldif2db will be done on. */
+    inst = ldbm_instance_find_by_name(li, instance_name);
+    if (NULL == inst) {
+        slapi_task_log_notice(task, "Unknown ldbm instance %s", instance_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_ldif2db", "Unknown ldbm instance %s\n",
+                      instance_name);
+        return -1;
+    }
+
+    /* check if an import/restore is already ongoing... */
+    if ((instance_set_busy(inst) != 0)) {
+        slapi_task_log_notice(task,
+                "Backend instance '%s' already in the middle of  another task",
+                inst->inst_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_ldif2db", "ldbm: '%s' is already in the middle of "
+                                                            "another task and cannot be disturbed.\n",
+                      inst->inst_name);
+        return -1;
+    } else {
+        /* Busy flag is now held: every error path from here on must go
+         * through "fail:" so instance_set_not_busy() is called. */
+        uint64_t refcnt;
+        refcnt = slapi_counter_get_value(inst->inst_ref_count);
+        if (refcnt > 0) {
+            slapi_task_log_notice(task,
+                    "Backend instance '%s': there are %" PRIu64 " pending operation(s)."
+                    " Import can not proceed until they are completed.\n",
+                    inst->inst_name, refcnt);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_ldif2db",
+                    "ldbm: '%s' there are %" PRIu64 " pending operation(s)."
+                     " Import can not proceed until they are completed.\n",
+                    inst->inst_name, refcnt);
+            instance_set_not_busy(inst);
+            return -1;
+        }
+    }
+
+    if ((task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE)) {
+        if ((ret = dblayer_import_file_init(inst))) {
+            slapi_task_log_notice(task,
+                    "Backend instance '%s' Failed to write import file, error %d: %s",
+                    inst->inst_name, ret, slapd_pr_strerror(ret));
+            slapi_log_err(SLAPI_LOG_ERR,
+                    "bdb_ldif2db", "%s: Failed to write import file, error %d: %s\n",
+                    inst->inst_name, ret, slapd_pr_strerror(ret));
+            /* BUGFIX: previously returned -1 directly, leaking the busy
+             * flag taken by instance_set_busy() above.  Keep the -1 return
+             * value but release the flag through the common fail path. */
+            ret = -1;
+            goto fail;
+        }
+    }
+
+    /***** prepare & init libdb and dblayer *****/
+
+    if (!(task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE)) {
+        /* online import: shutdown this instance of the db */
+        slapi_log_err(SLAPI_LOG_INFO, "bdb_ldif2db", "Bringing %s offline...\n",
+                      instance_name);
+        slapi_mtn_be_disable(inst->inst_be);
+
+        cache_clear(&inst->inst_cache, CACHE_TYPE_ENTRY);
+        if (entryrdn_get_switch()) {
+            cache_clear(&inst->inst_dncache, CACHE_TYPE_DN);
+        }
+        dblayer_instance_close(inst->inst_be);
+        dblayer_delete_indices(inst);
+    } else {
+        /* offline import: no transaction logging needed for a bulk load */
+        if (bdb_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off")) {
+            goto fail;
+        }
+
+        /* If USN plugin is enabled,
+         * initialize the USN counter to get the next USN */
+        if (plugin_enabled("USN", li->li_identity)) {
+            /* close immediately; no need to run db threads */
+            ret = bdb_start(li,
+                                DBLAYER_NORMAL_MODE | DBLAYER_NO_DBTHREADS_MODE);
+            if (ret) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_ldif2db", "bdb_start failed! %s (%d)\n",
+                              dblayer_strerror(ret), ret);
+                goto fail;
+            }
+            /* initialize the USN counter */
+            ldbm_usn_init(li);
+            ret = dblayer_close(li, DBLAYER_NORMAL_MODE);
+            if (ret != 0) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_ldif2db", "dblayer_close failed! %s (%d)\n",
+                              dblayer_strerror(ret), ret);
+            }
+        }
+
+        if (0 != (ret = bdb_start(li, DBLAYER_IMPORT_MODE))) {
+            if (LDBM_OS_ERR_IS_DISKFULL(ret)) {
+                slapi_log_err(SLAPI_LOG_ALERT, "bdb_ldif2db", "Failed to init database.  "
+                                                                      "There is either insufficient disk space or "
+                                                                      "insufficient memory available to initialize the "
+                                                                      "database.\n");
+                slapi_log_err(SLAPI_LOG_ALERT, "bdb_ldif2db", "Please check that\n"
+                                                                      "1) disks are not full,\n"
+                                                                      "2) no file exceeds the file size limit,\n"
+                                                                      "3) the configured dbcachesize is not too large for the available memory on this machine.\n");
+            } else {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_ldif2db", "Failed to init database "
+                                                                    "(error %d: %s)\n",
+                              ret, dblayer_strerror(ret));
+            }
+            goto fail;
+        }
+    }
+
+    /* Delete old database files */
+    dblayer_delete_instance_dir(inst->inst_be);
+    /* it's okay to fail -- the directory might have already been deleted */
+
+    /* bdb_instance_start will init the id2entry index. */
+    /* it also (finally) fills in inst_dir_name */
+    ret = bdb_instance_start(inst->inst_be, DBLAYER_IMPORT_MODE);
+    if (ret != 0) {
+        goto fail;
+    }
+
+    vlv_init(inst);
+
+    /***** done init libdb and dblayer *****/
+
+    /* always use "new" import code now */
+    slapi_pblock_set(pb, SLAPI_BACKEND, inst->inst_be);
+    ret = bdb_back_ldif2db(pb);
+    if (ret == 0) {
+        if (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE) {
+            dblayer_import_file_update(inst);
+        } else {
+            /* online import finishes asynchronously; flag for post-import */
+            slapi_be_set_flag(inst->inst_be, SLAPI_BE_FLAG_POST_IMPORT);
+        }
+    }
+    return ret;
+
+fail:
+    /* DON'T enable the backend -- leave it offline */
+    instance_set_not_busy(inst);
+    return ret;
+}
+
+
+/**********  db2ldif, db2index  **********/
+
+
+/* fetch an IDL for the series of subtree specs */
+/* (used for db2ldif) */
+/*
+ * include: NULL-terminated array of normalized include-suffix DNs.
+ * err (out): 0 on success, else the last index/db error seen.
+ * Returns the union of the ID lists of every matching subtree (each
+ * including its own suffix entry), or NULL if nothing matched.
+ */
+static IDList *
+bdb_fetch_subtrees(backend *be, char **include, int *err)
+{
+    int i;
+    ID id;
+    IDList *idltotal = NULL, *idltmp;
+    back_txn *txn = NULL;
+    struct berval bv;
+    Slapi_DN sdn; /* Used only if entryrdn_get_switch is true */
+
+    *err = 0;
+    slapi_sdn_init(&sdn);
+    /* for each subtree spec... */
+    for (i = 0; include[i]; i++) {
+        IDList *idl = NULL;
+        const char *suffix = slapi_sdn_get_ndn(slapi_be_getsuffix(be, 0));
+        char *parentdn = slapi_ch_strdup(suffix);
+        char *nextdn = NULL;
+        int matched = 0;
+        int issubsuffix = 0;
+
+        /*
+         * avoid a case that an include suffix is applied to the backend of
+         * its sub suffix
+         * e.g., suffix: dc=example,dc=com (backend userRoot)
+         *       sub suffix: ou=sub,dc=example,dc=com (backend subUserRoot)
+         * When this CLI db2ldif -s "dc=example,dc=com" is executed,
+         * skip checking "dc=example,dc=com" in entrydn of subUserRoot.
+         */
+        while (NULL != parentdn &&
+               NULL != (nextdn = slapi_dn_parent(parentdn))) {
+            slapi_ch_free_string(&parentdn);
+            if (0 == slapi_UTF8CASECMP(nextdn, include[i])) {
+                issubsuffix = 1; /* suffix of be is a subsuffix of include[i] */
+                break;
+            }
+            parentdn = nextdn;
+        }
+        slapi_ch_free_string(&parentdn);
+        slapi_ch_free_string(&nextdn);
+        if (issubsuffix) {
+            continue;
+        }
+
+        /*
+         * avoid a case that an include suffix is applied to the unrelated
+         * backend.
+         * e.g., suffix: dc=example,dc=com (backend userRoot)
+         *       suffix: dc=test,dc=com (backend testRoot))
+         * When this CLI db2ldif -s "dc=example,dc=com" is executed,
+         * skip checking "dc=example,dc=com" in entrydn of testRoot.
+         */
+        parentdn = slapi_ch_strdup(include[i]);
+        while (NULL != parentdn &&
+               NULL != (nextdn = slapi_dn_parent(parentdn))) {
+            slapi_ch_free_string(&parentdn);
+            if (0 == slapi_UTF8CASECMP(nextdn, (char *)suffix)) {
+                matched = 1;
+                break;
+            }
+            parentdn = nextdn;
+        }
+        slapi_ch_free_string(&parentdn);
+        slapi_ch_free_string(&nextdn);
+        if (!matched) {
+            continue;
+        }
+
+        /*
+         * First map the suffix to its entry ID.
+         * Note that the suffix is already normalized.
+         */
+        if (entryrdn_get_switch()) { /* subtree-rename: on */
+            slapi_sdn_set_dn_byval(&sdn, include[i]);
+            *err = entryrdn_index_read(be, &sdn, &id, NULL);
+            if (*err) {
+                if (DB_NOTFOUND == *err) {
+                    slapi_log_err(SLAPI_LOG_INFO,
+                                  "bdb_fetch_subtrees", "entryrdn not indexed on '%s'; "
+                                                         "entry %s may not be added to the database yet.\n",
+                                  include[i], include[i]);
+                    *err = 0; /* not a problem */
+                } else {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_fetch_subtrees", "Reading %s failed on entryrdn; %d\n",
+                                  include[i], *err);
+                }
+                slapi_sdn_done(&sdn);
+                continue;
+            }
+        } else {
+            /* subtree-rename: off -- legacy entrydn equality index */
+            bv.bv_val = include[i];
+            bv.bv_len = strlen(include[i]);
+            idl = index_read(be, LDBM_ENTRYDN_STR, indextype_EQUALITY, &bv, txn, err);
+            if (idl == NULL) {
+                if (DB_NOTFOUND == *err) {
+                    slapi_log_err(SLAPI_LOG_INFO,
+                                  "bdb_fetch_subtrees", "entrydn not indexed on '%s'; "
+                                                         "entry %s may not be added to the database yet.\n",
+                                  include[i], include[i]);
+                    *err = 0; /* not a problem */
+                } else {
+                    slapi_log_err(SLAPI_LOG_ERR, "bdb_fetch_subtrees",
+                                  "Reading %s failed on entrydn; %d\n",
+                                  include[i], *err);
+                }
+                continue;
+            }
+            id = idl_firstid(idl);
+            idl_free(&idl);
+        }
+
+        /*
+         * Now get all the descendants of that suffix.
+         */
+        if (entryrdn_get_noancestorid()) {
+            /* subtree-rename: on && no ancestorid
+             * NOTE(review): sdn is only populated in the entryrdn branch
+             * above; presumably noancestorid implies entryrdn is on --
+             * confirm. */
+            *err = entryrdn_get_subordinates(be, &sdn, id, &idl, txn, 0);
+        } else {
+            *err = ldbm_ancestorid_read(be, txn, id, &idl);
+        }
+        slapi_sdn_done(&sdn);
+        if (idl == NULL) {
+            if (DB_NOTFOUND == *err) {
+                slapi_log_err(SLAPI_LOG_BACKLDBM,
+                              "bdb_fetch_subtrees", "Entry id %u has no descendants according to %s. "
+                                                     "Index file created by this reindex will be empty.\n",
+                              id, entryrdn_get_noancestorid() ? "entryrdn" : "ancestorid");
+                *err = 0; /* not a problem */
+            } else {
+                slapi_log_err(SLAPI_LOG_WARNING,
+                              "bdb_fetch_subtrees", "%s not indexed on %u\n",
+                              entryrdn_get_noancestorid() ? "entryrdn" : "ancestorid", id);
+            }
+            continue;
+        }
+
+        /* Insert the suffix itself */
+        idl_insert(&idl, id);
+
+        /* Merge the idlists */
+        if (!idltotal) {
+            idltotal = idl;
+        } else if (idl) {
+            idltmp = idl_union(be, idltotal, idl);
+            idl_free(&idltotal);
+            idl_free(&idl);
+            idltotal = idltmp;
+        }
+    } /* for (i = 0; include[i]; i++) */
+
+    return idltotal;
+}
+
+
+/*
+ * Write one entry (expargs->ep) to the export file descriptor in LDIF
+ * form, honoring the include/exclude suffix lists, attribute exclusion,
+ * optional decryption and the SLAPI_DUMP_* options.  Emits a progress
+ * report every 1000 exported entries.  Returns 0 on success/skip.
+ */
+static int
+export_one_entry(struct ldbminfo *li,
+                 ldbm_instance *inst,
+                 export_args *expargs)
+{
+    backend *be = inst->inst_be;
+    int rc = 0;
+    Slapi_Attr *this_attr = NULL, *next_attr = NULL;
+    char *type = NULL;
+    DBT data = {0};
+    int len = 0;
+
+    if (!bdb_back_ok_to_dump(backentry_get_ndn(expargs->ep),
+                              expargs->include_suffix,
+                              expargs->exclude_suffix)) {
+        goto bail; /* go to next loop */
+    }
+    if (!(expargs->options & SLAPI_DUMP_STATEINFO) &&
+        slapi_entry_flag_is_set(expargs->ep->ep_entry,
+                                SLAPI_ENTRY_FLAG_TOMBSTONE)) {
+        /* We only dump the tombstones if the user needs to create
+         * a replica from the ldif */
+        goto bail; /* go to next loop */
+    }
+    (*expargs->cnt)++;
+
+    /* do not output attributes that are in the "exclude" list */
+    /* Also, decrypt any encrypted attributes, if we're asked to */
+    rc = slapi_entry_first_attr(expargs->ep->ep_entry, &this_attr);
+    while (0 == rc) {
+        int dump_uniqueid = (expargs->options & SLAPI_DUMP_UNIQUEID) ? 1 : 0;
+        /* fetch next_attr first: deleting this_attr's values below may
+         * invalidate the iteration otherwise */
+        rc = slapi_entry_next_attr(expargs->ep->ep_entry,
+                                   this_attr, &next_attr);
+        slapi_attr_get_type(this_attr, &type);
+        if (ldbm_exclude_attr_from_export(li, type, dump_uniqueid)) {
+            slapi_entry_delete_values(expargs->ep->ep_entry, type, NULL);
+        }
+        this_attr = next_attr;
+    }
+    if (expargs->decrypt) {
+        /* Decrypt in place */
+        rc = attrcrypt_decrypt_entry(be, expargs->ep);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "export_one_entry", "Failed to decrypt entry [%s] : %d\n",
+                          slapi_sdn_get_dn(&expargs->ep->ep_entry->e_sdn), rc);
+        }
+    }
+    /*
+     * Check if userPassword value is hashed or not.
+     * If it is not, put "{CLEAR}" in front of the password value.
+     */
+    {
+        char *pw = slapi_entry_attr_get_charptr(expargs->ep->ep_entry,
+                                                "userpassword");
+        if (pw && !slapi_is_encoded(pw)) {
+            /* clear password does not have {CLEAR} storage scheme */
+            struct berval *vals[2];
+            struct berval val;
+            val.bv_val = slapi_ch_smprintf("{CLEAR}%s", pw);
+            val.bv_len = strlen(val.bv_val);
+            vals[0] = &val;
+            vals[1] = NULL;
+            rc = slapi_entry_attr_replace(expargs->ep->ep_entry,
+                                          "userpassword", vals);
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "export_one_entry", "%s: Failed to add clear password storage scheme: %d\n",
+                              slapi_sdn_get_dn(&expargs->ep->ep_entry->e_sdn), rc);
+            }
+            slapi_ch_free_string(&val.bv_val);
+        }
+        slapi_ch_free_string(&pw);
+    }
+    data.data = slapi_entry2str_with_options(expargs->ep->ep_entry,
+                                             &len, expargs->options);
+    data.size = len + 1;
+
+    if (expargs->printkey & EXPORT_PRINTKEY) {
+        char idstr[64];
+
+        /* BUGFIX: use a bounded formatter.  The previous sprintf into a
+         * 32-byte buffer could overflow on platforms where the entry ID
+         * widens to a 64-bit u_long (12 + 20 digits + '\n' + NUL = 34). */
+        snprintf(idstr, sizeof(idstr), "# entry-id: %lu\n", (u_long)expargs->ep->ep_id);
+        rc = write(expargs->fd, idstr, strlen(idstr));
+        PR_ASSERT(rc > 0);
+    }
+    /* NOTE(review): write() results are only checked via PR_ASSERT, which
+     * is compiled out in optimized builds -- short writes are ignored. */
+    rc = write(expargs->fd, data.data, len);
+    PR_ASSERT(rc > 0);
+    rc = write(expargs->fd, "\n", 1);
+    PR_ASSERT(rc > 0);
+    rc = 0;
+    if ((*expargs->cnt) % 1000 == 0) {
+        int percent;
+
+        if (expargs->idl) {
+            percent = (expargs->idindex * 100 / expargs->idl->b_nids);
+        } else {
+            /* assumes lastid > 0 when no idl is supplied -- TODO confirm */
+            percent = (expargs->ep->ep_id * 100 / expargs->lastid);
+        }
+        if (expargs->task) {
+            slapi_task_log_status(expargs->task,
+                                  "%s: Processed %d entries (%d%%).",
+                                  inst->inst_name, *expargs->cnt, percent);
+            slapi_task_log_notice(expargs->task,
+                                  "%s: Processed %d entries (%d%%).",
+                                  inst->inst_name, *expargs->cnt, percent);
+        }
+        slapi_log_err(SLAPI_LOG_INFO, "export_one_entry", "export %s: Processed %d entries (%d%%).\n",
+                      inst->inst_name, *expargs->cnt, percent);
+        *expargs->lastcnt = *expargs->cnt;
+    }
+bail:
+    slapi_ch_free(&(data.data));
+    return rc;
+}
+
+/*
+ * bdb_db2ldif - backend routine to convert database to an
+ * ldif file.
+ * (reunified at last)
+ */
+#define LDBM2LDIF_BUSY (-2)
+#define RUVRDN SLAPI_ATTR_UNIQUEID "=" RUV_STORAGE_ENTRY_UNIQUEID
+int
+bdb_db2ldif(Slapi_PBlock *pb)
+{
+    backend *be = NULL;
+    struct ldbminfo *li = NULL;
+    DB *db = NULL;
+    DBC *dbc = NULL;
+    struct backentry *ep;
+    struct backentry *pending_ruv = NULL;
+    DBT key = {0};
+    DBT data = {0};
+    char *fname = NULL;
+    int printkey, rc, ok_index;
+    int return_value = 0;
+    int nowrap = 0;
+    int nobase64 = 0;
+    NIDS idindex = 0;
+    ID temp_id;
+    char **exclude_suffix = NULL;
+    char **include_suffix = NULL;
+    int decrypt = 0;
+    int32_t dump_replica = 0;
+    int dump_uniqueid = 1;
+    int fd = STDOUT_FILENO;
+    IDList *idl = NULL; /* optimization for -s include lists */
+    int cnt = 0, lastcnt = 0;
+    int options = 0;
+    int keepgoing = 1;
+    int isfirst = 1;
+    int appendmode = 0;
+    int appendmode_1 = 0;
+    int noversion = 0;
+    ID lastid = 0;
+    int task_flags;
+    Slapi_Task *task;
+    int run_from_cmdline = 0;
+    char *instance_name = NULL;
+    ldbm_instance *inst = NULL;
+    int str2entry_options = 0;
+    int retry;
+    int we_start_the_backends = 0;
+    int server_running;
+    export_args eargs = {0};
+    int32_t suffix_written = 0;
+    int32_t skip_ruv = 0;
+
+    slapi_log_err(SLAPI_LOG_TRACE, "bdb_db2ldif", "=>\n");
+
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_DECRYPT, &decrypt);
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_SERVER_RUNNING, &server_running);
+    slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &task);
+    run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE);
+
+    dump_replica = slapi_pblock_get_ldif_dump_replica(pb);
+    if (run_from_cmdline) {
+        li->li_flags |= SLAPI_TASK_RUNNING_FROM_COMMANDLINE;
+        if (!dump_replica) {
+            we_start_the_backends = 1;
+        }
+    }
+
+    if (run_from_cmdline && BDB_CONFIG(li)->bdb_private_mem && server_running) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_db2ldif", "Cannot export the database while the server "
+                                             "is running and nsslapd-db-private-mem option is used, please use ldif2db.pl\n");
+        return_value = -1;
+        goto bye;
+    }
+
+    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name);
+    if (run_from_cmdline) {
+
+        /* Now that we have processed the config information, we look for
+         * the be that should do the db2ldif. */
+        inst = ldbm_instance_find_by_name(li, instance_name);
+        if (NULL == inst) {
+            slapi_task_log_notice(task, "Unknown backend instance: %s", instance_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", "Unknown backend instance: %s\n",
+                          instance_name);
+            return_value = -1;
+            goto bye;
+        }
+        /* [605974] command db2ldif should not be able to run when on-line
+         * import is running */
+        if (dblayer_in_import(inst)) {
+            slapi_task_log_notice(task, "Backend instance '%s' is busy", instance_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", "Backend instance '%s' is busy\n",
+                          instance_name);
+            return_value = -1;
+            goto bye;
+        }
+
+        /* store the be in the pb */
+        be = inst->inst_be;
+        slapi_pblock_set(pb, SLAPI_BACKEND, be);
+    } else {
+        slapi_pblock_get(pb, SLAPI_BACKEND, &be);
+        if (!be) {
+            slapi_task_log_notice(task, "No backend for: %s", instance_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", "No backend for: %s\n", instance_name);
+            return_value = -1;
+            goto bye;
+        }
+        inst = (ldbm_instance *)be->be_instance_info;
+        if (!inst) {
+            slapi_task_log_notice(task, "Unknown backend instance: %s",instance_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", "Unknown backend instance: %s\n",instance_name);
+            return_value = -1;
+            goto bye;
+        }
+
+        /* check if an import/restore is already ongoing... */
+        if (instance_set_busy(inst) != 0) {
+            slapi_task_log_notice(task,
+                    "Backend instance '%s' is already in the middle of another task and cannot be disturbed.",
+                    inst->inst_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", "Backend instance '%s' is already in the middle"
+                                                                " of another task and cannot be disturbed.\n",
+                          inst->inst_name);
+            return_value = LDBM2LDIF_BUSY;
+            goto bye;
+        }
+    }
+
+    bdb_back_fetch_incl_excl(pb, &include_suffix, &exclude_suffix);
+
+    str2entry_options = (dump_replica ? 0 : SLAPI_STR2ENTRY_TOMBSTONE_CHECK);
+
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_FILE, &fname);
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_PRINTKEY, &printkey);
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_DUMP_UNIQUEID, &dump_uniqueid);
+
+    /* tsk, overloading printkey.  shame on me. */
+    ok_index = !(printkey & EXPORT_ID2ENTRY_ONLY);
+    printkey &= ~EXPORT_ID2ENTRY_ONLY;
+
+    nobase64 = (printkey & EXPORT_MINIMAL_ENCODING);
+    printkey &= ~EXPORT_MINIMAL_ENCODING;
+    nowrap = (printkey & EXPORT_NOWRAP);
+    printkey &= ~EXPORT_NOWRAP;
+    appendmode = (printkey & EXPORT_APPENDMODE);
+    printkey &= ~EXPORT_APPENDMODE;
+    appendmode_1 = (printkey & EXPORT_APPENDMODE_1);
+    printkey &= ~EXPORT_APPENDMODE_1;
+    noversion = (printkey & EXPORT_NOVERSION);
+    printkey &= ~EXPORT_NOVERSION;
+
+    /* decide whether to dump uniqueid */
+    if (dump_uniqueid)
+        options |= SLAPI_DUMP_UNIQUEID;
+    if (nowrap)
+        options |= SLAPI_DUMP_NOWRAP;
+    if (nobase64)
+        options |= SLAPI_DUMP_MINIMAL_ENCODING;
+    if (dump_replica)
+        options |= SLAPI_DUMP_STATEINFO;
+
+    if (fname == NULL) {
+        slapi_task_log_notice(task, "%s: no LDIF filename supplied.", inst->inst_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif", "db2ldif: no LDIF filename supplied\n");
+        return_value = -1;
+        goto bye;
+    }
+
+    if (strcmp(fname, "-")) { /* not '-' */
+        if (appendmode) {
+            if (appendmode_1) {
+                fd = bdb_open_huge_file(fname, O_WRONLY | O_CREAT | O_TRUNC,
+                                            SLAPD_DEFAULT_FILE_MODE);
+            } else {
+                fd = bdb_open_huge_file(fname, O_WRONLY | O_CREAT | O_APPEND,
+                                            SLAPD_DEFAULT_FILE_MODE);
+            }
+        } else {
+            /* open it */
+            fd = bdb_open_huge_file(fname, O_WRONLY | O_CREAT | O_TRUNC,
+                                        SLAPD_DEFAULT_FILE_MODE);
+        }
+        if (fd < 0) {
+            slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+            slapi_task_log_notice(task,
+                    "Backend %s: can't open %s: %d (%s) while running as user \"%s\"",
+                    inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
+                    "db2ldif: %s: can't open %s: %d (%s) while running as user \"%s\"\n",
+                    inst->inst_name, fname, errno, dblayer_strerror(errno), slapdFrontendConfig->localuserinfo->pw_name);
+            return_value = -1;
+            goto bye;
+        }
+    } else { /* '-' */
+        fd = STDOUT_FILENO;
+    }
+
+    if (we_start_the_backends) {
+        if (0 != bdb_start(li, DBLAYER_EXPORT_MODE)) {
+            slapi_task_log_notice(task, "Failed to init database for: %s", inst->inst_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
+                    "db2ldif: Failed to init database: %s\n",
+                    inst->inst_name);
+            return_value = -1;
+            goto bye;
+        }
+        /* bdb_instance_start will init the id2entry index. */
+        if (0 != bdb_instance_start(be, DBLAYER_EXPORT_MODE)) {
+            slapi_task_log_notice(task, "Failed to start database instance: %s", inst->inst_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
+                    "db2ldif: Failed to start database instance: %s\n",
+                    inst->inst_name);
+            return_value = -1;
+            goto bye;
+        }
+    }
+
+    /* idl manipulation requires nextid to be init'd now */
+    if (include_suffix && ok_index)
+        get_ids_from_disk(be);
+
+    if (((dblayer_get_id2entry(be, &db)) != 0) || (db == NULL)) {
+        slapi_task_log_notice(task,
+                "Backend instance '%s' Unable to open/create database(id2entry)",
+                inst->inst_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
+                "Could not open/create id2entry for: %s\n",
+                inst->inst_name);
+        return_value = -1;
+        goto bye;
+    }
+
+    /* if an include_suffix was given (and we're pretty sure the
+     * entrydn and ancestorid indexes are valid), we try to
+     * assemble an id-list of candidates instead of plowing thru
+     * the whole database.  this is a big performance improvement
+     * when exporting config info (which is usually on the order
+     * of 100 entries) from a database that may be on the order of
+     * GIGS in size.
+     */
+    {
+        /* Here, we assume that the table is ordered in EID-order,
+         * which it is !
+         */
+        /* get a cursor so we can walk over the table */
+        return_value = db->cursor(db, NULL, &dbc, 0);
+        if (0 != return_value || NULL == dbc) {
+            slapi_task_log_notice(task,
+                    "Backend instance '%s' Failed to get database cursor: %s (%d)",
+                    inst->inst_name, dblayer_strerror(return_value), return_value);
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_db2ldif", "Backend instance '%s'  Failed to get cursor for db2ldif: %s (%d)\n",
+                          inst->inst_name, dblayer_strerror(return_value), return_value);
+            return_value = -1;
+            goto bye;
+        }
+        key.flags = DB_DBT_MALLOC;
+        data.flags = DB_DBT_MALLOC;
+        return_value = dbc->c_get(dbc, &key, &data, DB_LAST);
+        if (0 != return_value) {
+            keepgoing = 0;
+        } else {
+            lastid = id_stored_to_internal((char *)key.data);
+            slapi_ch_free(&(key.data));
+            slapi_ch_free(&(data.data));
+            isfirst = 1;
+        }
+    }
+    if (include_suffix && ok_index && !dump_replica) {
+        int err;
+
+        idl = bdb_fetch_subtrees(be, include_suffix, &err);
+        if (NULL == idl) {
+            if (err) {
+                /* most likely, indexes are bad. */
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_db2ldif", "Backend %s: Failed to fetch subtree lists (error %d) %s\n",
+                              inst->inst_name, err, dblayer_strerror(err));
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_db2ldif", "Possibly the entrydn/entryrdn or ancestorid index is "
+                                                     "corrupted or does not exist.\n");
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_db2ldif", "Attempting direct unindexed export instead.\n");
+            }
+            ok_index = 0;
+            idl = NULL;
+        } else if (ALLIDS(idl)) {
+            /* allids list is no help at all -- revert to trawling
+             * the whole list. */
+            ok_index = 0;
+            idl_free(&idl);
+        }
+        idindex = 0;
+    }
+
+    /* When the user has specifically asked not to print the version,
+     * or when this is not the first backend being appended into
+     * this file: don't print the version.
+     */
+    if ((!noversion) && ((!appendmode) || (appendmode_1))) {
+        char vstr[64];
+        int myversion = 1; /* XXX: ldif version;
+                 * needs to be modified when version
+                 * control begins.
+                 */
+
+        sprintf(vstr, "version: %d\n\n", myversion);
+        rc = write(fd, vstr, strlen(vstr));
+        PR_ASSERT(rc > 0);
+        rc = 0;
+    }
+
+    eargs.decrypt = decrypt;
+    eargs.options = options;
+    eargs.printkey = printkey;
+    eargs.idl = idl;
+    eargs.lastid = lastid;
+    eargs.fd = fd;
+    eargs.task = task;
+    eargs.include_suffix = include_suffix;
+    eargs.exclude_suffix = exclude_suffix;
+
+    while (keepgoing) {
+        /*
+         * All database operations in a transactional environment,
+         * including non-transactional reads can receive a return of
+         * DB_LOCK_DEADLOCK. Which operation gets aborted depends
+         * on the deadlock detection policy, but can include
+         * non-transactional reads (in which case the single
+         * operation should just be retried).
+         */
+
+        if (idl) {
+            /* exporting from an ID list */
+            if (idindex >= idl->b_nids)
+                break;
+            id_internal_to_stored(idl->b_ids[idindex], (char *)&temp_id);
+            key.data = (char *)&temp_id;
+            key.size = sizeof(temp_id);
+            data.flags = DB_DBT_MALLOC;
+
+            for (retry = 0; retry < RETRY_TIMES; retry++) {
+                return_value = db->get(db, NULL, &key, &data, 0);
+                if (return_value != DB_LOCK_DEADLOCK)
+                    break;
+            }
+            if (return_value) {
+                slapi_task_log_notice(task, "Backend %s: Failed to read entry %lu, err %d\n",
+                        inst->inst_name, (u_long)idl->b_ids[idindex], return_value);
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_db2ldif",
+                        "db2ldif: Backend %s: failed to read entry %lu, err %d\n",
+                        inst->inst_name, (u_long)idl->b_ids[idindex],
+                        return_value);
+                return_value = -1;
+                break;
+            }
+            /* back to internal format: */
+            temp_id = idl->b_ids[idindex];
+            idindex++;
+        } else {
+            /* follow the cursor */
+            key.flags = DB_DBT_MALLOC;
+            data.flags = DB_DBT_MALLOC;
+            if (isfirst) {
+                for (retry = 0; retry < RETRY_TIMES; retry++) {
+                    return_value = dbc->c_get(dbc, &key, &data, DB_FIRST);
+                    if (return_value != DB_LOCK_DEADLOCK)
+                        break;
+                }
+                isfirst = 0;
+            } else {
+                for (retry = 0; retry < RETRY_TIMES; retry++) {
+                    return_value = dbc->c_get(dbc, &key, &data, DB_NEXT);
+                    if (return_value != DB_LOCK_DEADLOCK)
+                        break;
+                }
+            }
+
+            if (DB_NOTFOUND == return_value) {
+                /* reached the end of the database,
+                 * check if ruv is pending and write it
+                 */
+                if (pending_ruv) {
+                    eargs.ep = pending_ruv;
+                    eargs.idindex = idindex;
+                    eargs.cnt = &cnt;
+                    eargs.lastcnt = &lastcnt;
+                    rc = export_one_entry(li, inst, &eargs);
+                    backentry_free(&pending_ruv);
+                }
+                break;
+            }
+
+            if (0 != return_value) {
+                /* error reading database */
+                 break;
+            }
+
+            /* back to internal format */
+            temp_id = id_stored_to_internal((char *)key.data);
+            slapi_ch_free(&(key.data));
+        }
+        if (idl_id_is_in_idlist(eargs.pre_exported_idl, temp_id)) {
+            /* it's already exported */
+            slapi_ch_free(&(data.data));
+            continue;
+        }
+
+        /* call post-entry plugin */
+        plugin_call_entryfetch_plugins((char **)&data.dptr, &data.dsize);
+
+        ep = backentry_alloc();
+        if (entryrdn_get_switch()) {
+            char *rdn = NULL;
+
+            /* rdn is allocated in get_value_from_string */
+            rc = get_value_from_string((const char *)data.dptr, "rdn", &rdn);
+            if (rc) {
+                /* data.dptr may not include rdn: ..., try "dn: ..." */
+                ep->ep_entry = slapi_str2entry(data.dptr,
+                                               str2entry_options | SLAPI_STR2ENTRY_NO_ENTRYDN);
+            } else {
+                char *pid_str = NULL;
+                char *pdn = NULL;
+                ID pid = NOID;
+                char *dn = NULL;
+                struct backdn *bdn = NULL;
+                Slapi_RDN psrdn = {0};
+
+                /* get a parent pid */
+                rc = get_value_from_string((const char *)data.dptr,
+                                           LDBM_PARENTID_STR, &pid_str);
+                if (rc) {
+                    /* this could be a suffix or the RUV entry.
+                     * If it is the ruv and the suffix is not written
+                     * keep the ruv and export as last entry.
+                     *
+                     * The reason for this is that if the RUV entry is in the
+                     * ldif before the suffix entry then at an attempt to import
+                     * that ldif the RUV entry would be skipped because the parent
+                     * does not exist. Later a new RUV would be generated with
+                     * a different database generation and replication is broken
+                     */
+                    if (suffix_written) {
+                        /* this must be the RUV, just continue and write it */
+                        rc = 0;
+                    } else if (0 == strcasecmp(rdn, RUVRDN)) {
+                        /* this is the RUV and the suffix is not yet written
+                         * make it pending and continue with next entry
+                         */
+                        skip_ruv = 1;
+                        rc = 0;
+                    } else {
+                        /* this has to be the suffix */
+                        suffix_written = 1;
+                        rc = 0;
+                    }
+                } else {
+                    pid = (ID)strtol(pid_str, (char **)NULL, 10);
+                    slapi_ch_free_string(&pid_str);
+                    /* if pid is larger than the current pid temp_id,
+                     * the parent entry has to be exported first. */
+                    if (temp_id < pid &&
+                        !idl_id_is_in_idlist(eargs.pre_exported_idl, pid)) {
+
+                        eargs.idindex = idindex;
+                        eargs.cnt = &cnt;
+                        eargs.lastcnt = &lastcnt;
+
+                        rc = _export_or_index_parents(inst, db, NULL, temp_id,
+                                                      rdn, temp_id, pid, run_from_cmdline,
+                                                      &eargs, DB2LDIF_ENTRYRDN, &psrdn);
+                        if (rc) {
+                            slapi_rdn_done(&psrdn);
+                            backentry_free(&ep);
+                            continue;
+                        }
+                    }
+                }
+
+                bdn = dncache_find_id(&inst->inst_dncache, temp_id);
+                if (bdn) {
+                    /* don't free dn */
+                    dn = (char *)slapi_sdn_get_dn(bdn->dn_sdn);
+                    CACHE_RETURN(&inst->inst_dncache, &bdn);
+                    slapi_rdn_done(&psrdn);
+                } else {
+                    int myrc = 0;
+                    Slapi_DN *sdn = NULL;
+                    rc = entryrdn_lookup_dn(be, rdn, temp_id, &dn, NULL, NULL);
+                    if (rc) {
+                        /* We cannot use the entryrdn index;
+                         * Compose dn from the entries in id2entry */
+                        slapi_log_err(SLAPI_LOG_TRACE,
+                                      "bdb_db2ldif", "entryrdn is not available; "
+                                                             "composing dn (rdn: %s, ID: %d)\n",
+                                      rdn, temp_id);
+                        if (NOID != pid) { /* if not a suffix */
+                            if (NULL == slapi_rdn_get_rdn(&psrdn)) {
+                                /* This time just to get the parents' rdn
+                                 * most likely from dn cache. */
+                                rc = _get_and_add_parent_rdns(be, db, NULL, pid,
+                                                              &psrdn, NULL, 0,
+                                                              run_from_cmdline, NULL);
+                                if (rc) {
+                                    slapi_log_err(SLAPI_LOG_WARNING,
+                                                  "bdb_db2ldif", "Skip ID %d\n", pid);
+                                    slapi_ch_free_string(&rdn);
+                                    slapi_rdn_done(&psrdn);
+                                    backentry_free(&ep);
+                                    continue;
+                                }
+                            }
+                            /* Generate DN string from Slapi_RDN */
+                            rc = slapi_rdn_get_dn(&psrdn, &pdn);
+                            if (rc) {
+                                slapi_log_err(SLAPI_LOG_WARNING,
+                                              "bdb_db2ldif", "Failed to compose dn for "
+                                                                     "(rdn: %s, ID: %d) from Slapi_RDN\n",
+                                              rdn, temp_id);
+                                slapi_ch_free_string(&rdn);
+                                slapi_rdn_done(&psrdn);
+                                backentry_free(&ep);
+                                continue;
+                            }
+                        }
+                        dn = slapi_ch_smprintf("%s%s%s",
+                                               rdn, pdn ? "," : "", pdn ? pdn : "");
+                        slapi_ch_free_string(&pdn);
+                    }
+                    slapi_rdn_done(&psrdn);
+                    /* dn is not dup'ed in slapi_sdn_new_dn_passin.
+                     * It's set to bdn and put in the dn cache. */
+                    /* don't free dn */
+                    sdn = slapi_sdn_new_dn_passin(dn);
+                    bdn = backdn_init(sdn, temp_id, 0);
+                    myrc = CACHE_ADD(&inst->inst_dncache, bdn, NULL);
+                    if (myrc) {
+                        backdn_free(&bdn);
+                        slapi_log_err(SLAPI_LOG_CACHE, "bdb_db2ldif",
+                                      "%s is already in the dn cache (%d)\n",
+                                      dn, myrc);
+                    } else {
+                        CACHE_RETURN(&inst->inst_dncache, &bdn);
+                        slapi_log_err(SLAPI_LOG_CACHE, "bdb_db2ldif",
+                                      "entryrdn_lookup_dn returned: %s, "
+                                      "and set to dn cache\n",
+                                      dn);
+                    }
+                }
+                ep->ep_entry = slapi_str2entry_ext(dn, NULL, data.dptr,
+                                                   str2entry_options | SLAPI_STR2ENTRY_NO_ENTRYDN);
+                slapi_ch_free_string(&rdn);
+            }
+        } else {
+            ep->ep_entry = slapi_str2entry(data.dptr, str2entry_options);
+        }
+        slapi_ch_free(&(data.data));
+
+        if ((ep->ep_entry) != NULL) {
+            ep->ep_id = temp_id;
+        } else {
+            slapi_log_err(SLAPI_LOG_WARNING, "bdb_db2ldif", "Skipping "
+                                                                    "badly formatted entry with id %lu\n",
+                          (u_long)temp_id);
+            backentry_free(&ep);
+            continue;
+        }
+
+        if (skip_ruv) {
+            /* now we keep a copy of the ruv entry
+             * and continue with the next entry
+             */
+            pending_ruv = ep;
+            ep = NULL;
+            skip_ruv = 0;
+            continue;
+        }
+
+        eargs.ep = ep;
+        eargs.idindex = idindex;
+        eargs.cnt = &cnt;
+        eargs.lastcnt = &lastcnt;
+        rc = export_one_entry(li, inst, &eargs);
+        backentry_free(&ep);
+    }
+    /* DB_NOTFOUND -> successful end */
+    if (return_value == DB_NOTFOUND)
+        return_value = 0;
+
+    /* done cycling thru entries to write */
+    if (lastcnt != cnt) {
+        if (task) {
+            slapi_task_log_status(task,
+                                  "%s: Processed %d entries (100%%).",
+                                  inst->inst_name, cnt);
+            slapi_task_log_notice(task,
+                                  "%s: Processed %d entries (100%%).",
+                                  inst->inst_name, cnt);
+        }
+        slapi_log_err(SLAPI_LOG_INFO, "bdb_db2ldif",
+                      "export %s: Processed %d entries (100%%).\n",
+                      inst->inst_name, cnt);
+    }
+bye:
+    if (idl) {
+        idl_free(&idl);
+    }
+    if (dbc) {
+        dbc->c_close(dbc);
+    }
+
+    dblayer_release_id2entry(be, db);
+
+    if (fd > STDERR_FILENO) {
+        close(fd);
+    }
+
+    slapi_log_err(SLAPI_LOG_TRACE, "bdb_db2ldif", "<=\n");
+
+    if (we_start_the_backends && NULL != li) {
+        if (0 != dblayer_close(li, DBLAYER_EXPORT_MODE)) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_db2ldif", "db2ldif: Failed to close database\n");
+        }
+    }
+
+    if (!run_from_cmdline && inst && (LDBM2LDIF_BUSY != return_value)) {
+        instance_set_not_busy(inst);
+    }
+
+    bdb_back_free_incl_excl(include_suffix, exclude_suffix);
+    idl_free(&(eargs.pre_exported_idl));
+
+    return (return_value);
+}
+
+/*
+ * bdb2index_bad_vlv - report a request to reindex an unknown VLV index.
+ *
+ * Logs the unrecognized VLV index name together with the list of VLV
+ * index names known for this backend instance.  Messages go both to the
+ * running task (status + notice), if any, and to the server error log.
+ *
+ * task  - the reindex task to receive status/notice messages; may be NULL
+ *         (e.g. when run from the command line), in which case only the
+ *         error log is written.
+ * inst  - backend instance whose VLV index names are listed.
+ * index - the VLV index name that could not be found.
+ */
+static void
+bdb2index_bad_vlv(Slapi_Task *task, ldbm_instance *inst, char *index)
+{
+    /* allocated string of known VLV index names; freed below */
+    char *text = vlv_getindexnames(inst->inst_be);
+
+    if (task) {
+        slapi_task_log_status(task, "%s: Unknown VLV index '%s'",
+                              inst->inst_name, index);
+        slapi_task_log_notice(task, "%s: Unknown VLV index '%s'",
+                              inst->inst_name, index);
+        slapi_task_log_notice(task, "%s: Known VLV indexes are: %s",
+                              inst->inst_name, text);
+    }
+    slapi_log_err(SLAPI_LOG_ERR,
+                  "bdb2index_bad_vlv", "Unknown VLV Index named '%s'\n", index);
+    slapi_log_err(SLAPI_LOG_ERR,
+                  "bdb2index_bad_vlv", "Known VLV Indexes are: %s\n", text);
+    slapi_ch_free_string(&text);
+}
+
+int
+bdb_db2index(Slapi_PBlock *pb)
+{
+    char *instance_name;
+    struct ldbminfo *li;
+    int task_flags, run_from_cmdline;
+    ldbm_instance *inst;
+    backend *be;
+    DB *db = NULL; /* DB handle for id2entry */
+    DBC *dbc = NULL;
+    char **indexAttrs = NULL;
+    struct vlvIndex **pvlv = NULL;
+    DBT key = {0};
+    DBT data = {0};
+    IDList *idl = NULL; /* optimization for vlv index creation */
+    int numvlv = 0;
+    int return_value = -1;
+    int rc = -1;
+    ID temp_id;
+    int i, j, vlvidx;
+    ID lastid;
+    struct backentry *ep = NULL;
+    char *type;
+    NIDS idindex = 0;
+    int count = 0;
+    Slapi_Attr *attr;
+    Slapi_Task *task;
+    int isfirst = 1;
+    int index_ext = 0;
+    struct vlvIndex *vlvip = NULL;
+    back_txn txn;
+    ID suffixid = NOID; /* holds the id of the suffix entry */
+    Slapi_Value **nstombstone_vals = NULL;
+    int istombstone = 0;
+
+    slapi_log_err(SLAPI_LOG_TRACE, "bdb_db2index", "=>\n");
+    if (g_get_shutdown() || c_get_shutdown()) {
+        return return_value;
+    }
+
+    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name);
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
+    run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE);
+    slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &task);
+
+    inst = ldbm_instance_find_by_name(li, instance_name);
+    if (NULL == inst) {
+        slapi_task_log_notice(task, "Unknown ldbm instance %s", instance_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "Unknown ldbm instance %s\n",
+                      instance_name);
+        return return_value;
+    }
+    be = inst->inst_be;
+    slapi_pblock_set(pb, SLAPI_BACKEND, be);
+
+    /* would love to be able to turn off transactions here, but i don't
+     * think it's in the cards...
+     */
+    if (run_from_cmdline) {
+        /* Turn off transactions */
+        ldbm_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off");
+
+        if (0 != dblayer_start(li, DBLAYER_INDEX_MODE)) {
+            slapi_task_log_notice(task, "Failed to init database: %s", instance_name);
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "ldbm2index", "Failed to init database: %s\n", instance_name);
+            return return_value;
+        }
+
+        /* dblayer_instance_start will init the id2entry index. */
+        if (0 != dblayer_instance_start(be, DBLAYER_INDEX_MODE)) {
+            slapi_task_log_notice(task, "Failed to start instance: %s", instance_name);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "db2ldif: Failed to start instance\n");
+            return return_value;
+        }
+
+        /* Initialise the Virtual List View code */
+        vlv_init(inst);
+    }
+
+    /* make sure no other tasks are going, and set the backend readonly */
+    if (instance_set_busy_and_readonly(inst) != 0) {
+        slapi_task_log_notice(task,
+                "%s: is already in the middle of another task and cannot be disturbed.",
+                inst->inst_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "ldbm: '%s' is already in the middle of "
+                                                             "another task and cannot be disturbed.\n",
+                      inst->inst_name);
+        return return_value;
+    }
+
+    if (((dblayer_get_id2entry(be, &db)) != 0) || (db == NULL)) {
+        slapi_task_log_notice(task,
+                "%s: Could not open/create database (id2entry)",
+                inst->inst_name);
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "Could not open/create database(id2entry)\n");
+        goto err_min;
+    }
+
+    /* get a cursor so we can walk over the table */
+    rc = db->cursor(db, NULL, &dbc, 0);
+    if (0 != rc) {
+        slapi_task_log_notice(task,
+                "%s: Failed to get database cursor for ldbm2index",
+                inst->inst_name);
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_db2index", "%s: Failed to get cursor for ldbm2index\n",
+                      inst->inst_name);
+        goto err_min;
+    }
+
+    /* ask for the last id so we can give cute percentages */
+    key.flags = DB_DBT_MALLOC;
+    data.flags = DB_DBT_MALLOC;
+    rc = dbc->c_get(dbc, &key, &data, DB_LAST);
+    if (rc == DB_NOTFOUND) {
+        lastid = 0;
+        isfirst = 0; /* neither a first nor a last */
+    } else if (rc == 0) {
+        lastid = id_stored_to_internal((char *)key.data);
+        slapi_ch_free(&(key.data));
+        slapi_ch_free(&(data.data));
+        isfirst = 1;
+    } else {
+        slapi_task_log_notice(task, "%s: Failed to seek within id2entry (BAD %d)",
+                    inst->inst_name, return_value);
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "bdb_db2index", "%s: Failed to seek within id2entry (BAD %d)\n",
+                      inst->inst_name, return_value);
+        goto err_out;
+    }
+
+    /* Work out which indexes we should build */
+    /* explanation: for archaic reasons, the list of indexes is passed to
+     * ldif2index as a string list, where each string either starts with a
+     * 't' (normal index) or a 'T' (vlv index).
+     * example: "tcn" (normal index cn)
+     */
+    /* NOTE (LK): This part, determining the attrs to reindex, belongs to the layer above;
+     * the selection of attributes is independent of the backend implementation.
+     * But it requires a method to pass the selection to this lower-level indexing function,
+     * either by extension of the pblock or the argument list.
+     * TBD
+     */
+    {
+        char **attrs = NULL;
+        struct attrinfo *ai = NULL;
+
+        slapi_pblock_get(pb, SLAPI_DB2INDEX_ATTRS, &attrs);
+        for (i = 0; attrs && attrs[i]; i++) {
+            if (g_get_shutdown() || c_get_shutdown()) {
+                goto err_out;
+            }
+            switch (attrs[i][0]) {
+            case 't': /* attribute type to index */
+                db2index_add_indexed_attr(be, attrs[i]);
+                ainfo_get(be, attrs[i] + 1, &ai);
+                /* the ai was added above, if it didn't already exist */
+                PR_ASSERT(ai != NULL);
+                if (strcasecmp(attrs[i] + 1, LDBM_ANCESTORID_STR) == 0) {
+                    slapi_task_log_notice(task, "%s: Indexing %s",
+                        inst->inst_name, LDBM_ANCESTORID_STR);
+                    slapi_log_err(SLAPI_LOG_INFO, "bdb_db2index", "%s: Indexing %s\n",
+                                  inst->inst_name, LDBM_ANCESTORID_STR);
+                    index_ext |= DB2INDEX_ANCESTORID;
+                } else if (strcasecmp(attrs[i] + 1, LDBM_ENTRYRDN_STR) == 0) {
+                    if (entryrdn_get_switch()) { /* subtree-rename: on */
+                        slapi_task_log_notice(task, "%s: Indexing %s",
+                             inst->inst_name, LDBM_ENTRYRDN_STR);
+                        slapi_log_err(SLAPI_LOG_INFO, "bdb_db2index", "%s: Indexing %s\n",
+                                      inst->inst_name, LDBM_ENTRYRDN_STR);
+                        index_ext |= DB2INDEX_ENTRYRDN;
+                    } else {
+                        slapi_task_log_notice(task, "%s: Requested to index %s, but %s is off",
+                                inst->inst_name, LDBM_ENTRYRDN_STR, CONFIG_ENTRYRDN_SWITCH);
+                        slapi_log_err(SLAPI_LOG_WARNING,
+                                      "bdb_db2index", "%s: Requested to index %s, but %s is off\n",
+                                      inst->inst_name, LDBM_ENTRYRDN_STR,
+                                      CONFIG_ENTRYRDN_SWITCH);
+                        goto err_out;
+                    }
+                } else if (strcasecmp(attrs[i] + 1, LDBM_ENTRYDN_STR) == 0) {
+                    if (entryrdn_get_switch()) { /* subtree-rename: on */
+                        slapi_task_log_notice(task, "%s: Requested to index %s, but %s is on",
+                                inst->inst_name, LDBM_ENTRYDN_STR, CONFIG_ENTRYRDN_SWITCH);
+                        slapi_log_err(SLAPI_LOG_WARNING,
+                                      "bdb_db2index", "%s: Requested to index %s, but %s is on\n",
+                                      inst->inst_name, LDBM_ENTRYDN_STR,
+                                      CONFIG_ENTRYRDN_SWITCH);
+                        goto err_out;
+                    } else {
+                        charray_add(&indexAttrs, attrs[i] + 1);
+                        ai->ai_indexmask |= INDEX_OFFLINE;
+                        slapi_task_log_notice(task, "%s: Indexing attribute: %s",
+                                inst->inst_name, attrs[i] + 1);
+                        slapi_log_err(SLAPI_LOG_INFO,
+                                      "bdb_db2index", "%s: Indexing attribute: %s\n",
+                                      inst->inst_name, attrs[i] + 1);
+                    }
+                } else {
+                    if (strcasecmp(attrs[i] + 1, SLAPI_ATTR_OBJECTCLASS) == 0) {
+                        index_ext |= DB2INDEX_OBJECTCLASS;
+                    }
+                    charray_add(&indexAttrs, attrs[i] + 1);
+                    ai->ai_indexmask |= INDEX_OFFLINE;
+                    slapi_task_log_notice(task, "%s: Indexing attribute: %s",
+                            inst->inst_name, attrs[i] + 1);
+                    slapi_log_err(SLAPI_LOG_INFO,
+                                  "bdb_db2index", "%s: Indexing attribute: %s\n",
+                                  inst->inst_name, attrs[i] + 1);
+                }
+                dblayer_erase_index_file(be, ai, PR_TRUE, i /* chkpt; 1st time only */);
+                break;
+            case 'T': /* VLV Search to index */
+                vlvip = vlv_find_searchname((attrs[i]) + 1, be);
+                if (vlvip == NULL) {
+                    bdb2index_bad_vlv(task, inst, attrs[i] + 1);
+                } else {
+                    vlvIndex_go_offline(vlvip, be);
+                    if (pvlv == NULL) {
+                        pvlv = (struct vlvIndex **)slapi_ch_calloc(1,
+                                                                   sizeof(struct vlvIndex *));
+                    } else {
+                        pvlv = (struct vlvIndex **)slapi_ch_realloc((char *)pvlv,
+                                                                    (numvlv + 1) * sizeof(struct vlvIndex *));
+                    }
+                    pvlv[numvlv] = vlvip;
+                    numvlv++;
+                    /* Get rid of the index if it already exists */
+                    PR_Delete(vlvIndex_filename(vlvip));
+                    slapi_task_log_notice(task, "%s: Indexing VLV: %s", inst->inst_name, attrs[i] + 1);
+                    slapi_log_err(SLAPI_LOG_INFO, "bdb_db2index", "%s: Indexing VLV: %s\n",
+                                  inst->inst_name, attrs[i] + 1);
+                }
+                break;
+            }
+        }
+    }
+
+    /* if we're only doing vlv indexes, we can accomplish this with an
+     * idl composed from the ancestorid list, instead of traversing the
+     * entire database.
+     */
+    if (!indexAttrs && !LDIF2LDBM_EXTBITS(index_ext) && pvlv) {
+        int err;
+        char **suffix_list = NULL;
+
+        /* create suffix list */
+        for (vlvidx = 0; vlvidx < numvlv; vlvidx++) {
+            char *s =
+                slapi_ch_strdup(slapi_sdn_get_ndn(vlvIndex_getBase(pvlv[vlvidx])));
+            /* 's' is passed in */
+            charray_add(&suffix_list, s);
+        }
+        idl = bdb_fetch_subtrees(be, suffix_list, &err);
+        charray_free(suffix_list);
+        if (!idl) {
+            /* most likely, indexes are bad if err is set. */
+            if (0 != err) {
+                slapi_task_log_notice(task,
+                          "%s: WARNING: Failed to fetch subtree lists (err %d) -- "
+                          "attempting brute-force method instead.",
+                          inst->inst_name, err);
+                slapi_log_err(SLAPI_LOG_WARNING,
+                              "bdb_db2index", "%s: Failed to fetch subtree lists: (%d) %s\n",
+                              inst->inst_name, err, dblayer_strerror(err));
+                slapi_log_err(SLAPI_LOG_WARNING,
+                              "bdb_db2index", "%s: Possibly the entrydn/entryrdn or ancestorid index "
+                                                      "is corrupted or does not exist.\n",
+                              inst->inst_name);
+                slapi_log_err(SLAPI_LOG_WARNING, "bdb_db2index",
+                              "%s: Attempting brute-force method instead.\n",
+                              inst->inst_name);
+            }
+        } else if (ALLIDS(idl)) {
+            /* that's no help. */
+            idl_free(&idl);
+        }
+    }
+
+    if (idl) {
+        /* don't need that cursor, we have a shopping list. */
+        dbc->c_close(dbc);
+    }
+
+    dblayer_txn_init(li, &txn);
+
+    while (1) {
+        if (g_get_shutdown() || c_get_shutdown()) {
+            goto err_out;
+        }
+        if (idl) {
+            if (idindex >= idl->b_nids)
+                break;
+            id_internal_to_stored(idl->b_ids[idindex], (char *)&temp_id);
+            key.data = (char *)&temp_id;
+            key.size = sizeof(temp_id);
+            data.flags = DB_DBT_MALLOC;
+
+            rc = db->get(db, NULL, &key, &data, 0);
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "%s: Failed "
+                                                                     "to read database, errno=%d (%s)\n",
+                              inst->inst_name, rc, dblayer_strerror(rc));
+                slapi_task_log_notice(task, "%s: Failed to read database, err %d (%s)",
+                        inst->inst_name, rc, dblayer_strerror(rc));
+                break;
+            }
+            /* back to internal format: */
+            temp_id = idl->b_ids[idindex];
+        } else {
+            key.flags = DB_DBT_MALLOC;
+            data.flags = DB_DBT_MALLOC;
+            if (isfirst) {
+                rc = dbc->c_get(dbc, &key, &data, DB_FIRST);
+                isfirst = 0;
+            } else {
+                rc = dbc->c_get(dbc, &key, &data, DB_NEXT);
+            }
+
+            if (DB_NOTFOUND == rc) {
+                break;
+            } else if (0 != rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index",
+                        "%s: Failed to read database, errno=%d (%s)\n",
+                        inst->inst_name, rc, dblayer_strerror(rc));
+                slapi_task_log_notice(task, "%s: Failed to read database, err %d (%s)",
+                        inst->inst_name, rc, dblayer_strerror(rc));
+                break;
+            }
+            temp_id = id_stored_to_internal((char *)key.data);
+            slapi_ch_free(&(key.data));
+        }
+        idindex++;
+
+        /* call post-entry plugin */
+        plugin_call_entryfetch_plugins((char **)&data.dptr, &data.dsize);
+
+        ep = backentry_alloc();
+        if (entryrdn_get_switch()) {
+            char *rdn = NULL;
+            int rc = 0;
+
+            /* rdn is allocated in get_value_from_string */
+            rc = get_value_from_string((const char *)data.dptr, "rdn", &rdn);
+            if (rc) {
+                /* data.dptr may not include rdn: ..., try "dn: ..." */
+                ep->ep_entry = slapi_str2entry(data.dptr,
+                                               SLAPI_STR2ENTRY_NO_ENTRYDN);
+            } else {
+                char *pid_str = NULL;
+                char *pdn = NULL;
+                ID pid = NOID;
+                char *dn = NULL;
+                struct backdn *bdn = NULL;
+                Slapi_RDN psrdn = {0};
+
+                /* get a parent pid */
+                rc = get_value_from_string((const char *)data.dptr,
+                                           LDBM_PARENTID_STR, &pid_str);
+                if (rc || !pid_str) {
+                    /* see if this is a suffix or some entry without a parent id
+                       e.g. a tombstone entry */
+                    Slapi_DN sufdn;
+
+                    slapi_sdn_init_dn_byref(&sufdn, rdn);
+                    if (slapi_be_issuffix(be, &sufdn)) {
+                        rc = 0;             /* is a suffix */
+                        suffixid = temp_id; /* this is the ID of a suffix entry */
+                    } else {
+                        /* assume the parent entry is the suffix entry for this backend
+                           set pid to the id of that entry */
+                        pid = suffixid;
+                    }
+                    slapi_sdn_done(&sufdn);
+                }
+                if (pid_str) {
+                    pid = (ID)strtol(pid_str, (char **)NULL, 10);
+                    slapi_ch_free_string(&pid_str);
+                    /* if pid is larger than the current pid temp_id,
+                     * the parent entry has to be exported first. */
+                    if (temp_id < pid) {
+                        rc = _export_or_index_parents(inst, db, &txn, temp_id,
+                                                      rdn, temp_id, pid, run_from_cmdline,
+                                                      NULL, index_ext, &psrdn);
+                        if (rc) {
+                            backentry_free(&ep);
+                            continue;
+                        }
+                    }
+                }
+
+                bdn = dncache_find_id(&inst->inst_dncache, temp_id);
+                if (bdn) {
+                    /* don't free dn */
+                    dn = (char *)slapi_sdn_get_dn(bdn->dn_sdn);
+                    CACHE_RETURN(&inst->inst_dncache, &bdn);
+                } else {
+                    int myrc = 0;
+                    Slapi_DN *sdn = NULL;
+                    rc = entryrdn_lookup_dn(be, rdn, temp_id, &dn, NULL, NULL);
+                    if (rc) {
+                        /* We cannot use the entryrdn index;
+                         * Compose dn from the entries in id2entry */
+                        slapi_log_err(SLAPI_LOG_TRACE, "bdb_db2index",
+                                      "entryrdn is not available; "
+                                      "composing dn (rdn: %s, ID: %d)\n",
+                                      rdn, temp_id);
+                        if (NOID != pid) { /* if not a suffix */
+                            if (NULL == slapi_rdn_get_rdn(&psrdn)) {
+                                /* This time just to get the parents' rdn
+                                 * most likely from dn cache. */
+                                rc = _get_and_add_parent_rdns(be, db, &txn, pid,
+                                                              &psrdn, NULL, 0,
+                                                              run_from_cmdline, NULL);
+                                if (rc) {
+                                    slapi_log_err(SLAPI_LOG_WARNING,
+                                                  "bdb_db2index", "Skip ID %d\n", pid);
+                                    slapi_log_err(SLAPI_LOG_WARNING,
+                                                  "bdb_db2index", "Parent entry (ID %d) of entry. "
+                                                                          "(ID %d, rdn: %s) does not exist.\n",
+                                                  pid, temp_id, rdn);
+                                    slapi_log_err(SLAPI_LOG_WARNING,
+                                                  "bdb_db2index", "We recommend to export the backend "
+                                                                          "instance %s and reimport it.\n",
+                                                  instance_name);
+                                    slapi_ch_free_string(&rdn);
+                                    slapi_rdn_done(&psrdn);
+                                    backentry_free(&ep);
+                                    continue;
+                                }
+                            }
+                            /* Generate DN string from Slapi_RDN */
+                            rc = slapi_rdn_get_dn(&psrdn, &pdn);
+                            if (rc) {
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "bdb_db2index", "Failed to compose dn for "
+                                                                      "(rdn: %s, ID: %d) from Slapi_RDN\n",
+                                              rdn, temp_id);
+                                slapi_ch_free_string(&rdn);
+                                slapi_rdn_done(&psrdn);
+                                backentry_free(&ep);
+                                continue;
+                            }
+                        }
+                        dn = slapi_ch_smprintf("%s%s%s",
+                                               rdn, pdn ? "," : "", pdn ? pdn : "");
+                        slapi_ch_free_string(&pdn);
+                    }
+                    /* dn is not dup'ed in slapi_sdn_new_dn_passin.
+                     * It's set to bdn and put in the dn cache. */
+                    /* don't free dn */
+                    sdn = slapi_sdn_new_dn_passin(dn);
+                    bdn = backdn_init(sdn, temp_id, 0);
+                    myrc = CACHE_ADD(&inst->inst_dncache, bdn, NULL);
+                    if (myrc) {
+                        backdn_free(&bdn);
+                        slapi_log_err(SLAPI_LOG_CACHE, "bdb_db2index",
+                                      "%s is already in the dn cache (%d)\n",
+                                      dn, myrc);
+                    } else {
+                        CACHE_RETURN(&inst->inst_dncache, &bdn);
+                        slapi_log_err(SLAPI_LOG_CACHE, "bdb_db2index",
+                                      "entryrdn_lookup_dn returned: %s, "
+                                      "and set to dn cache\n",
+                                      dn);
+                    }
+                }
+                slapi_rdn_done(&psrdn);
+                ep->ep_entry = slapi_str2entry_ext(dn, NULL, data.dptr,
+                                                   SLAPI_STR2ENTRY_NO_ENTRYDN);
+                slapi_ch_free_string(&rdn);
+            }
+        } else {
+            ep->ep_entry = slapi_str2entry(data.dptr, 0);
+        }
+        slapi_ch_free(&(data.data));
+
+        if (ep->ep_entry != NULL) {
+            ep->ep_id = temp_id;
+        } else {
+            slapi_task_log_notice(task, "%s: WARNING: skipping badly formatted entry (id %lu)",
+                    inst->inst_name, (u_long)temp_id);
+            slapi_log_err(SLAPI_LOG_WARNING,
+                          "bdb_db2index", "%s: Skipping badly formatted entry (id %lu)\n",
+                          inst->inst_name, (u_long)temp_id);
+            backentry_free(&ep);
+            continue;
+        }
+
+        /*
+         * If this entry is a tombstone, update the 'nstombstonecsn' index
+         */
+        if (ep->ep_entry->e_flags & SLAPI_ENTRY_FLAG_TOMBSTONE) {
+            const CSN *tombstone_csn = entry_get_deletion_csn(ep->ep_entry);
+            char deletion_csn_str[CSN_STRSIZE];
+
+            istombstone = 1;
+            if (!nstombstone_vals) {
+                nstombstone_vals = (Slapi_Value **)slapi_ch_calloc(2, sizeof(Slapi_Value *));
+                *nstombstone_vals = slapi_value_new_string(SLAPI_ATTR_VALUE_TOMBSTONE);
+            }
+            if (tombstone_csn) {
+                if (!run_from_cmdline) {
+                    rc = dblayer_txn_begin(be, NULL, &txn);
+                    if (0 != rc) {
+                        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index",
+                                      "%s: Failed to begin txn for update index '%s' (err %d: %s)\n",
+                                      inst->inst_name, SLAPI_ATTR_TOMBSTONE_CSN, rc, dblayer_strerror(rc));
+                        slapi_task_log_notice(task,
+                                "%s: ERROR: failed to begin txn for update index '%s' (err %d: %s)",
+                                inst->inst_name, SLAPI_ATTR_TOMBSTONE_CSN, rc, dblayer_strerror(rc));
+                        return_value = -2;
+                        goto err_out;
+                    }
+                }
+
+                csn_as_string(tombstone_csn, PR_FALSE, deletion_csn_str);
+                rc = index_addordel_string(be, SLAPI_ATTR_TOMBSTONE_CSN, deletion_csn_str,
+                                           ep->ep_id, BE_INDEX_ADD, &txn);
+                if (rc != 0) {
+                    slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index",
+                                  "%s: Failed to update index '%s' (err %d: %s)\n",
+                                  inst->inst_name, SLAPI_ATTR_TOMBSTONE_CSN, rc, dblayer_strerror(rc));
+                    slapi_task_log_notice(task,
+                            "%s: ERROR: failed to update index '%s' (err %d: %s)",
+                            inst->inst_name, SLAPI_ATTR_TOMBSTONE_CSN, rc, dblayer_strerror(rc));
+                    if (!run_from_cmdline) {
+                        dblayer_txn_abort(be, &txn);
+                    }
+                    return_value = -2;
+                    goto err_out;
+                }
+                if (!run_from_cmdline) {
+                    rc = dblayer_txn_commit(be, &txn);
+                    if (0 != rc) {
+                        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index",
+                                      "%s: Failed to commit txn for update index '%s' (err %d: %s)\n",
+                                      inst->inst_name, SLAPI_ATTR_TOMBSTONE_CSN, rc, dblayer_strerror(rc));
+                        slapi_task_log_notice(task,
+                                "%s: ERROR: failed to commit txn for update index '%s' (err %d: %s)",
+                                inst->inst_name, SLAPI_ATTR_TOMBSTONE_CSN, rc, dblayer_strerror(rc));
+                        return_value = -2;
+                        goto err_out;
+                    }
+                }
+            }
+        } else {
+            istombstone = 0;
+        }
+
+        /*
+         * Update the attribute indexes
+         */
+        if (indexAttrs) {
+            if (istombstone && !(index_ext & (DB2INDEX_ENTRYRDN | DB2INDEX_OBJECTCLASS))) {
+                /* if it is a tombstone entry, just entryrdn or "objectclass: nstombstone"
+                 * need to be reindexed.  the to-be-indexed list does not contain them. */
+                backentry_free(&ep);
+                continue;
+            }
+            for (i = slapi_entry_first_attr(ep->ep_entry, &attr); i == 0;
+                 i = slapi_entry_next_attr(ep->ep_entry, attr, &attr)) {
+                Slapi_Value **svals;
+
+                slapi_attr_get_type(attr, &type);
+                for (j = 0; indexAttrs[j] != NULL; j++) {
+                    int is_tombstone_obj = 0;
+                    if (g_get_shutdown() || c_get_shutdown()) {
+                        goto err_out;
+                    }
+                    if (slapi_attr_type_cmp(indexAttrs[j], type, SLAPI_TYPE_CMP_SUBTYPE) == 0) {
+                        if (istombstone) {
+                            if (!slapi_attr_type_cmp(indexAttrs[j], SLAPI_ATTR_OBJECTCLASS, SLAPI_TYPE_CMP_SUBTYPE)) {
+                                is_tombstone_obj = 1; /* is tombstone && is objectclass. need to index "nstombstone"*/
+                            } else if (slapi_attr_type_cmp(indexAttrs[j], LDBM_ENTRYRDN_STR, SLAPI_TYPE_CMP_SUBTYPE)) {
+                                /* Entry is a tombstone && this index is not an entryrdn. */
+                                continue;
+                            }
+                        }
+                        svals = attr_get_present_values(attr);
+
+                        if (!run_from_cmdline) {
+                            rc = dblayer_txn_begin(be, NULL, &txn);
+                            if (0 != rc) {
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "bdb_db2index", "%s: Failed to begin txn for update index '%s'\n",
+                                              inst->inst_name, indexAttrs[j]);
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                                              dblayer_strerror(rc));
+                                slapi_task_log_notice(task,
+                                        "%s: ERROR: failed to begin txn for update index '%s' (err %d: %s)",
+                                        inst->inst_name, indexAttrs[j], rc, dblayer_strerror(rc));
+                                return_value = -2;
+                                goto err_out;
+                            }
+                        }
+                        if (is_tombstone_obj) {
+                            rc = index_addordel_values_sv(be, indexAttrs[j], nstombstone_vals, NULL, ep->ep_id, BE_INDEX_ADD, &txn);
+                        } else {
+                            rc = index_addordel_values_sv(be, indexAttrs[j], svals, NULL, ep->ep_id, BE_INDEX_ADD, &txn);
+                        }
+                        if (rc) {
+                            slapi_log_err(SLAPI_LOG_ERR,
+                                          "bdb_db2index", "%s: Failed to update index '%s'\n",
+                                          inst->inst_name, indexAttrs[j]);
+                            slapi_log_err(SLAPI_LOG_ERR,
+                                          "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                                          dblayer_strerror(rc));
+                            slapi_task_log_notice(task,
+                                    "%s: ERROR: failed to update index '%s' (err %d: %s)",
+                                    inst->inst_name, indexAttrs[j], rc, dblayer_strerror(rc));
+                            if (!run_from_cmdline) {
+                                dblayer_txn_abort(be, &txn);
+                            }
+                            return_value = -2;
+                            goto err_out;
+                        }
+                        if (!run_from_cmdline) {
+                            rc = dblayer_txn_commit(be, &txn);
+                            if (0 != rc) {
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "bdb_db2index", "%s: Failed to commit txn for "
+                                                                      "update index '%s'\n",
+                                              inst->inst_name, indexAttrs[j]);
+                                slapi_log_err(SLAPI_LOG_ERR,
+                                              "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                                              dblayer_strerror(rc));
+                                slapi_task_log_notice(task,
+                                        "%s: ERROR: failed to commit txn for  update index '%s' (err %d: %s)",
+                                        inst->inst_name, indexAttrs[j], rc, dblayer_strerror(rc));
+                                return_value = -2;
+                                goto err_out;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        /*
+         * If it is NOT a tombstone entry, update the Virtual List View indexes.
+         */
+        for (vlvidx = 0; !istombstone && (vlvidx < numvlv); vlvidx++) {
+            char *ai = "Unknown index";
+
+            if (g_get_shutdown() || c_get_shutdown()) {
+                goto err_out;
+            }
+            if (indexAttrs && indexAttrs[vlvidx]) {
+                ai = indexAttrs[vlvidx];
+            }
+            if (!run_from_cmdline) {
+                rc = dblayer_txn_begin(be, NULL, &txn);
+                if (0 != rc) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_db2index", "%s: Failed to begin txn for update index '%s'\n",
+                                  inst->inst_name, ai);
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                                  dblayer_strerror(rc));
+                    slapi_task_log_notice(task,
+                            "%s: ERROR: failed to begin txn for update index '%s' (err %d: %s)",
+                            inst->inst_name, ai, rc, dblayer_strerror(rc));
+                    return_value = -2;
+                    goto err_out;
+                }
+            }
+            /*
+             * lock is needed around vlv_update_index to protect the
+             * vlv structure.
+             */
+            vlv_acquire_lock(be);
+            vlv_update_index(pvlv[vlvidx], &txn, li, pb, NULL, ep);
+            vlv_release_lock(be);
+            if (!run_from_cmdline) {
+                rc = dblayer_txn_commit(be, &txn);
+                if (0 != rc) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_db2index", "%s: Failed to commit txn for update index '%s'\n",
+                                  inst->inst_name, ai);
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                                  dblayer_strerror(rc));
+                    slapi_task_log_notice(task,
+                            "%s: ERROR: failed to commit txn for update index '%s' (err %d: %s)",
+                            inst->inst_name, ai, rc, dblayer_strerror(rc));
+                    return_value = -2;
+                    goto err_out;
+                }
+            }
+        }
+
+        /*
+         * Update the ancestorid and entryrdn index
+         */
+        if (!entryrdn_get_noancestorid() && (index_ext & DB2INDEX_ANCESTORID)) {
+            rc = ldbm_ancestorid_index_entry(be, ep, BE_INDEX_ADD, NULL);
+            if (rc != 0) {
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_db2index", "%s: Failed to update index 'ancestorid'\n",
+                              inst->inst_name);
+                slapi_log_err(SLAPI_LOG_ERR,
+                              "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                              dblayer_strerror(rc));
+                slapi_task_log_notice(task,
+                        "%s: ERROR: failed to update index 'ancestorid' (err %d: %s)",
+                        inst->inst_name, rc, dblayer_strerror(rc));
+                return_value = -2;
+                goto err_out;
+            }
+        }
+        if (index_ext & DB2INDEX_ENTRYRDN) {
+            if (entryrdn_get_switch()) { /* subtree-rename: on */
+                if (!run_from_cmdline) {
+                    rc = dblayer_txn_begin(be, NULL, &txn);
+                    if (0 != rc) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_db2index", "%s: ERROR: failed to begin txn for update index 'entryrdn'\n",
+                                      inst->inst_name);
+                        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "%s: Error %d: %s\n",
+                                      inst->inst_name, rc, dblayer_strerror(rc));
+                        slapi_task_log_notice(task,
+                                "%s: ERROR: failed to begin txn for update index 'entryrdn' (err %d: %s)",
+                                inst->inst_name, rc, dblayer_strerror(rc));
+                        return_value = -2;
+                        goto err_out;
+                    }
+                }
+                rc = entryrdn_index_entry(be, ep, BE_INDEX_ADD, &txn);
+                if (rc) {
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_db2index", "%s: Failed to update index 'entryrdn'\n",
+                                  inst->inst_name);
+                    slapi_log_err(SLAPI_LOG_ERR,
+                                  "bdb_db2index", "%s: Error %d: %s\n", inst->inst_name, rc,
+                                  dblayer_strerror(rc));
+                    slapi_task_log_notice(task,
+                            "%s: ERROR: failed to update index 'entryrdn' (err %d: %s)",
+                            inst->inst_name, rc, dblayer_strerror(rc));
+                    if (!run_from_cmdline) {
+                        dblayer_txn_abort(be, &txn);
+                    }
+                    return_value = -2;
+                    goto err_out;
+                }
+                if (!run_from_cmdline) {
+                    rc = dblayer_txn_commit(be, &txn);
+                    if (0 != rc) {
+                        slapi_log_err(SLAPI_LOG_ERR,
+                                      "bdb_db2index", "%s: Failed to commit txn for "
+                                                              "update index 'entryrdn'\n",
+                                      inst->inst_name);
+                        slapi_log_err(SLAPI_LOG_ERR, "bdb_db2index", "%s: Error %d: %s\n",
+                                      inst->inst_name, rc, dblayer_strerror(rc));
+                        slapi_task_log_notice(task,
+                                "%s: ERROR: failed to commit txn for update index 'entryrdn' (err %d: %s)",
+                                inst->inst_name, rc, dblayer_strerror(rc));
+                        return_value = -2;
+                        goto err_out;
+                    }
+                }
+            }
+        }
+
+        count++;
+        if ((count % 1000) == 0) {
+            int percent;
+
+            if (idl) {
+                percent = (idindex * 100 / (idl->b_nids ? idl->b_nids : 1));
+            } else {
+                percent = (ep->ep_id * 100 / (lastid ? lastid : 1));
+            }
+            if (task) {
+                /* NGK - This should eventually be cleaned up to use the
+                 * public task API */
+                task->task_progress = (idl ? idindex : ep->ep_id);
+                task->task_work = (idl ? idl->b_nids : lastid);
+                slapi_task_status_changed(task);
+                slapi_task_log_status(task, "%s: Indexed %d entries (%d%%).",
+                                      inst->inst_name, count, percent);
+                slapi_task_log_notice(task, "%s: Indexed %d entries (%d%%).",
+                                      inst->inst_name, count, percent);
+            }
+            slapi_log_err(SLAPI_LOG_INFO, "bdb_db2index", "%s: Indexed %d entries (%d%%).\n",
+                          inst->inst_name, count, percent);
+        }
+
+        backentry_free(&ep);
+    }
+
+    /* if we got here, we finished successfully */
+
+    /* activate all the indexes we added */
+    for (i = 0; indexAttrs && indexAttrs[i]; i++) {
+        struct attrinfo *ai = NULL;
+
+        ainfo_get(be, indexAttrs[i], &ai);
+        PR_ASSERT(ai != NULL);
+        ai->ai_indexmask &= ~INDEX_OFFLINE;
+    }
+    for (vlvidx = 0; vlvidx < numvlv; vlvidx++) {
+        vlvIndex_go_online(pvlv[vlvidx], be);
+    }
+
+    /* if it was a task, its status will be updated later after backend is ready for update */
+    slapi_log_err(SLAPI_LOG_INFO, "bdb_db2index", "%s: Finished indexing.\n",
+                  inst->inst_name);
+    return_value = 0; /* success */
+err_out:
+    backentry_free(&ep); /* if ep or *ep is NULL, it does nothing */
+    if (idl) {
+        idl_free(&idl);
+    } else {
+        dbc->c_close(dbc);
+    }
+    if (return_value < 0) { /* error case: undo vlv indexing */
+        /* if jumped to out due to an error, vlv lock has not been released */
+        for (vlvidx = 0; vlvidx < numvlv; vlvidx++) {
+            vlvIndex_go_offline(pvlv[vlvidx], be);
+            vlv_acquire_lock(be);
+            vlvIndex_delete(&pvlv[vlvidx]);
+            vlv_release_lock(be);
+        }
+    }
+err_min:
+    dblayer_release_id2entry(be, db); /* nope */
+    instance_set_not_busy(inst);
+
+    if (return_value == 0) {
+        if (task) {
+            slapi_task_log_status(task, "%s: Finished indexing.",
+                    inst->inst_name);
+            slapi_task_log_notice(task, "%s: Finished indexing.",
+                    inst->inst_name);
+        }
+    }
+
+    if (run_from_cmdline) {
+        dblayer_instance_close(be);
+        if (0 != dblayer_close(li, DBLAYER_INDEX_MODE)) {
+            slapi_log_err(SLAPI_LOG_ERR,
+                          "bdb_db2index", "%s: Failed to close database\n", inst->inst_name);
+        }
+    }
+
+    valuearray_free(&nstombstone_vals);
+    if (indexAttrs) {
+        slapi_ch_free((void **)&indexAttrs);
+    }
+    if (pvlv) {
+        slapi_ch_free((void **)&pvlv);
+    }
+
+    slapi_log_err(SLAPI_LOG_TRACE, "bdb_db2index", "<=\n");
+
+    return return_value;
+}
+
+/*
+ * Determine if the given normalized 'attr' is to be excluded from LDIF
+ * exports.
+ *
+ * Returns a non-zero value if:
+ *    1) The 'attr' is in the configured list of attribute types that
+ *       are to be excluded.
+ * OR 2) dump_uniqueid is zero and 'attr' is the unique ID attribute.
+ *
+ * Return 0 if the attribute is not to be excluded.
+ */
+static int
+ldbm_exclude_attr_from_export(struct ldbminfo *li, const char *attr, int dump_uniqueid)
+
+{
+    int i, rc = 0;
+
+    /* The unique-ID attribute is excluded unless the caller explicitly
+     * asked to dump it (dump_uniqueid non-zero). */
+    if (!dump_uniqueid && 0 == strcasecmp(SLAPI_ATTR_UNIQUEID, attr)) {
+        rc = 1; /* exclude */
+
+    } else if (NULL != li && NULL != li->li_attrs_to_exclude_from_export) {
+        /* Case-insensitive scan of the NULL-terminated, configured
+         * exclusion list; stop at the first match. */
+        for (i = 0; li->li_attrs_to_exclude_from_export[i] != NULL; ++i) {
+            if (0 == strcasecmp(li->li_attrs_to_exclude_from_export[i],
+                                attr)) {
+                rc = 1; /* exclude */
+                break;
+            }
+        }
+    }
+
+    return (rc);
+}
+
+/*
+ * bdb_upgradedb -
+ *
+ * functions to convert idl from the old format to the new one
+ * (604921) Support a database uprev process any time post-install
+ */
+
+int upgradedb_core(Slapi_PBlock *pb, ldbm_instance *inst);
+int upgradedb_copy_logfiles(struct ldbminfo *li, char *destination_dir, int restore);
+int upgradedb_delete_indices_4cmd(ldbm_instance *inst, int flags);
+
+/*
+ * bdb_upgradedb -
+ *    Check the DB version; if the indexes still use the old idl format,
+ *    reindex them using the new idl format.
+ *
+ * standalone only -- not allowed to run while DS is up.
+ */
+int
+bdb_upgradedb(Slapi_PBlock *pb)
+{
+
+    struct ldbminfo *li;
+    Object *inst_obj = NULL;
+    ldbm_instance *inst = NULL;
+    int run_from_cmdline = 0;
+    int task_flags = 0;
+    int server_running = 0;
+    int rval = 0;
+    int backup_rval = 0;
+    int upgrade_rval = 0;
+    char *dest_dir = NULL;
+    char *orig_dest_dir = NULL;
+    char *home_dir = NULL;
+    char *src_dbversion = NULL;
+    char *dest_dbversion = NULL;
+    int up_flags;
+    Slapi_Task *task;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = NULL;
+    int cnt = 0;
+    PRFileInfo64 info = {0};
+    PRUint32 dbversion_flags = DBVERSION_ALL;
+
+    slapi_pblock_get(pb, SLAPI_SEQ_TYPE, &up_flags);
+    slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_upgradedb", "Reindexing all...\n");
+    slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
+    slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &task);
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_SERVER_RUNNING, &server_running);
+
+    run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE);
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+
+    if (run_from_cmdline) {
+        if (bdb_check_and_set_import_cache(li) < 0) {
+            return -1;
+        }
+    } else {
+        Object *inst_obj, *inst_obj2;
+        ldbm_instance *inst = NULL;
+
+        /* server is up -- mark all backends busy */
+        slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_upgradedb",
+                      "server is up -- marking all LDBM backends busy\n");
+        for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+             inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+            inst = (ldbm_instance *)object_get_data(inst_obj);
+            /* check if an import/restore is already ongoing... */
+            /* BUSY flag is cleared at the end of import_main (join thread);
+               it should not cleared in this thread [610347] */
+            if (instance_set_busy(inst) != 0) {
+                slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                              "ldbm: '%s' is already in the middle of "
+                              "another task and cannot be disturbed.\n",
+                              inst->inst_name);
+                slapi_task_log_notice(task,
+                        "Backend '%s' is already in the middle of another task and cannot be disturbed.\n",
+                        inst->inst_name);
+
+                /* painfully, we have to clear the BUSY flags on the
+                 * backends we'd already marked...
+                 */
+                for (inst_obj2 = objset_first_obj(li->li_instance_set);
+                     inst_obj2 && (inst_obj2 != inst_obj);
+                     inst_obj2 = objset_next_obj(li->li_instance_set, inst_obj2)) {
+                    inst = (ldbm_instance *)object_get_data(inst_obj2);
+                    instance_set_not_busy(inst);
+                }
+                if (inst_obj2 && inst_obj2 != inst_obj)
+                    object_release(inst_obj2);
+                object_release(inst_obj);
+                return -1;
+            }
+        }
+    }
+    /* NOTE(review): the early returns below leave the BUSY flags set when
+     * the server is running -- confirm whether the online path needs a
+     * cleanup pass here. */
+    if ((up_flags & SLAPI_UPGRADEDB_DN2RDN) && !entryrdn_get_switch()) {
+        /*
+         * DN2RDN option (-r) is given, but subtree-rename is off.
+         * Print an error and back off.
+         */
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                      "DN2RDN option (-r) is given, but %s is off in "
+                      "dse.ldif.  Please change the value to on.\n",
+                      CONFIG_ENTRYRDN_SWITCH);
+        return -1;
+    }
+
+    /* Decide, from the first instance's dbversion, whether any upgrade
+     * work (idl old->new, DN2RDN, 3.x->4.x) is needed at all. */
+    inst_obj = objset_first_obj(li->li_instance_set);
+    if (inst_obj) {
+        inst = (ldbm_instance *)object_get_data(inst_obj);
+        if (!(up_flags & SLAPI_UPGRADEDB_FORCE)) { /* upgrade idl to new */
+            int need_upgrade = 0;
+            li->li_flags |= LI_FORCE_MOD_CONFIG;
+            /* set new idl */
+            ldbm_config_internal_set(li, CONFIG_IDL_SWITCH, "new");
+            /* First check the dbversion */
+            rval = check_db_inst_version(inst);
+            need_upgrade = (DBVERSION_NEED_IDL_OLD2NEW & rval);
+            if (!need_upgrade && (up_flags & SLAPI_UPGRADEDB_DN2RDN)) {
+                need_upgrade = (rval & DBVERSION_NEED_DN2RDN);
+            }
+            if (!need_upgrade) {
+                need_upgrade = (rval & (DBVERSION_UPGRADE_3_4 | DBVERSION_UPGRADE_4_4));
+            }
+            if (!need_upgrade) {
+                slapi_log_err(SLAPI_LOG_INFO, "ldbm_back_upgradedb",
+                              "Index version is up-to-date\n");
+                return 0;
+            }
+        }
+    } else {
+        slapi_log_err(SLAPI_LOG_WARNING,
+                      "ldbm_back_upgradedb", "No instance to be upgraded\n");
+        return -1;
+    }
+
+    /* we are going to go forward */
+    /*
+     * First, backup index files and checkpoint log files
+     * since the server is not up and running, we can just copy them.
+     */
+    slapi_pblock_get(pb, SLAPI_SEQ_VAL, &dest_dir);
+    if (NULL == dest_dir) {
+        slapi_log_err(SLAPI_LOG_ERR, "upgrade DB",
+                      "Backup directory is not specified.\n");
+        return -1;
+    }
+
+    orig_dest_dir = dest_dir;
+    normalize_dir(dest_dir);
+    /* clean up the backup dir first, then create it */
+    rval = PR_GetFileInfo64(dest_dir, &info);
+    if (PR_SUCCESS == rval) {
+        if (PR_FILE_DIRECTORY == info.type) /* directory exists */
+        {
+            /* don't clobber an existing dir: back up into a timestamped
+             * subdirectory instead */
+            time_t tm = slapi_current_utc_time();
+
+            char *tmpname = slapi_ch_smprintf("%s/%ld", dest_dir, tm);
+            dest_dir = tmpname;
+        } else /* not a directory */
+            PR_Delete(dest_dir);
+    }
+
+    if (mkdir_p(dest_dir, 0700) < 0)
+        goto fail0;
+
+    /* Offline upgrade: no txn logging needed while we reindex. */
+    if (run_from_cmdline)
+        if (bdb_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off")) {
+            goto fail1;
+        }
+
+    for (inst_obj = objset_first_obj(li->li_instance_set);
+         inst_obj;
+         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+        if (run_from_cmdline) {
+            /* need to call bdb_start for each instance,
+               since dblayer_close is called in upgradedb_core =>
+               bdb_back_ldif2db */
+            if (0 != bdb_start(li, DBLAYER_IMPORT_MODE)) {
+                slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                              "Failed to init database\n");
+                goto fail1;
+            }
+        }
+
+        inst = (ldbm_instance *)object_get_data(inst_obj);
+        slapi_pblock_set(pb, SLAPI_BACKEND, inst->inst_be);
+        slapi_pblock_set(pb, SLAPI_BACKEND_INSTANCE_NAME, inst->inst_name);
+
+        /* Back up */
+        inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                              inst_dir, MAXPATHLEN);
+        backup_rval = bdb_copy_directory(li, NULL /* task */,
+                                             inst_dirp, dest_dir, 0 /*backup*/,
+                                             &cnt, 0, 0);
+        if (backup_rval < 0) {
+            /* Log before releasing inst_dirp -- freeing it first would pass
+             * a dangling pointer to the log call. */
+            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                          "Failed to backup index files (instance %s).\n", inst_dirp);
+            if (inst_dirp != inst_dir)
+                slapi_ch_free_string(&inst_dirp);
+            goto fail1;
+        }
+        if (inst_dirp != inst_dir)
+            slapi_ch_free_string(&inst_dirp);
+
+        /* delete index files to be reindexed */
+        if (run_from_cmdline) {
+            rval = upgradedb_delete_indices_4cmd(inst, up_flags);
+            if (rval) {
+                upgrade_rval += rval;
+                slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_upgradedb",
+                              "Can't clean up indices in %s\n", inst->inst_dir_name);
+                continue; /* Need to make all backups; continue */
+            }
+        } else {
+            rval = dblayer_delete_indices(inst);
+            if (rval) {
+                upgrade_rval += rval;
+                slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_upgradedb",
+                              "Can't clean up indices in %s\n", inst->inst_dir_name);
+                continue; /* Need to make all backups; continue */
+            }
+        }
+
+        rval = upgradedb_core(pb, inst);
+        if (rval) {
+            upgrade_rval += rval;
+            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                          "upgradedb: Failed to upgrade database %s\n",
+                          inst->inst_name);
+            if (run_from_cmdline) {
+                continue; /* Need to make all backups; continue */
+            }
+        }
+    }
+
+    /* copy transaction logs */
+    backup_rval += upgradedb_copy_logfiles(li, dest_dir, 0);
+
+    /* copy DBVERSION */
+    home_dir = bdb_get_home_dir(li, NULL);
+    src_dbversion = slapi_ch_smprintf("%s/%s", home_dir, DBVERSION_FILENAME);
+    dest_dbversion = slapi_ch_smprintf("%s/%s", dest_dir, DBVERSION_FILENAME);
+    backup_rval += dblayer_copyfile(src_dbversion, dest_dbversion, 0, 0600);
+
+    if (upgrade_rval) {
+        goto fail1;
+    }
+
+    /* upgrade idl to new; otherwise no need to modify idl-switch */
+    if (!(up_flags & SLAPI_UPGRADEDB_FORCE)) {
+        replace_ldbm_config_value(CONFIG_IDL_SWITCH, "new", li);
+    }
+
+    /* write db version files */
+    bdb_version_write(li, home_dir, NULL, DBVERSION_ALL);
+
+    if ((up_flags & SLAPI_UPGRADEDB_DN2RDN) && entryrdn_get_switch()) {
+        /* exclude dnformat to allow upgradednformat later */
+        dbversion_flags = DBVERSION_ALL ^ DBVERSION_DNFORMAT;
+    }
+    inst_obj = objset_first_obj(li->li_instance_set);
+    while (NULL != inst_obj) {
+        char *inst_dirp = NULL;
+        /* Fetch the instance for this iteration before resolving its
+         * directory; resolving first would use the stale instance pointer
+         * from the previous iteration. */
+        inst = (ldbm_instance *)object_get_data(inst_obj);
+        inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+        bdb_version_write(li, inst_dirp, NULL, dbversion_flags);
+        inst_obj = objset_next_obj(li->li_instance_set, inst_obj);
+        if (inst_dirp != inst_dir)
+            slapi_ch_free_string(&inst_dirp);
+    }
+
+    /* close the database down again */
+    if (run_from_cmdline) {
+        if (0 != dblayer_close(li, DBLAYER_IMPORT_MODE)) {
+            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                          "Failed to close database\n");
+            goto fail1;
+        }
+    }
+
+    /* delete backup */
+    if (NULL != dest_dir)
+        ldbm_delete_dirs(dest_dir);
+
+    if (dest_dir != orig_dest_dir)
+        slapi_ch_free_string(&dest_dir);
+
+    slapi_ch_free_string(&src_dbversion);
+    slapi_ch_free_string(&dest_dbversion);
+
+    return 0;
+
+fail1:
+
+    /* we started dblayer with DBLAYER_IMPORT_MODE
+     * We just want not to generate a guardian file...
+     */
+    if (0 != dblayer_close(li, DBLAYER_ARCHIVE_MODE))
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradedb",
+                      "Failed to close database\n");
+
+    /* restore from the backup, if possible */
+    if (NULL != dest_dir) {
+        /* If the backup was successful and the upgrade failed... */
+        if ((0 == backup_rval) && upgrade_rval) {
+            backup_rval = bdb_restore(li, dest_dir, NULL);
+        }
+        /* restore is done; clean up the backup dir */
+        if (0 == backup_rval) {
+            ldbm_delete_dirs(dest_dir);
+        }
+    }
+    slapi_ch_free_string(&src_dbversion);
+    slapi_ch_free_string(&dest_dbversion);
+
+fail0:
+    if (dest_dir != orig_dest_dir)
+        slapi_ch_free_string(&dest_dir);
+
+    return rval + upgrade_rval;
+}
+
+#define LOG "log."
+#define LOGLEN 4
+int
+upgradedb_copy_logfiles(struct ldbminfo *li, char *destination_dir, int restore)
+{
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    char *src;
+    char *dest;
+    int srclen;
+    int destlen;
+    int rval = 0;
+    int len0 = 0;
+    int len1 = 0;
+    char *from = NULL;
+    char *to = NULL;
+
+    /* Copy direction: backup copies li_directory -> destination_dir,
+     * restore copies destination_dir -> li_directory. */
+    if (restore) {
+        src = destination_dir;
+        dest = li->li_directory;
+    } else {
+        src = li->li_directory;
+        dest = destination_dir;
+    }
+    if (NULL == src || '\0' == *src) {
+        slapi_log_err(SLAPI_LOG_ERR, "upgradedb_copy_logfiles",
+                      "NULL src directory\n");
+        return -1;
+    }
+    if (NULL == dest || '\0' == *dest) {
+        slapi_log_err(SLAPI_LOG_ERR, "upgradedb_copy_logfiles",
+                      "NULL dest directory\n");
+        return -1;
+    }
+    srclen = strlen(src);
+    destlen = strlen(dest);
+
+    /* Open the instance dir so we can look what's in it. */
+    dirhandle = PR_OpenDir(src);
+    if (NULL == dirhandle)
+        return -1;
+
+    while (NULL != (direntry =
+                        PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        if (NULL == direntry->name)
+            break;
+
+        /* Only files named "log.<digits>" are BDB transaction logs. */
+        if (0 == strncmp(direntry->name, LOG, LOGLEN)) {
+            int filelen = strlen(direntry->name);
+            char *p, *endp;
+            int fromlen, tolen;
+            int notalog = 0;
+
+            endp = (char *)direntry->name + filelen;
+            for (p = (char *)direntry->name + LOGLEN; p < endp; p++) {
+                /* cast: isdigit on a plain (possibly signed) char is UB */
+                if (!isdigit((unsigned char)*p)) {
+                    notalog = 1;
+                    break;
+                }
+            }
+            if (notalog)
+                continue; /* go to next file */
+
+            /* Grow the scratch path buffers only when needed, reusing
+             * them across iterations. */
+            fromlen = srclen + filelen + 2;
+            if (len0 < fromlen) {
+                slapi_ch_free_string(&from);
+                from = slapi_ch_calloc(1, fromlen);
+                len0 = fromlen;
+            }
+            PR_snprintf(from, len0, "%s/%s", src, direntry->name);
+            tolen = destlen + filelen + 2;
+            if (len1 < tolen) {
+                slapi_ch_free_string(&to);
+                to = slapi_ch_calloc(1, tolen);
+                len1 = tolen;
+            }
+            PR_snprintf(to, len1, "%s/%s", dest, direntry->name);
+            rval = dblayer_copyfile(from, to, 1, DEFAULT_MODE);
+            if (rval < 0)
+                break;
+        }
+    }
+    slapi_ch_free_string(&from);
+    slapi_ch_free_string(&to);
+    PR_CloseDir(dirhandle);
+
+    return rval;
+}
+
+int
+upgradedb_delete_indices_4cmd(ldbm_instance *inst, int flags __attribute__((unused)))
+{
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    int rval = 0;
+    char fullpath[MAXPATHLEN];
+    char *fullpathp = fullpath;
+    char inst_dir[MAXPATHLEN];
+    char *inst_dirp = dblayer_get_full_inst_dir(inst->inst_li, inst,
+                                                inst_dir, MAXPATHLEN);
+
+    slapi_log_err(SLAPI_LOG_TRACE, "upgradedb_delete_indices_4cmd",
+                  "%s\n", inst_dir);
+    dirhandle = PR_OpenDir(inst_dirp);
+    if (!dirhandle) {
+        slapi_log_err(SLAPI_LOG_ERR, "upgradedb_delete_indices_4cmd",
+                      "PR_OpenDir failed\n");
+        if (inst_dirp != inst_dir)
+            slapi_ch_free_string(&inst_dirp);
+        return -1;
+    }
+
+    /* Remove every regular file in the instance dir except id2entry;
+     * the indexes will be rebuilt from id2entry afterwards. */
+    while (NULL != (direntry =
+                        PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        PRFileInfo64 info;
+        PRStatus prrval;
+        int len;
+
+        if (!direntry->name)
+            break;
+
+        if (0 == strcmp(direntry->name, ID2ENTRY LDBM_FILENAME_SUFFIX))
+            continue;
+
+        len = strlen(inst_dirp) + strlen(direntry->name) + 2;
+        if (len > MAXPATHLEN) {
+            fullpathp = (char *)slapi_ch_malloc(len);
+        }
+        sprintf(fullpathp, "%s/%s", inst_dirp, direntry->name);
+        /* Use a dedicated status variable: previously the PR_GetFileInfo64
+         * result of the *last* entry leaked into the function's return
+         * value, causing a spurious failure on a failed stat. */
+        prrval = PR_GetFileInfo64(fullpathp, &info);
+        if (PR_SUCCESS == prrval && PR_FILE_DIRECTORY != info.type) {
+            PR_Delete(fullpathp);
+            /* log the actual path used, not the (possibly unused) stack
+             * buffer */
+            slapi_log_err(SLAPI_LOG_TRACE, "upgradedb_delete_indices_4cmd",
+                          "%s deleted\n", fullpathp);
+        }
+        if (fullpathp != fullpath) {
+            slapi_ch_free_string(&fullpathp);
+            /* reset to the stack buffer: slapi_ch_free_string NULLed the
+             * pointer, and a NULL target would crash the next sprintf */
+            fullpathp = fullpath;
+        }
+    }
+    PR_CloseDir(dirhandle);
+    if (inst_dirp != inst_dir)
+        slapi_ch_free_string(&inst_dirp);
+    return rval;
+}
+
+/*
+ * upgradedb_core
+ */
+int
+upgradedb_core(Slapi_PBlock *pb, ldbm_instance *inst)
+{
+    backend *be = inst->inst_be;
+    int task_flags = 0;
+    int run_from_cmdline = 0;
+
+    slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
+    run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE);
+
+    slapi_log_err(SLAPI_LOG_INFO, "upgradedb_core",
+                  "%s: Start upgradedb.\n", inst->inst_name);
+
+    if (!run_from_cmdline) {
+        /* Server is up: take this instance offline and flush its caches
+         * before closing the underlying database instance. */
+        slapi_log_err(SLAPI_LOG_TRACE, "upgradedb_core",
+                      "Bringing %s offline...\n", inst->inst_name);
+        slapi_mtn_be_disable(inst->inst_be);
+        cache_clear(&inst->inst_cache, CACHE_TYPE_ENTRY);
+        if (entryrdn_get_switch()) {
+            cache_clear(&inst->inst_dncache, CACHE_TYPE_DN);
+        }
+        dblayer_instance_close(be);
+    }
+
+    /* Re-open the instance in import mode; this also inits id2entry. */
+    if (bdb_instance_start(be, DBLAYER_IMPORT_MODE) != 0) {
+        slapi_log_err(SLAPI_LOG_ERR, "upgradedb_core",
+                      "Failed to init instance %s\n", inst->inst_name);
+        return -1;
+    }
+
+    if (run_from_cmdline) {
+        vlv_init(inst); /* Initialise the Virtual List View code */
+    }
+
+    /* The actual reindex is an import driven from id2entry. */
+    return bdb_back_ldif2db(pb);
+}
+
+/* Used by the reindex and export (subtree rename must be on)*/
+/* Note: If DB2LDIF_ENTRYRDN or DB2INDEX_ENTRYRDN is set to index_ext,
+ *       the specified operation is executed.
+ *       If 0 is passed, just Slapi_RDN srdn is filled and returned.
+ */
+static int
+_get_and_add_parent_rdns(backend *be,
+                         DB *db,
+                         back_txn *txn,
+                         ID id,           /* input */
+                         Slapi_RDN *srdn, /* output */
+                         ID *pid,         /* output */
+                         int index_ext,   /* DB2LDIF_ENTRYRDN | DB2INDEX_ENTRYRDN | 0 */
+                         int run_from_cmdline,
+                         export_args *eargs)
+{
+    int rc = -1;
+    Slapi_RDN mysrdn = {0};
+    struct backdn *bdn = NULL;
+    ldbm_instance *inst = NULL;
+    struct ldbminfo *li = NULL;
+    struct backentry *ep = NULL;
+    char *rdn = NULL;
+    DBT key, data;
+    char *pid_str = NULL;
+    ID storedid;
+    ID temp_pid = NOID;
+
+    if (!entryrdn_get_switch()) { /* entryrdn specific code */
+        return rc;
+    }
+
+    if (NULL == be || NULL == srdn) {
+        slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                      "Empty %s\n", NULL == be ? "be" : "srdn");
+        return rc;
+    }
+
+    inst = (ldbm_instance *)be->be_instance_info;
+    li = inst->inst_li;
+    memset(&data, 0, sizeof(data));
+
+    /* first, try the dn cache */
+    bdn = dncache_find_id(&inst->inst_dncache, id);
+    if (bdn) {
+        /* Luckily, found the parent in the dn cache!  */
+        /* Convert the cached DN into RDNs and merge them into srdn;
+         * the two branches differ only in whether srdn already holds
+         * RDNs from a previous recursion level. */
+        if (slapi_rdn_get_rdn(srdn)) { /* srdn is already in use */
+            rc = slapi_rdn_init_all_dn(&mysrdn, slapi_sdn_get_dn(bdn->dn_sdn));
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "Failed to convert DN %s to RDN\n",
+                              slapi_rdn_get_rdn(&mysrdn));
+                slapi_rdn_done(&mysrdn);
+                CACHE_RETURN(&inst->inst_dncache, &bdn);
+                goto bail;
+            }
+            rc = slapi_rdn_add_srdn_to_all_rdns(srdn, &mysrdn);
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "Failed to merge Slapi_RDN %s to RDN\n",
+                              slapi_sdn_get_dn(bdn->dn_sdn));
+            }
+            slapi_rdn_done(&mysrdn);
+        } else { /* srdn is empty */
+            rc = slapi_rdn_init_all_dn(srdn, slapi_sdn_get_dn(bdn->dn_sdn));
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "Failed to convert DN %s to RDN\n",
+                              slapi_sdn_get_dn(bdn->dn_sdn));
+                CACHE_RETURN(&inst->inst_dncache, &bdn);
+                goto bail;
+            }
+        }
+        CACHE_RETURN(&inst->inst_dncache, &bdn);
+    }
+
+    if (!bdn || (index_ext & (DB2LDIF_ENTRYRDN | DB2INDEX_ENTRYRDN)) || pid) {
+        /* not in the dn cache or DB2LDIF or caller is expecting the parent ID;
+         * read id2entry */
+        if (NULL == db) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Empty db\n");
+            goto bail;
+        }
+        /* Look up the raw entry by ID in id2entry (big-endian stored key). */
+        id_internal_to_stored(id, (char *)&storedid);
+        key.size = key.ulen = sizeof(ID);
+        key.data = &storedid;
+        key.flags = DB_DBT_USERMEM;
+
+        memset(&data, 0, sizeof(data));
+        data.flags = DB_DBT_MALLOC;
+        rc = db->get(db, NULL, &key, &data, 0);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed to position cursor at ID " ID_FMT "\n", id);
+            goto bail;
+        }
+        /* NOTE(review): reads use data.dptr while memset/free use data.data
+         * -- confirm the DBT compat definition maps dptr onto data. */
+        /* rdn is allocated in get_value_from_string */
+        rc = get_value_from_string((const char *)data.dptr, "rdn", &rdn);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed to get rdn of entry " ID_FMT "\n", id);
+            goto bail;
+        }
+        /* rdn is going to be set to srdn */
+        rc = slapi_rdn_init_all_dn(&mysrdn, rdn);
+        if (rc < 0) { /* expect rc == 1 since we are setting "rdn" not "dn" */
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed to add rdn %s of entry " ID_FMT "\n", rdn, id);
+            goto bail;
+        }
+        /* pid */
+        rc = get_value_from_string((const char *)data.dptr,
+                                   LDBM_PARENTID_STR, &pid_str);
+        if (rc) {
+            rc = 0; /* assume this is a suffix */
+            temp_pid = NOID;
+        } else {
+            temp_pid = (ID)strtol(pid_str, (char **)NULL, 10);
+            slapi_ch_free_string(&pid_str);
+        }
+        if (pid) {
+            *pid = temp_pid;
+        }
+    }
+    if (!bdn) {
+        /* DN cache miss: recurse up the tree to collect the ancestors'
+         * RDNs, then append this entry's RDNs to srdn.  index_ext is only
+         * propagated while walking towards smaller ancestor IDs. */
+        if (NOID != temp_pid) {
+            rc = _get_and_add_parent_rdns(be, db, txn, temp_pid, &mysrdn, NULL,
+                                          id < temp_pid ? index_ext : 0, run_from_cmdline, eargs);
+            if (rc) {
+                goto bail;
+            }
+        }
+        rc = slapi_rdn_add_srdn_to_all_rdns(srdn, &mysrdn);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed to merge Slapi_RDN %s to RDN\n",
+                          slapi_rdn_get_rdn(&mysrdn));
+            goto bail;
+        }
+    }
+
+    if (index_ext & (DB2LDIF_ENTRYRDN | DB2INDEX_ENTRYRDN)) {
+        /* Build a backentry from the raw id2entry data; needed both for
+         * entryrdn indexing and for LDIF export below. */
+        char *dn = NULL;
+        ep = backentry_alloc();
+        rc = slapi_rdn_get_dn(srdn, &dn);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed to compose dn for "
+                          "(rdn: %s, ID: %d) from Slapi_RDN\n",
+                          rdn, id);
+            goto bail;
+        }
+        ep->ep_entry = slapi_str2entry_ext(dn, NULL, data.dptr,
+                                           SLAPI_STR2ENTRY_NO_ENTRYDN);
+        ep->ep_id = id;
+        slapi_ch_free_string(&dn);
+    }
+
+    if (index_ext & DB2INDEX_ENTRYRDN) {
+        /* Online reindex wraps the entryrdn update in its own txn;
+         * offline (command line) mode runs without transactions. */
+        if (txn && !run_from_cmdline) {
+            rc = dblayer_txn_begin(be, NULL, txn);
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "%s: Failed to begin txn for update "
+                              "index 'entryrdn'\n",
+                              inst->inst_name);
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "%s: Error %d: %s\n",
+                              inst->inst_name, rc, dblayer_strerror(rc));
+                goto bail;
+            }
+        }
+        rc = entryrdn_index_entry(be, ep, BE_INDEX_ADD, txn);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "%s: Failed to update index 'entryrdn'\n",
+                          inst->inst_name);
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "%s: Error %d: %s\n", inst->inst_name, rc,
+                          dblayer_strerror(rc));
+            if (txn && !run_from_cmdline) {
+                dblayer_txn_abort(be, txn);
+            }
+            goto bail;
+        }
+        if (txn && !run_from_cmdline) {
+            rc = dblayer_txn_commit(be, txn);
+            if (rc) {
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "%s: Failed to commit txn for "
+                              "update index 'entryrdn'\n",
+                              inst->inst_name);
+                slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                              "%s: Error %d: %s\n",
+                              inst->inst_name, rc, dblayer_strerror(rc));
+                goto bail;
+            }
+        }
+    } else if (index_ext & DB2LDIF_ENTRYRDN) {
+        /* Export path: emit the entry now and record its ID so callers
+         * can skip already-exported ancestors. */
+        if (NULL == eargs) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Empty export args\n");
+            rc = -1;
+            goto bail;
+        }
+        eargs->ep = ep;
+        rc = export_one_entry(li, inst, eargs);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed to export an entry %s\n",
+                          slapi_sdn_get_dn(slapi_entry_get_sdn(ep->ep_entry)));
+            goto bail;
+        }
+        rc = idl_append_extend(&(eargs->pre_exported_idl), id);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_get_and_add_parent_rdns",
+                          "Failed add %d to exported idl\n", id);
+        }
+    }
+
+bail:
+    /* Common cleanup for every exit path above. */
+    backentry_free(&ep);
+    slapi_rdn_done(&mysrdn);
+    slapi_ch_free(&data.data);
+    slapi_ch_free_string(&rdn);
+    return rc;
+}
+
+/* Used by the reindex and export (subtree rename must be on)*/
+/* Used by the reindex and export (subtree rename must be on)*/
+static int
+_export_or_index_parents(ldbm_instance *inst,
+                         DB *db,
+                         back_txn *txn,
+                         ID currentid, /* current id to compare with */
+                         char *rdn,    /* my rdn */
+                         ID id,        /* my id */
+                         ID pid,       /* parent id */
+                         int run_from_cmdline,
+                         export_args *eargs,
+                         int type, /* DB2LDIF_ENTRYRDN or DB2INDEX_ENTRYRDN */
+                         Slapi_RDN *psrdn /* output */)
+{
+    int rc = -1;
+    ID temp_pid = 0;
+    char *prdn = NULL;
+    Slapi_DN *psdn = NULL;
+    ID ppid = 0;
+    char *pprdn = NULL;
+    backend *be = inst->inst_be;
+
+    if (!entryrdn_get_switch()) { /* entryrdn specific code */
+        return rc;
+    }
+
+    /* in case the parent is not already exported */
+    /* Try the entryrdn index first to resolve the parent's RDN. */
+    rc = entryrdn_get_parent(be, rdn, id, &prdn, &temp_pid, NULL);
+    if (rc) { /* entryrdn is not available. */
+        /* get the parent info from the id2entry (no add) */
+        rc = _get_and_add_parent_rdns(be, db, txn, pid, psrdn, &ppid, 0,
+                                      run_from_cmdline, NULL);
+        if (rc) {
+            slapi_log_err(SLAPI_LOG_ERR, "_export_or_index_parents",
+                          "Failed to get the DN of ID %d\n", pid);
+            goto bail;
+        }
+        prdn = slapi_ch_strdup(slapi_rdn_get_rdn(psrdn));
+    } else { /* we have entryrdn */
+        /* Cross-check the parentid from entryrdn against id2entry;
+         * id2entry wins on a mismatch. */
+        if (pid != temp_pid) {
+            slapi_log_err(SLAPI_LOG_WARNING, "_export_or_index_parents",
+                          "parentid conflict found between entryrdn (%d) and "
+                          "id2entry (%d)\n",
+                          temp_pid, pid);
+            slapi_log_err(SLAPI_LOG_WARNING, "_export_or_index_parents",
+                          "Ignoring entryrdn\n");
+        } else {
+            struct backdn *bdn = NULL;
+            char *pdn = NULL;
+
+            bdn = dncache_find_id(&inst->inst_dncache, pid);
+            if (!bdn) {
+                /* we put pdn to dn cache, which could be used
+                 * in _get_and_add_parent_rdns */
+                rc = entryrdn_lookup_dn(be, prdn, pid, &pdn, NULL, NULL);
+                if (0 == rc) {
+                    int myrc = 0;
+                    /* pdn is put in DN cache.  No need to free it here,
+                     * since it'll be free'd when evicted from the cache. */
+                    psdn = slapi_sdn_new_dn_passin(pdn);
+                    bdn = backdn_init(psdn, pid, 0);
+                    myrc = CACHE_ADD(&inst->inst_dncache, bdn, NULL);
+                    if (myrc) {
+                        backdn_free(&bdn);
+                        slapi_log_err(SLAPI_LOG_CACHE,
+                                      "_export_or_index_parents",
+                                      "%s is already in the dn cache (%d)\n",
+                                      pdn, myrc);
+                    } else {
+                        CACHE_RETURN(&inst->inst_dncache, &bdn);
+                        slapi_log_err(SLAPI_LOG_CACHE,
+                                      "_export_or_index_parents",
+                                      "entryrdn_lookup_dn returned: %s, "
+                                      "and set to dn cache\n",
+                                      pdn);
+                    }
+                }
+            }
+        }
+    }
+
+    /* check one more upper level */
+    /* ppid is still 0 when the entryrdn path above was taken, so resolve
+     * the grandparent ID here. */
+    if (0 == ppid) {
+        rc = entryrdn_get_parent(be, prdn, pid, &pprdn, &ppid, NULL);
+        slapi_ch_free_string(&pprdn);
+        if (rc) { /* entryrdn is not available */
+            slapi_log_err(SLAPI_LOG_ERR, "_export_or_index_parents",
+                          "Failed to get the parent of ID %d\n", pid);
+            goto bail;
+        }
+    }
+    /* Recurse upwards first: an ancestor with a larger ID than currentid
+     * has not been visited by the sequential scan yet, so it must be
+     * exported/indexed before this entry (unless already exported). */
+    if (ppid > currentid &&
+        (!eargs || !idl_id_is_in_idlist(eargs->pre_exported_idl, ppid))) {
+        Slapi_RDN ppsrdn = {0};
+        rc = _export_or_index_parents(inst, db, txn, currentid, prdn, pid,
+                                      ppid, run_from_cmdline, eargs, type, &ppsrdn);
+        if (rc) {
+            goto bail;
+        }
+        slapi_rdn_done(&ppsrdn);
+    }
+    slapi_rdn_done(psrdn);
+    /* Finally process the parent itself, filling psrdn for the caller. */
+    rc = _get_and_add_parent_rdns(be, db, txn, pid, psrdn, NULL,
+                                  type, run_from_cmdline, eargs);
+    if (rc) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "_export_or_index_parents", "Failed to get rdn for ID: %d\n", pid);
+        slapi_rdn_done(psrdn);
+    }
+bail:
+    slapi_ch_free_string(&prdn);
+    return rc;
+}
+
+/*
+ * bdb_upgradednformat
+ *
+ * Update the old DN format in entrydn and the leaf attr value to the new one.
+ *
+ * The implementation is similar to the upgradedb for the new idl format.
+ * Scan each entry, comparing the entrydn value with the normalized dn.
+ * If they don't match,
+ *   replace the old entrydn value with the new one in the entry
+ *   in id2entry.db4.
+ *   Also get the leaf RDN attribute value, unescape it, and check
+ *   whether it is in the entry.  If not, add it.
+ * Then, update the key in the entrydn index and the leaf RDN attribute
+ * (if needed).
+ *
+ * Return value:  0: success (the backend instance includes update
+ *                   candidates for DRYRUN mode)
+ *                1: the backend instance is up-to-date (DRYRUN mode only)
+ *               -1: error
+ *
+ * standalone only -- not allowed to run while DS is up.
+ */
+int
+bdb_upgradednformat(Slapi_PBlock *pb)
+{
+    int rc = -1;
+    struct ldbminfo *li = NULL;
+    int run_from_cmdline = 0;
+    int task_flags = 0;
+    int server_running = 0;
+    Slapi_Task *task;
+    ldbm_instance *inst = NULL;
+    char *instance_name = NULL;
+    backend *be = NULL;
+    PRStatus prst = 0;
+    PRFileInfo64 prfinfo = {0};
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    size_t id2entrylen = 0;
+    int found = 0;
+    char *rawworkdbdir = NULL;
+    char *workdbdir = NULL;
+    char *origdbdir = NULL;
+    char *origlogdir = NULL;
+    char *originstparentdir = NULL;
+    char *sep = NULL;
+    char *ldbmversion = NULL;
+    char *dataversion = NULL;
+    int ud_flags = 0;
+    int result = 0;
+
+    slapi_pblock_get(pb, SLAPI_TASK_FLAGS, &task_flags);
+    slapi_pblock_get(pb, SLAPI_BACKEND_TASK, &task);
+    slapi_pblock_get(pb, SLAPI_DB2LDIF_SERVER_RUNNING, &server_running);
+    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name);
+    slapi_pblock_get(pb, SLAPI_SEQ_TYPE, &ud_flags);
+
+    run_from_cmdline = (task_flags & SLAPI_TASK_RUNNING_FROM_COMMANDLINE);
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    if (run_from_cmdline) {
+        ldbm_config_load_dse_info(li);
+        if (bdb_check_and_set_import_cache(li) < 0) {
+            return -1;
+        }
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      " Online mode is not supported. "
+                      "Shutdown the server and run the tool\n");
+        goto bail;
+    }
+
+    /* Find the instance that the ldif2db will be done on. */
+    inst = ldbm_instance_find_by_name(li, instance_name);
+    if (NULL == inst) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Unknown ldbm instance %s\n", instance_name);
+        goto bail;
+    }
+    slapi_log_err(SLAPI_LOG_INFO, "ldbm_back_upgradednformat",
+                  "%s: Start upgrade dn format.\n", inst->inst_name);
+
+    slapi_pblock_set(pb, SLAPI_BACKEND, inst->inst_be);
+    slapi_pblock_get(pb, SLAPI_SEQ_VAL, &rawworkdbdir);
+    normalize_dir(rawworkdbdir); /* remove trailing spaces and slashes */
+
+    prst = PR_GetFileInfo64(rawworkdbdir, &prfinfo);
+    if (PR_FAILURE == prst || PR_FILE_DIRECTORY != prfinfo.type) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Working DB instance dir %s is not a directory\n",
+                      rawworkdbdir);
+        goto bail;
+    }
+    dirhandle = PR_OpenDir(rawworkdbdir);
+    if (!dirhandle) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Failed to open working DB instance dir %s\n",
+                      rawworkdbdir);
+        goto bail;
+    }
+    id2entrylen = strlen(ID2ENTRY);
+    while ((direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        if (!direntry->name)
+            break;
+        if (0 == strncasecmp(ID2ENTRY, direntry->name, id2entrylen)) {
+            found = 1;
+            break;
+        }
+    }
+    PR_CloseDir(dirhandle);
+
+    if (!found) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Working DB instance dir %s does not include %s file\n",
+                      rawworkdbdir, ID2ENTRY);
+        goto bail;
+    }
+
+    if (run_from_cmdline) {
+        if (bdb_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off")){
+            goto bail;
+        }
+    }
+
+    /* We have to work on the copied db.  So, the path should be set here. */
+    origdbdir = li->li_directory;
+    origlogdir = BDB_CONFIG(li)->bdb_log_directory;
+    originstparentdir = inst->inst_parent_dir_name;
+
+    workdbdir = rel2abspath(rawworkdbdir);
+
+    result = bdb_version_read(li, workdbdir, &ldbmversion, &dataversion);
+    if (result == 0 && ldbmversion) {
+        char *ptr = PL_strstr(ldbmversion, BDB_DNFORMAT);
+        if (ptr) {
+            /* DN format is RFC 4514 compliant */
+            if (strlen(ptr) == strlen(BDB_DNFORMAT)) { /* no version */
+                /*
+                 * DN format is RFC 4514 compliant.
+                 * But it hasn't taken care of the multiple spaces yet.
+                 */
+                ud_flags &= ~SLAPI_UPGRADEDNFORMAT;
+                ud_flags |= SLAPI_UPGRADEDNFORMAT_V1;
+                slapi_pblock_set(pb, SLAPI_SEQ_TYPE, &ud_flags);
+                rc = 3; /* 0: need upgrade (dn norm sp, only) */
+            } else {
+                /* DN format already takes care of the multiple spaces */
+                slapi_log_err(SLAPI_LOG_INFO, "ldbm_back_upgradednformat",
+                              "Instance %s in %s is up-to-date\n",
+                              instance_name, workdbdir);
+                rc = 0; /* 0: up-to-date */
+                goto bail;
+            }
+        } else {
+            /* DN format is not RFC 4514 compliant */
+            ud_flags |= SLAPI_UPGRADEDNFORMAT | SLAPI_UPGRADEDNFORMAT_V1;
+            slapi_pblock_set(pb, SLAPI_SEQ_TYPE, &ud_flags);
+            rc = 1; /* 0: need upgrade (both) */
+        }
+    } else {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Failed to get DBVERSION (Instance name: %s, dir %s)\n",
+                      instance_name, workdbdir);
+        rc = -1; /* error */
+        goto bail;
+    }
+
+    sep = PL_strrchr(workdbdir, '/');
+    if (!sep) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Working DB instance dir %s does not include %s file\n",
+                      workdbdir, ID2ENTRY);
+        goto bail;
+    }
+    *sep = '\0';
+    li->li_directory = workdbdir;
+    BDB_CONFIG(li)->bdb_log_directory = workdbdir;
+    inst->inst_parent_dir_name = workdbdir;
+
+    if (run_from_cmdline) {
+        if (0 != bdb_start(li, DBLAYER_IMPORT_MODE)) {
+            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                          "Failed to init database\n");
+            goto bail;
+        }
+    }
+
+    /* bdb_instance_start will init the id2entry index. */
+    be = inst->inst_be;
+    if (0 != bdb_instance_start(be, DBLAYER_IMPORT_MODE)) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                      "Failed to init instance %s\n", inst->inst_name);
+        goto bail;
+    }
+
+    if (run_from_cmdline) {
+        vlv_init(inst); /* Initialise the Virtual List View code */
+    }
+
+    rc = bdb_back_ldif2db(pb);
+
+    /* close the database */
+    if (run_from_cmdline) {
+        if (0 != dblayer_close(li, DBLAYER_IMPORT_MODE)) {
+            slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_upgradednformat",
+                          "Failed to close database\n");
+            goto bail;
+        }
+    }
+    *sep = '/';
+    if (((0 == rc) && !(ud_flags & SLAPI_DRYRUN)) ||
+        ((rc == 0) && (ud_flags & SLAPI_DRYRUN))) {
+        /* modify the DBVERSION files if the DN upgrade was successful OR
+         * if DRYRUN, the backend instance is up-to-date. */
+        bdb_version_write(li, workdbdir, NULL, DBVERSION_ALL); /* inst db dir */
+    }
+    /* Remove the DB env files */
+    bdb_remove_env(li);
+
+    li->li_directory = origdbdir;
+    BDB_CONFIG(li)->bdb_log_directory = origlogdir;
+    inst->inst_parent_dir_name = originstparentdir;
+
+bail:
+    slapi_ch_free_string(&workdbdir);
+    slapi_ch_free_string(&ldbmversion);
+    slapi_ch_free_string(&dataversion);
+    return rc;
+}

+ 394 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_misc.c

@@ -0,0 +1,394 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include "bdb_layer.h"
+
+/* TODO: make this a 64-bit return value */
+/*
+ * bdb_db_size - report the total database size for this backend.
+ *
+ * Reads ldbminfo from SLAPI_PLUGIN_PRIVATE, asks dblayer for the size and
+ * stores it in SLAPI_DBSIZE.  Returns the dblayer_database_size() result.
+ */
+int
+bdb_db_size(Slapi_PBlock *pb)
+{
+    struct ldbminfo *li;
+    unsigned int size; /* TODO: make this a 64-bit return value */
+    int rc;
+
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    rc = dblayer_database_size(li, &size);     /* TODO: make this a 64-bit return value */
+    slapi_pblock_set(pb, SLAPI_DBSIZE, &size); /* TODO: make this a 64-bit return value */
+
+    return rc;
+}
+/*
+ * bdb_cleanup - free BDB-specific state hanging off ldbminfo.
+ *
+ * Must be called after dblayer_close.  Releases the instance set, the BDB
+ * log directory string, the dblayer private struct, the global USN counter
+ * (when entryusn is global) and the dblayer config.  Always returns 0;
+ * a NULL li_dblayer_private means cleanup already ran and is a no-op.
+ */
+int
+bdb_cleanup(struct ldbminfo *li)
+{
+
+    slapi_log_err(SLAPI_LOG_TRACE, "bdb_cleanup", "bdb backend specific cleanup\n");
+    /* We assume that dblayer_close has been called already */
+    dblayer_private *priv = li->li_dblayer_private;
+    int rval = 0;
+
+    if (NULL == priv) /* already terminated.  nothing to do */
+        return rval;
+
+    objset_delete(&(li->li_instance_set));
+
+    slapi_ch_free_string(&BDB_CONFIG(li)->bdb_log_directory);
+    slapi_ch_free((void **)&priv);
+    li->li_dblayer_private = NULL;
+
+    if (config_get_entryusn_global()) {
+        slapi_counter_destroy(&li->li_global_usn_counter);
+    }
+    slapi_ch_free((void **)&(li->li_dblayer_config));
+
+    return 0;
+}
+
+/* check if a DN is in the include list but NOT the exclude list
+ * [used by both ldif2db and db2ldif]
+ *
+ * Exclusion wins: a DN under any exclude suffix returns 0 regardless of
+ * the include list.  With no lists at all, everything is dumpable (1).
+ * With an include list, only DNs under one of its suffixes return 1.
+ */
+int
+bdb_back_ok_to_dump(const char *dn, char **include, char **exclude)
+{
+    int i = 0;
+
+    if (!(include || exclude))
+        return (1);
+
+    if (exclude) {
+        i = 0;
+        while (exclude[i]) {
+            if (slapi_dn_issuffix(dn, exclude[i]))
+                return (0);
+            i++;
+        }
+    }
+
+    if (include) {
+        i = 0;
+        while (include[i]) {
+            if (slapi_dn_issuffix(dn, include[i]))
+                return (1);
+            i++;
+        }
+        /* not in include... bye. */
+        return (0);
+    }
+
+    return (1);
+}
+
+/* fetch include/exclude DNs from the pblock and normalize them --
+ * returns true if there are any include/exclude DNs
+ * [used by both ldif2db and db2ldif]
+ *
+ * On return *include / *exclude own duplicated arrays (caller frees).
+ * If either output pointer is NULL the function only reports whether
+ * the pblock carries any lists, without copying them.
+ */
+int
+bdb_back_fetch_incl_excl(Slapi_PBlock *pb, char ***include, char ***exclude)
+{
+    char **pb_incl, **pb_excl;
+
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_INCLUDE, &pb_incl);
+    slapi_pblock_get(pb, SLAPI_LDIF2DB_EXCLUDE, &pb_excl);
+    if ((NULL == include) || (NULL == exclude)) {
+        return 0;
+    }
+    *include = *exclude = NULL;
+
+    /* pb_incl/excl are both normalized */
+    *exclude = slapi_ch_array_dup(pb_excl);
+    *include = slapi_ch_array_dup(pb_incl);
+
+    return (pb_incl || pb_excl);
+}
+
+/*
+ * bdb_get_id2entry_size - size in bytes of the instance's id2entry file.
+ *
+ * Returns 0 when inst is NULL or the file cannot be stat'ed, so callers
+ * can treat 0 as "unknown/empty".
+ */
+PRUint64
+bdb_get_id2entry_size(ldbm_instance *inst)
+{
+    struct ldbminfo *li = NULL;
+    char *id2entry_file = NULL;
+    PRFileInfo64 info;
+    int rc;
+    char inst_dir[MAXPATHLEN], *inst_dirp = NULL;
+
+    if (NULL == inst) {
+        return 0;
+    }
+    li = inst->inst_li;
+    inst_dirp = dblayer_get_full_inst_dir(li, inst, inst_dir, MAXPATHLEN);
+    id2entry_file = slapi_ch_smprintf("%s/%s", inst_dirp,
+                                      ID2ENTRY LDBM_FILENAME_SUFFIX);
+    /* dblayer_get_full_inst_dir may return the stack buffer or a heap
+     * allocation; only free the latter. */
+    if (inst_dirp != inst_dir) {
+        slapi_ch_free_string(&inst_dirp);
+    }
+    rc = PR_GetFileInfo64(id2entry_file, &info);
+    slapi_ch_free_string(&id2entry_file);
+    if (rc) {
+        return 0;
+    }
+    return info.size;
+}
+
+/*
+ * bdb_start_autotune - compute and apply autotuned cache sizes at startup.
+ *
+ * Sizes the db cache, per-backend entry caches, dn caches and (optionally)
+ * the import cache from system memory and the nsslapd-cache-autosize
+ * settings, then sanity-checks the total against available memory.
+ * Returns 0 on success, SLAPI_FAIL_GENERAL on bad settings or if system
+ * memory limits cannot be determined.
+ */
+int
+bdb_start_autotune(struct ldbminfo *li)
+{
+    Object *inst_obj = NULL;
+    ldbm_instance *inst = NULL;
+    /* size_t is a platform unsigned int, IE uint64_t */
+    uint64_t total_cache_size = 0;
+    uint64_t entry_size = 0;
+    uint64_t dn_size = 0;
+    uint64_t zone_size = 0;
+    uint64_t import_size = 0;
+    uint64_t db_size = 0;
+    /* For clamping the autotune value to a 64Mb boundary */
+    uint64_t clamp_div = 0;
+    /* Backend count */
+    uint64_t backend_count = 0;
+
+    int_fast32_t autosize_percentage = 0;
+    int_fast32_t autosize_db_percentage_split = 0;
+    int_fast32_t import_percentage = 0;
+    util_cachesize_result issane;
+    char *msg = "";       /* This will be set by one of the two cache sizing paths below. */
+    char size_to_str[32]; /* big enough to hold %ld */
+
+
+    /* == Begin autotune == */
+
+    /*
+    * The process that we take here now defaults to autotune first, then override
+    * with manual values if so chosen.
+    *
+    * This means first off, we need to check for valid autosizing values.
+    * We then calculate what our system tuning would be. We clamp these to the
+    * nearest value. IE 487MB would be 510656512 bytes, so we clamp this to
+    * 536870912 bytes, aka 512MB. This is aligned to 64MB boundaries.
+    *
+    * Now that we have these values, we then check the values of dbcachesize
+    * and cachememsize. If they are 0, we set them to the auto-calculated value.
+    * If they are non-0, we skip the value.
+    *
+    * This way, we are really autotuning on "first run", and if the admin wants
+    * to up the values, they merely need to reset the value to 0, and let the
+    * server restart.
+    *
+    * wibrown 2017
+    */
+
+    /* sanity check the autosizing values,
+     no value or sum of values larger than 100.
+    */
+    backend_count = objset_size(li->li_instance_set);
+
+    /* If autosize == 0, set autosize_per to 10. */
+    if (li->li_cache_autosize <= 0) {
+        /* First, set our message. In the case autosize is 0, we calculate some
+         * sane defaults and populate these values, but it's only on first run.
+         */
+        msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize\n";
+        autosize_percentage = 10;
+    } else {
+        /* In this case we really are setting the values each start up, so
+         * change the msg.
+         */
+        msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize\n";
+        autosize_percentage = li->li_cache_autosize;
+    }
+    /* Has to be less than 0, 0 means to disable I think */
+    if (li->li_import_cache_autosize < 0) {
+        import_percentage = 50;
+    } else {
+        import_percentage = li->li_import_cache_autosize;
+    }
+    /* This doesn't control the availability of the feature, so we can take the
+     * default from ldbm_config.c
+     */
+    if (li->li_cache_autosize_split == 0) {
+        autosize_db_percentage_split = 25;
+    } else {
+        autosize_db_percentage_split = li->li_cache_autosize_split;
+    }
+
+
+    /* Check the values are sane. */
+    if ((autosize_percentage > 100) || (import_percentage > 100) || (autosize_db_percentage_split > 100) ||
+        ((autosize_percentage > 0) && (import_percentage > 0) && (autosize_percentage + import_percentage > 100))) {
+        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Cache autosizing: bad settings, value or sum of values can not larger than 100.\n");
+        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "You should change nsslapd-cache-autosize + nsslapd-import-cache-autosize in dse.ldif to be less than 100.\n");
+        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Reasonable starting values are nsslapd-cache-autosize: 10, nsslapd-import-cache-autosize: -1.\n");
+        return SLAPI_FAIL_GENERAL;
+    }
+
+    /* Get our platform memory values. */
+    slapi_pal_meminfo *mi = spal_meminfo_get();
+    if (mi == NULL) {
+        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Unable to determine system page limits\n");
+        return SLAPI_FAIL_GENERAL;
+    }
+
+    /* calculate the needed values */
+    zone_size = (autosize_percentage * mi->system_total_bytes) / 100;
+    /* This is how much we "might" use, lets check it's sane. */
+    /* In the case it is not, this will *reduce* the allocation */
+    issane = util_is_cachesize_sane(mi, &zone_size);
+    if (issane == UTIL_CACHESIZE_REDUCED) {
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Your autosized cache values have been reduced. Likely your nsslapd-cache-autosize percentage is too high.\n");
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s", msg);
+    }
+    /* It's valid, lets divide it up and set according to user prefs */
+    db_size = (autosize_db_percentage_split * zone_size) / 100;
+
+    /* Cap the DB size at 1.5G, as this doesn't help perf much more (lkrispen's advice) */
+    /* NOTE: Do we need a minimum DB size? */
+    if (db_size > (1536 * MEGABYTE)) {
+        db_size = (1536 * MEGABYTE);
+    }
+
+
+    /* NOTE: Because of how we workout entry_size, even if
+     * have autosize split to say ... 90% for dbcache, because
+     * we cap db_size, we use zone_size - db_size, meaning that entry
+     * cache still gets the remaining memory *even* though we didn't use it all.
+     * If we didn't do this, entry_cache would only get 10% of of the avail, even
+     * if db_size was caped at say 5% down from 90.
+     */
+    if (backend_count > 0) {
+        /* Number of entry cache pages per backend. */
+        entry_size = (zone_size - db_size) / backend_count;
+        /* Now split this into dn and entry */
+        dn_size = entry_size * 0.1;
+        entry_size = entry_size * 0.9;
+        /* Now, clamp this value to a 64mb boundary. */
+        /* Now divide the entry pages by this, and also mod. If mod != 0, we need
+         * to add 1 to the diveded number. This should give us:
+         * 510 * 1024 * 1024 == 510MB
+         * 534773760 bytes
+         * 130560 pages at 4096 pages.
+         * 16384 pages for 64Mb
+         * 130560 / 16384 = 7
+         * 130560 % 16384 = 15872 which is != 0
+         * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB.
+         */
+        if (entry_size % (64 * MEGABYTE) != 0) {
+            /* If we want to clamp down, remove the "+1". This would change the above from 510mb -> 448mb. */
+            clamp_div = (entry_size / (64 * MEGABYTE)) + 1;
+            entry_size = clamp_div * (64 * MEGABYTE);
+        }
+        if (dn_size % (64 * MEGABYTE) != 0) {
+            /* If we want to clamp down, remove the "+1". This would change the above from 510mb -> 448mb. */
+            clamp_div = (dn_size / (64 * MEGABYTE)) + 1;
+            dn_size = clamp_div * (64 * MEGABYTE);
+        }
+    }
+
+    slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %" PRIu64 "k physical memory\n", mi->system_total_bytes / 1024);
+    slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %" PRIu64 "k available\n", mi->system_available_bytes / 1024);
+
+    /* We've now calculated the autotuning values. Do we need to apply it?
+     * we use the logic of "if size is 0, or autosize is > 0. This way three
+     * options can happen.
+     *
+     * First, during first run, dbcache is 0, and autosize is 0. So we apply
+     * the autotuned value ONLY on first run.
+     * Second, once the admin sets a value, or autotuning set a value, it sticks.
+     * Third, if the admin really does want autosizing to take effect every
+     * start up, we disregard the defined value.
+     */
+
+    /* First, check the dbcache */
+    if (li->li_dbcachesize == 0 || li->li_cache_autosize > 0) {
+        slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: db cache: %" PRIu64 "k\n", db_size / 1024);
+        if (db_size < (500 * MEGABYTE)) {
+            db_size = db_size / 1.25;
+        }
+        /* Have to set this value through text. */
+        sprintf(size_to_str, "%" PRIu64, db_size);
+        bdb_config_internal_set(li, CONFIG_DBCACHESIZE, size_to_str);
+    }
+    total_cache_size += li->li_dbcachesize;
+
+    /* For each backend */
+    /*   apply the appropriate cache size if 0 */
+    if (backend_count > 0) {
+        li->li_cache_autosize_ec = entry_size;
+        li->li_dncache_autosize_ec = dn_size;
+    }
+
+    for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+
+        inst = (ldbm_instance *)object_get_data(inst_obj);
+        uint64_t cache_size = (uint64_t)cache_get_max_size(&(inst->inst_cache));
+        uint64_t dncache_size = (uint64_t)cache_get_max_size(&(inst->inst_dncache));
+
+        /* This is the point where we decide to apply or not.
+         * We have to check for the mincachesize as setting 0 resets
+         * to this value. This could cause an issue with a *tiny* install, but
+         * it's highly unlikely.
+         */
+        if (cache_size == 0 || cache_size == MINCACHESIZE || li->li_cache_autosize > 0) {
+            slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: %s entry cache (%" PRIu64 " total): %" PRIu64 "k\n", inst->inst_name, backend_count, entry_size / 1024);
+            cache_set_max_entries(&(inst->inst_cache), -1);
+            cache_set_max_size(&(inst->inst_cache), li->li_cache_autosize_ec, CACHE_TYPE_ENTRY);
+        }
+        if (dncache_size == 0 || dncache_size == MINCACHESIZE || li->li_cache_autosize > 0) {
+            slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: %s dn cache (%" PRIu64 " total): %" PRIu64 "k\n", inst->inst_name, backend_count, dn_size / 1024);
+            cache_set_max_entries(&(inst->inst_dncache), -1);
+            cache_set_max_size(&(inst->inst_dncache), li->li_dncache_autosize_ec, CACHE_TYPE_DN);
+        }
+        /* Refresh this value now. */
+        cache_size = (PRUint64)cache_get_max_size(&(inst->inst_cache));
+        /* db_size is reused here: from this point on it holds the id2entry
+         * file size of this instance, not the db cache size computed above. */
+        db_size = bdb_get_id2entry_size(inst);
+        if (cache_size < db_size) {
+            slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start",
+                          "%s: entry cache size %" PRIu64 " B is "
+                          "less than db size %" PRIu64 " B; "
+                          "We recommend to increase the entry cache size "
+                          "nsslapd-cachememsize.\n",
+                          inst->inst_name, cache_size, db_size);
+        }
+        total_cache_size += cache_size;
+        total_cache_size += dncache_size;
+    }
+    /* autosizing importCache */
+    if (li->li_import_cache_autosize > 0) {
+        /* Use import percentage here, as it's been corrected for -1 behaviour */
+        import_size = (import_percentage * mi->system_total_bytes) / 100;
+        issane = util_is_cachesize_sane(mi, &import_size);
+        if (issane == UTIL_CACHESIZE_REDUCED) {
+            slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Your autosized import cache values have been reduced. Likely your nsslapd-import-cache-autosize percentage is too high.\n");
+        }
+        /* We just accept the reduced allocation here. */
+        slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: import cache: %" PRIu64 "k\n", import_size / 1024);
+
+        sprintf(size_to_str, "%" PRIu64, import_size);
+        /* NOTE(review): this uses ldbm_config_internal_set while dbcachesize
+         * above uses bdb_config_internal_set -- presumably because the import
+         * cache size remains an LDBM-level setting after the backend split;
+         * confirm this asymmetry is intentional. */
+        ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, size_to_str);
+    }
+
+    /* Finally, lets check that the total result is sane. */
+    slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "total cache size: %" PRIu64 " B; \n", total_cache_size);
+
+    issane = util_is_cachesize_sane(mi, &total_cache_size);
+    if (issane != UTIL_CACHESIZE_VALID) {
+        /* Right, it's time to panic */
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n");
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "In a future release this WILL prevent server start up. You MUST alter your configuration.\n");
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Total entry cache size: %" PRIu64 " B; dbcache size: %" PRIu64 " B; available memory size: %" PRIu64 " B; \n",
+                      total_cache_size, (uint64_t)li->li_dbcachesize, mi->system_available_bytes);
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s\n", msg);
+        /* WB 2016 - This should be UNCOMMENTED in a future release */
+        /* return SLAPI_FAIL_GENERAL; */
+    }
+
+    spal_meminfo_destroy(mi);
+
+    /* == End autotune == */
+    return 0;
+}

+ 5 - 7
ldap/servers/slapd/back-ldbm/monitor.c → ldap/servers/slapd/back-ldbm/db-bdb/bdb_monitor.c

@@ -1,6 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2019 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -13,8 +12,7 @@
 
 /* monitor.c - ldbm backend monitor function */
 
-#include "back-ldbm.h"
-#include "dblayer.h" /* XXXmcs: not sure this is good to do... */
+#include "bdb_layer.h"
 #include <sys/stat.h>
 
 
@@ -136,7 +134,7 @@ ldbm_back_monitor_instance_search(Slapi_PBlock *pb __attribute__((unused)),
     }
 #endif
 
-    if (dblayer_memp_stat(li, NULL, &mpfstat) != 0) {
+    if (bdb_memp_stat(li, NULL, &mpfstat) != 0) {
         *returncode = LDAP_OPERATIONS_ERROR;
         return SLAPI_DSE_CALLBACK_ERROR;
     }
@@ -222,7 +220,7 @@ ldbm_back_monitor_search(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAft
     MSET("database");
 
     /* we have to ask for file stats in order to get correct global stats */
-    if (dblayer_memp_stat(li, &mpstat, &mpfstat) != 0) {
+    if (bdb_memp_stat(li, &mpstat, &mpfstat) != 0) {
         *returncode = LDAP_OPERATIONS_ERROR;
         return SLAPI_DSE_CALLBACK_ERROR;
     }
@@ -305,7 +303,7 @@ ldbm_back_dbmonitor_search(Slapi_PBlock *pb __attribute__((unused)),
     dbpriv = (dblayer_private *)li->li_dblayer_private;
     PR_ASSERT(NULL != dbpriv);
 
-    perfctrs_as_entry(e, dbpriv->perf_private, dbpriv->dblayer_env->dblayer_DB_ENV);
+    perfctrs_as_entry(e, BDB_CONFIG(li)->perf_private, ((bdb_db_env *)dbpriv->dblayer_env)->bdb_DB_ENV);
 
     *returncode = LDAP_SUCCESS;
     return SLAPI_DSE_CALLBACK_OK;

+ 7 - 8
ldap/servers/slapd/back-ldbm/upgrade.c → ldap/servers/slapd/back-ldbm/db-bdb/bdb_upgrade.c

@@ -1,6 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2019 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -14,7 +13,7 @@
 
 /* upgrade.c --- upgrade from a previous version of the database */
 
-#include "back-ldbm.h"
+#include "bdb_layer.h"
 
 /*
  * ldbm_compat_versions holds DBVERSION strings for all versions of the
@@ -124,7 +123,7 @@ check_db_version(struct ldbminfo *li, int *action)
     char *dataversion = NULL;
 
     *action = 0;
-    result = dbversion_read(li, li->li_directory, &ldbmversion, &dataversion);
+    result = bdb_version_read(li, li->li_directory, &ldbmversion, &dataversion);
     if (result != 0) {
         return 0;
     } else if (NULL == ldbmversion || '\0' == *ldbmversion) {
@@ -147,13 +146,13 @@ check_db_version(struct ldbminfo *li, int *action)
         return DBVERSION_NOT_SUPPORTED;
     }
     if (value & DBVERSION_UPGRADE_3_4) {
-        dblayer_set_recovery_required(li);
+        bdb_set_recovery_required(li);
         *action = DBVERSION_UPGRADE_3_4;
     } else if (value & DBVERSION_UPGRADE_4_4) {
-        dblayer_set_recovery_required(li);
+        bdb_set_recovery_required(li);
         *action = DBVERSION_UPGRADE_4_4;
     } else if (value & DBVERSION_UPGRADE_4_5) {
-        dblayer_set_recovery_required(li);
+        bdb_set_recovery_required(li);
         *action = DBVERSION_UPGRADE_4_5;
     }
     if (value & DBVERSION_RDN_FORMAT) {
@@ -205,7 +204,7 @@ check_db_inst_version(ldbm_instance *inst)
     inst_dirp =
         dblayer_get_full_inst_dir(inst->inst_li, inst, inst_dir, MAXPATHLEN * 2);
 
-    result = dbversion_read(inst->inst_li, inst_dirp, &ldbmversion, &dataversion);
+    result = bdb_version_read(inst->inst_li, inst_dirp, &ldbmversion, &dataversion);
     if (result != 0) {
         return rval;
     } else if (NULL == ldbmversion || '\0' == *ldbmversion) {

+ 233 - 0
ldap/servers/slapd/back-ldbm/db-bdb/bdb_verify.c

@@ -0,0 +1,233 @@
+/** BEGIN COPYRIGHT BLOCK
+ * Copyright (C) 2019 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * License: GPL (version 3 or any later version).
+ * See LICENSE for details.
+ * END COPYRIGHT BLOCK **/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+/* bdb_verify.c - verify database files */
+
+#include "bdb_layer.h"
+
+static int
+dbverify_ext(ldbm_instance *inst, int verbose)
+{
+    char dbdir[MAXPATHLEN];
+    char *filep = NULL;
+    PRDir *dirhandle = NULL;
+    PRDirEntry *direntry = NULL;
+    DB *dbp = NULL;
+    size_t tmplen = 0;
+    size_t filelen = 0;
+    int rval = 1;
+    int rval_main = 0;
+    struct ldbminfo *li = inst->inst_li;
+    dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
+    bdb_config *conf = (bdb_config *)li->li_dblayer_config;
+    struct bdb_db_env *pEnv = priv->dblayer_env;
+
+    dbdir[sizeof(dbdir) - 1] = '\0';
+    PR_snprintf(dbdir, sizeof(dbdir), "%s/%s", inst->inst_parent_dir_name,
+                inst->inst_dir_name);
+    if ('\0' != dbdir[sizeof(dbdir) - 1]) /* overflown */
+    {
+        slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                      "db path too long: %s/%s\n",
+                      inst->inst_parent_dir_name, inst->inst_dir_name);
+        return 1;
+    }
+    tmplen = strlen(dbdir);
+    filep = dbdir + tmplen;
+    filelen = sizeof(dbdir) - tmplen;
+
+    /* run dbverify on each db file */
+    dirhandle = PR_OpenDir(dbdir);
+    if (!dirhandle) {
+        slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                      "PR_OpenDir (%s) failed (%d): %s\n",
+                      dbdir, PR_GetError(), slapd_pr_strerror(PR_GetError()));
+        return 1;
+    }
+    while (NULL !=
+           (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
+        /* struct attrinfo *ai = NULL; */
+        dbp = NULL;
+
+        if (!direntry->name) {
+            break;
+        }
+        if (!strstr(direntry->name, LDBM_FILENAME_SUFFIX)) /* non db file */
+        {
+            continue;
+        }
+        if (sizeof(direntry->name) + 2 > filelen) {
+            slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                          "db path too long: %s/%s\n",
+                          dbdir, direntry->name);
+            continue;
+        }
+        PR_snprintf(filep, filelen, "/%s", direntry->name);
+        rval = db_create(&dbp, pEnv->bdb_DB_ENV, 0);
+        if (0 != rval) {
+            slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                          "Unable to create id2entry db file %d\n", rval);
+            return rval;
+        }
+
+#define VLVPREFIX "vlv#"
+        if (0 != strncmp(direntry->name, ID2ENTRY, strlen(ID2ENTRY))) {
+            struct attrinfo *ai = NULL;
+            char *p = NULL;
+            p = strstr(filep, LDBM_FILENAME_SUFFIX); /* since already checked,
+                                                        it must have it */
+            if (p)
+                *p = '\0';
+            ainfo_get(inst->inst_be, filep + 1, &ai);
+            if (p)
+                *p = '.';
+            if (ai->ai_key_cmp_fn) {
+                dbp->app_private = (void *)ai->ai_key_cmp_fn;
+                dbp->set_bt_compare(dbp, bdb_bt_compare);
+            }
+            if (idl_get_idl_new()) {
+                rval = dbp->set_pagesize(dbp,
+                                         (conf->bdb_index_page_size == 0) ? DBLAYER_INDEX_PAGESIZE : conf->bdb_index_page_size);
+            } else {
+                rval = dbp->set_pagesize(dbp,
+                                         (conf->bdb_page_size == 0) ? DBLAYER_PAGESIZE : conf->bdb_page_size);
+            }
+            if (0 != rval) {
+                slapi_log_err(SLAPI_LOG_ERR, "DB verify",
+                              "Unable to set pagesize flags to db (%d)\n", rval);
+                return rval;
+            }
+            if (0 == strncmp(direntry->name, VLVPREFIX, strlen(VLVPREFIX))) {
+                rval = dbp->set_flags(dbp, DB_RECNUM);
+                if (0 != rval) {
+                    slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                                  "Unable to set RECNUM flag to vlv index (%d)\n", rval);
+                    return rval;
+                }
+            } else if (idl_get_idl_new()) {
+                rval = dbp->set_flags(dbp, DB_DUP | DB_DUPSORT);
+                if (0 != rval) {
+                    slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                                  "Unable to set DUP flags to db (%d)\n", rval);
+                    return rval;
+                }
+
+                if (ai->ai_dup_cmp_fn) {
+                    /* If set, use the special dup compare callback */
+                    rval = dbp->set_dup_compare(dbp, ai->ai_dup_cmp_fn);
+                } else {
+                    rval = dbp->set_dup_compare(dbp, idl_new_compare_dups);
+                }
+
+                if (0 != rval) {
+                    slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                                  "Unable to set dup_compare to db (%d)\n", rval);
+                    return rval;
+                }
+            }
+        }
+#undef VLVPREFIX
+        rval = dbp->verify(dbp, dbdir, NULL, NULL, 0);
+        if (0 == rval) {
+            if (verbose) {
+                slapi_log_err(SLAPI_LOG_INFO, "dbverify_ext",
+                              "%s: ok\n", dbdir);
+            }
+        } else {
+            slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
+                          "verify failed(%d): %s\n", rval, dbdir);
+        }
+        rval_main |= rval;
+        *filep = '\0';
+    }
+    PR_CloseDir(dirhandle);
+
+    return rval_main;
+}
+
+int
+bdb_verify(Slapi_PBlock *pb)
+{
+    struct ldbminfo *li = NULL;
+    Object *inst_obj = NULL;
+    ldbm_instance *inst = NULL;
+    int verbose = 0;
+    int rval = 1;
+    int rval_main = 0;
+    char **instance_names = NULL;
+    char *dbdir = NULL;
+
+    slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_dbverify", "Verifying db files...\n");
+    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_names);
+    slapi_pblock_get(pb, SLAPI_SEQ_TYPE, &verbose);
+    slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    slapi_pblock_get(pb, SLAPI_DBVERIFY_DBDIR, &dbdir);
+    ldbm_config_load_dse_info(li);
+    bdb_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off");
+
+    /* no write needed; choose EXPORT MODE */
+    if (0 != bdb_start(li, DBLAYER_EXPORT_MODE)) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_dbverify",
+                      "dbverify: Failed to init database\n");
+        return rval;
+    }
+
+    /* server is up */
+    slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_dbverify", "server is up\n");
+    if (instance_names) /* instance is specified */
+    {
+        char **inp = NULL;
+        for (inp = instance_names; inp && *inp; inp++) {
+            inst = ldbm_instance_find_by_name(li, *inp);
+            if (inst) {
+                if (dbdir) {
+                    /* verifying backup */
+                    slapi_ch_free_string(&inst->inst_parent_dir_name);
+                    inst->inst_parent_dir_name = slapi_ch_strdup(dbdir);
+                }
+                rval_main |= dbverify_ext(inst, verbose);
+            } else {
+                rval_main |= 1; /* no such instance */
+            }
+        }
+    } else /* all instances */
+    {
+        for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
+             inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
+            inst = (ldbm_instance *)object_get_data(inst_obj);
+            /* check if an import/restore is already ongoing... */
+            if (instance_set_busy(inst) != 0) {
+                /* standalone, only.  never happens */
+                slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_dbverify",
+                              "Backend '%s' is already in the middle of "
+                              "another task and cannot be disturbed.\n",
+                              inst->inst_name);
+                continue; /* skip this instance and go to the next*/
+            }
+            if (dbdir) {
+                /* verifying backup */
+                slapi_ch_free_string(&inst->inst_parent_dir_name);
+                inst->inst_parent_dir_name = slapi_ch_strdup(dbdir);
+            }
+            rval_main |= dbverify_ext(inst, verbose);
+        }
+    }
+
+    /* close the database down again */
+    rval = bdb_post_close(li, DBLAYER_EXPORT_MODE);
+    if (0 != rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "ldbm_back_dbverify", "Failed to close database\n");
+    }
+
+    return rval_main;
+}

+ 15 - 16
ldap/servers/slapd/back-ldbm/dbversion.c → ldap/servers/slapd/back-ldbm/db-bdb/bdb_version.c

@@ -1,6 +1,5 @@
 /** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
+ * Copyright (C) 2019 Red Hat, Inc.
  * All rights reserved.
  *
  * License: GPL (version 3 or any later version).
@@ -12,7 +11,7 @@
 #endif
 
 
-#include "back-ldbm.h"
+#include "bdb_layer.h"
 
 static void
 mk_dbversion_fullpath(struct ldbminfo *li, const char *directory, char *filename)
@@ -21,7 +20,7 @@ mk_dbversion_fullpath(struct ldbminfo *li, const char *directory, char *filename
         if (is_fullpath((char *)directory)) {
             PR_snprintf(filename, MAXPATHLEN * 2, "%s/%s", directory, DBVERSION_FILENAME);
         } else {
-            char *home_dir = dblayer_get_home_dir(li, NULL);
+            char *home_dir = bdb_get_home_dir(li, NULL);
             /* if relpath, nsslapd-dbhome_directory should be set */
             PR_snprintf(filename, MAXPATHLEN * 2, "%s/%s/%s", home_dir, directory, DBVERSION_FILENAME);
         }
@@ -31,14 +30,14 @@ mk_dbversion_fullpath(struct ldbminfo *li, const char *directory, char *filename
 }
 
 /*
- *  Function: dbversion_write
+ *  Function: bdb_version_write
  *
  *  Returns: returns 0 on success, -1 on failure
  *
  *  Description: This function writes the DB version file.
  */
 int
-dbversion_write(struct ldbminfo *li, const char *directory, const char *dataversion, PRUint32 flags)
+bdb_version_write(struct ldbminfo *li, const char *directory, const char *dataversion, PRUint32 flags)
 {
     char filename[MAXPATHLEN * 2];
     PRFileDesc *prfd;
@@ -54,7 +53,7 @@ dbversion_write(struct ldbminfo *li, const char *directory, const char *datavers
     /* Open the file */
     if ((prfd = PR_Open(filename, PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE,
                         SLAPD_DEFAULT_FILE_MODE)) == NULL) {
-        slapi_log_err(SLAPI_LOG_ERR, "dbversion_write - "
+        slapi_log_err(SLAPI_LOG_ERR, "bdb_version_write - "
                                      "Could not open file \"%s\" for writing " SLAPI_COMPONENT_NAME_NSPR " %d (%s)\n",
                       filename, PR_GetError(), slapd_pr_strerror(PR_GetError()));
         rc = -1;
@@ -89,14 +88,14 @@ dbversion_write(struct ldbminfo *li, const char *directory, const char *datavers
         PL_strncpyz(ptr, "\n", sizeof(buf) - len);
         len = strlen(buf);
         if (slapi_write_buffer(prfd, buf, len) != (PRInt32)len) {
-            slapi_log_err(SLAPI_LOG_ERR, "dbversion_write", "Could not write to file \"%s\"\n", filename);
+            slapi_log_err(SLAPI_LOG_ERR, "bdb_version_write", "Could not write to file \"%s\"\n", filename);
             rc = -1;
         }
         if (rc == 0 && dataversion != NULL) {
             sprintf(buf, "%s\n", dataversion);
             len = strlen(buf);
             if (slapi_write_buffer(prfd, buf, len) != (PRInt32)len) {
-                slapi_log_err(SLAPI_LOG_ERR, "dbversion_write", "Could not write to file \"%s\"\n", filename);
+                slapi_log_err(SLAPI_LOG_ERR, "bdb_version_write", "Could not write to file \"%s\"\n", filename);
                 rc = -1;
             }
         }
@@ -106,14 +105,14 @@ dbversion_write(struct ldbminfo *li, const char *directory, const char *datavers
 }
 
 /*
- *  Function: dbversion_read
+ *  Function: bdb_version_read
  *
  *  Returns: returns 0 on success, -1 on failure
  *
  *  Description: This function reads the DB version file.
  */
 int
-dbversion_read(struct ldbminfo *li, const char *directory, char **ldbmversion, char **dataversion)
+bdb_version_read(struct ldbminfo *li, const char *directory, char **ldbmversion, char **dataversion)
 {
     char filename[MAXPATHLEN * 2];
     PRFileDesc *prfd;
@@ -160,15 +159,15 @@ dbversion_read(struct ldbminfo *li, const char *directory, char **ldbmversion, c
         (void)PR_Close(prfd);
 
         if (dataversion == NULL || *dataversion == NULL) {
-            slapi_log_err(SLAPI_LOG_DEBUG, "dbversion_read", "dataversion not present in \"%s\"\n", filename);
+            slapi_log_err(SLAPI_LOG_DEBUG, "bdb_version_read", "dataversion not present in \"%s\"\n", filename);
         }
         if (*ldbmversion == NULL) {
             /* DBVERSIOn is corrupt, COMPLAIN! */
             /* This is IDRM           Identifier removed (POSIX.1)
              * which seems appropriate for the error here :)
              */
-            slapi_log_err(SLAPI_LOG_CRIT, "dbversion_read", "Could not parse file \"%s\". It may be corrupted.\n", filename);
-            slapi_log_err(SLAPI_LOG_CRIT, "dbversion_read", "It may be possible to recover by replacing with a valid DBVERSION file from another DB instance\n");
+            slapi_log_err(SLAPI_LOG_CRIT, "bdb_version_read", "Could not parse file \"%s\". It may be corrupted.\n", filename);
+            slapi_log_err(SLAPI_LOG_CRIT, "bdb_version_read", "It may be possible to recover by replacing with a valid DBVERSION file from another DB instance\n");
             return EIDRM;
         }
         return 0;
@@ -177,14 +176,14 @@ dbversion_read(struct ldbminfo *li, const char *directory, char **ldbmversion, c
 
 
 /*
- *  Function: dbversion_exists
+ *  Function: bdb_version_exists
  *
  *  Returns: 1 for exists, 0 for not.
  *
  *  Description: This function checks if the DB version file exists.
  */
 int
-dbversion_exists(struct ldbminfo *li, const char *directory)
+bdb_version_exists(struct ldbminfo *li, const char *directory)
 {
     char filename[MAXPATHLEN * 2];
     PRFileDesc *prfd;

+ 0 - 353
ldap/servers/slapd/back-ldbm/dbhelp.c

@@ -1,353 +0,0 @@
-/** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2005 Red Hat, Inc.
- * All rights reserved.
- *
- * License: GPL (version 3 or any later version).
- * See LICENSE for details.
- * END COPYRIGHT BLOCK **/
-
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-
-/*
- * File for helper functions related to BerkeleyDB.
- * This exists because dblayer.c is 5k+ lines long,
- * so it seems time to move code to a new file.
- */
-
-#include "back-ldbm.h"
-#include "dblayer.h"
-
-static int
-dblayer_copy_file_keybykey(DB_ENV *env,
-                           char *source_file_name,
-                           char *destination_file_name,
-                           int overwrite __attribute__((unused)),
-                           dblayer_private *priv,
-                           ldbm_instance *inst)
-{
-    int retval = 0;
-    int retval_cleanup = 0;
-    DB *source_file = NULL;
-    DB *destination_file = NULL;
-    DBC *source_cursor = NULL;
-    DBTYPE dbtype = 0;
-    PRUint32 dbflags = 0;
-    PRUint32 dbpagesize = 0;
-    int cursor_flag = 0;
-    int finished = 0;
-    int mode = 0;
-    char *p = NULL;
-
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_copy_file_keybykey", "=>\n");
-
-    if (!env) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "Out of memory\n");
-        goto error;
-    }
-
-    if (priv->dblayer_file_mode)
-        mode = priv->dblayer_file_mode;
-    dblayer_set_env_debugging(env, priv);
-
-    /* Open the source file */
-    retval = db_create(&source_file, env, 0);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "Create error %d: %s\n",
-                      retval, db_strerror(retval));
-        goto error;
-    }
-    retval = (source_file->open)(source_file, NULL, source_file_name, NULL, DB_UNKNOWN, DB_RDONLY, 0);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "Open error %d: %s\n",
-                      retval, db_strerror(retval));
-        goto error;
-    }
-    /* Get the info we need from the source file */
-    retval = source_file->get_flags(source_file, &dbflags);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "get_flags error %d: %s\n",
-                      retval, db_strerror(retval));
-        goto error;
-    }
-    retval = source_file->get_type(source_file, &dbtype);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                      "get_type error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-    retval = source_file->get_pagesize(source_file, &dbpagesize);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                      "get_pagesize error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-    /* Open the destination file
-     * and make sure that it has the correct page size, the correct access method, and the correct flags (dup etc)
-     */
-    retval = db_create(&destination_file, env, 0);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                      "Create error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-    retval = destination_file->set_flags(destination_file, dbflags);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                      "Set_flags error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-    retval = destination_file->set_pagesize(destination_file, dbpagesize);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                      "set_pagesize error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-
-    /* TEL 20130412: Make sure to set the dup comparison function if needed.
-     * We key our decision off of the presence of new IDL and dup flags on
-     * the source database.  This is similar dblayer_open_file, except that
-     * we don't have the attribute info index mask for VLV.  That should be OK
-     * since the DB_DUP and DB_DUPSORT flags wouldn't have been toggled on
-     * unless they passed the check on the source.
-     */
-    /* Entryrdn index has its own dup compare function */
-    if ((p = PL_strcasestr(source_file_name, LDBM_ENTRYRDN_STR)) &&
-        (*(p + sizeof(LDBM_ENTRYRDN_STR) - 1) == '.')) {
-        /* entryrdn.db */
-        struct attrinfo *ai = NULL;
-        if (NULL == inst) {
-            slapi_log_err(SLAPI_LOG_ERR,
-                          "dblayer_copy_file_keybykey", "(entryrdn) - "
-                                                        "dup_cmp_fn cannot be retrieved since inst is NULL.\n");
-            goto error;
-        }
-        ainfo_get(inst->inst_be, LDBM_ENTRYRDN_STR, &ai);
-        if (ai->ai_dup_cmp_fn) {
-            /* If set, use the special dup compare callback */
-            retval = destination_file->set_dup_compare(destination_file, ai->ai_dup_cmp_fn);
-            if (retval) {
-                slapi_log_err(SLAPI_LOG_ERR,
-                              "dblayer_copy_file_keybykey", "(entryrdn) - set_dup_compare error %d: %s\n",
-                              retval, db_strerror(retval));
-                goto error;
-            }
-        }
-    } else if (idl_get_idl_new() && (dbflags & DB_DUP) && (dbflags & DB_DUPSORT)) {
-        retval = destination_file->set_dup_compare(destination_file, idl_new_compare_dups);
-        if (retval) {
-            slapi_log_err(SLAPI_LOG_ERR,
-                          "dblayer_copy_file_keybykey", "set_dup_compare error %d: %s\n",
-                          retval, db_strerror(retval));
-            goto error;
-        }
-    }
-
-    retval = (destination_file->open)(destination_file, NULL, destination_file_name, NULL, dbtype, DB_CREATE | DB_EXCL, mode);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "Open error %d: %s\n",
-                      retval, db_strerror(retval));
-        goto error;
-    }
-    /* Open a cursor on the source file */
-    retval = source_file->cursor(source_file, NULL, &source_cursor, 0);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                      "Create cursor error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-    /* Seek to the first key */
-    cursor_flag = DB_FIRST;
-    /* Loop seeking to the next key until they're all done */
-    while (!finished) {
-        DBT key = {0};
-        DBT data = {0};
-        retval = source_cursor->c_get(source_cursor, &key, &data, cursor_flag);
-        if (retval) {
-            /* DB_NOTFOUND is expected when we find the end, log a message for any other error.
-             * In either case, set finished=1 so we can hop down and close the cursor. */
-            if (DB_NOTFOUND != retval) {
-                slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "c_get error %d: %s\n",
-                              retval, db_strerror(retval));
-                goto error;
-            }
-            retval = 0; /* DB_NOTFOUND was OK... */
-            finished = 1;
-        } else {
-            /* For each key, insert into the destination file */
-            retval = destination_file->put(destination_file, NULL, &key, &data, 0);
-            if (retval) {
-                slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey", "put error %d: %s\n",
-                              retval, db_strerror(retval));
-                goto error;
-            }
-            cursor_flag = DB_NEXT;
-        }
-    }
-
-error:
-    /* Close the cursor */
-    if (source_cursor) {
-        retval_cleanup = source_cursor->c_close(source_cursor);
-        if (retval_cleanup) {
-            slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                          "Close cursor error %d: %s\n", retval_cleanup, db_strerror(retval_cleanup));
-            retval += retval_cleanup;
-        }
-    }
-    /* Close the source file */
-    if (source_file) {
-        retval_cleanup = source_file->close(source_file, 0);
-        source_file = NULL;
-        if (retval_cleanup) {
-            slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                          "Close error %d: %s\n", retval_cleanup, db_strerror(retval_cleanup));
-            retval += retval_cleanup;
-        }
-    }
-    /* Close the destination file */
-    if (destination_file) {
-        retval_cleanup = destination_file->close(destination_file, 0);
-        destination_file = NULL;
-        if (retval_cleanup) {
-            slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_keybykey",
-                          "Close error %d: %s\n", retval_cleanup, db_strerror(retval_cleanup));
-            retval += retval_cleanup;
-        }
-    }
-
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_copy_file_keybykey", "<=\n");
-    return retval;
-}
-
-int
-dblayer_copy_file_resetlsns(char *home_dir,
-                            char *source_file_name,
-                            char *destination_file_name,
-                            int overwrite,
-                            dblayer_private *priv,
-                            ldbm_instance *inst)
-{
-    int retval = 0;
-    DB_ENV *env = NULL;
-
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_copy_file_resetlsns", "=>\n");
-    /* Make the environment */
-
-    retval = dblayer_make_private_simple_env(home_dir, &env);
-    if (retval || !env) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_resetlsns", "Call to dblayer_make_private_simple_env failed!\n"
-                                                                    "Unable to open an environment.");
-        goto out;
-    }
-    /* Do the copy */
-    retval = dblayer_copy_file_keybykey(env, source_file_name, destination_file_name, overwrite, priv, inst);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_resetlsns", "Copy did not complete successfully.");
-    }
-out:
-    /* Close the environment */
-    if (env) {
-        int retval2 = 0;
-        retval2 = env->close(env, 0);
-        if (retval2) {
-            if (0 == retval) {
-                retval = retval2;
-                slapi_log_err(SLAPI_LOG_ERR, "dblayer_copy_file_resetlsns",
-                              "error %d: %s\n", retval, db_strerror(retval));
-            }
-        }
-    }
-
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_copy_file_resetlsns", "<=\n");
-    return retval;
-}
-
-void
-dblayer_set_env_debugging(DB_ENV *pEnv, dblayer_private *priv)
-{
-    pEnv->set_errpfx(pEnv, "ns-slapd");
-    if (priv->dblayer_verbose) {
-        pEnv->set_verbose(pEnv, DB_VERB_DEADLOCK, 1); /* 1 means on */
-        pEnv->set_verbose(pEnv, DB_VERB_RECOVERY, 1); /* 1 means on */
-        pEnv->set_verbose(pEnv, DB_VERB_WAITSFOR, 1); /* 1 means on */
-    }
-    if (priv->dblayer_debug) {
-        pEnv->set_errcall(pEnv, dblayer_log_print);
-    }
-}
-
-/* Make an environment to be used for isolated recovery (e.g. during a partial restore operation) */
-int
-dblayer_make_private_recovery_env(char *db_home_dir, dblayer_private *priv, DB_ENV **env)
-{
-    int retval = 0;
-    DB_ENV *ret_env = NULL;
-
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_make_private_recovery_env", "=>\n");
-    if (NULL == env) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_make_private_recovery_env",
-                      "Null environment.  Cannot continue.");
-        return -1;
-    }
-    *env = NULL;
-
-    retval = db_env_create(&ret_env, 0);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_make_private_recovery_env",
-                      "Create error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-    dblayer_set_env_debugging(ret_env, priv);
-
-    retval = (ret_env->open)(ret_env, db_home_dir, DB_INIT_TXN | DB_RECOVER_FATAL | DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE, 0);
-    if (0 == retval) {
-        *env = ret_env;
-    } else {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_make_private_recovery_env",
-                      "Open error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-
-error:
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_make_private_recovery_env", "<=\n");
-    return retval;
-}
-
-/* Make an environment to be used for simple non-transacted database operations, e.g. fixup during upgrade */
-int
-dblayer_make_private_simple_env(char *db_home_dir, DB_ENV **env)
-{
-    int retval = 0;
-    DB_ENV *ret_env = NULL;
-
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_make_private_simple_env", "=>\n");
-    if (NULL == env) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_make_private_simple_env",
-                      "Null environment.  Cannot continue.");
-        return -1;
-    }
-    *env = NULL;
-
-    retval = db_env_create(&ret_env, 0);
-    if (retval) {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_make_private_simple_env",
-                      "Error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-
-    retval = (ret_env->open)(ret_env, db_home_dir, DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE, 0);
-    if (0 == retval) {
-        *env = ret_env;
-    } else {
-        slapi_log_err(SLAPI_LOG_ERR, "dblayer_make_private_simple_env",
-                      "Error %d: %s\n", retval, db_strerror(retval));
-        goto error;
-    }
-
-error:
-    slapi_log_err(SLAPI_LOG_TRACE, "dblayer_make_private_simple_env", "<=\n");
-    return retval;
-}

Файловите разлики са ограничени, защото са твърде много
+ 34 - 1384
ldap/servers/slapd/back-ldbm/dblayer.c


+ 83 - 91
ldap/servers/slapd/back-ldbm/dblayer.h

@@ -68,108 +68,100 @@
 #define DB_REGION_NAME 25 /* DB: named regions, no backing file. */
 #endif
 
-struct dblayer_private_env
-{
-    DB_ENV *dblayer_DB_ENV;
-    Slapi_RWLock *dblayer_env_lock;
-    int dblayer_openflags;
-    int dblayer_priv_flags;
-};
-
-#define DBLAYER_PRIV_SET_DATA_DIR 0x1
+typedef int dblayer_start_fn_t(struct ldbminfo *li, int flags);
+typedef int dblayer_close_fn_t(struct ldbminfo *li, int flags);
+typedef int dblayer_instance_start_fn_t(backend *be, int flags);
+typedef int dblayer_backup_fn_t(struct ldbminfo *li, char *dest_dir, Slapi_Task *task);
+typedef int dblayer_verify_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_db_size_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_ldif2db_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_db2ldif_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_db2index_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_cleanup_fn_t(struct ldbminfo *li);
+typedef int dblayer_upgradedn_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_upgradedb_fn_t(Slapi_PBlock *pb);
+typedef int dblayer_restore_fn_t(struct ldbminfo *li, char *src_dir, Slapi_Task *task);
+typedef int dblayer_txn_begin_fn_t(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
+typedef int dblayer_txn_commit_fn_t(struct ldbminfo *li, back_txn *txn, PRBool use_lock);
+typedef int dblayer_txn_abort_fn_t(struct ldbminfo *li, back_txn *txn, PRBool use_lock);
+typedef int dblayer_get_info_fn_t(Slapi_Backend *be, int cmd, void **info);
+typedef int dblayer_set_info_fn_t(Slapi_Backend *be, int cmd, void **info);
+typedef int dblayer_back_ctrl_fn_t(Slapi_Backend *be, int cmd, void *info);
+typedef int dblayer_delete_db_fn_t(struct ldbminfo *li);
+typedef int dblayer_load_dse_fn_t(struct ldbminfo *li);
+typedef int dblayer_get_db_fn_t(backend *be, char *indexname, int open_flag, struct attrinfo *ai, DB **ppDB);
+typedef int dblayer_rm_db_file_fn_t(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
+typedef int dblayer_import_fn_t(void *arg);
+typedef void dblayer_config_get_fn_t(struct ldbminfo *li, char *attrname, char *value);
+typedef int dblayer_config_set_fn_t(struct ldbminfo *li, char *attrname, int mod_apply, int mod_op, int phase, char *value);
+typedef int instance_config_set_fn_t(ldbm_instance *inst, char *attrname, int mod_apply, int mod_op, int phase, struct berval *value);
+typedef int instance_config_entry_callback_fn_t(struct ldbminfo *li, struct ldbm_instance *inst);
+typedef int instance_cleanup_fn_t(struct ldbm_instance *inst);
+typedef int instance_create_fn_t(struct ldbm_instance *inst);
+typedef int instance_search_callback_fn_t(Slapi_Entry *e, int *returncode, char *returntext, ldbm_instance *inst);
+typedef int dblayer_auto_tune_fn_t(struct ldbminfo *li);
 
-/* structure which holds our stuff */
 struct dblayer_private
 {
-    struct dblayer_private_env *dblayer_env;
-    char *dblayer_home_directory;
-    char *dblayer_log_directory;
-    char *dblayer_dbhome_directory;  /* default path for relative inst paths */
-    char **dblayer_data_directories; /* passed to set_data_dir
-                                      * including dblayer_dbhome_directory */
-    char **dblayer_db_config;
-    int dblayer_ncache;
-    int dblayer_previous_ncache;
-    int dblayer_tx_max;
-    uint64_t dblayer_cachesize;
-    uint64_t dblayer_previous_cachesize; /* Cache size when we last shut down--
-                                        * used to determine if we delete
-                                        * the mpool */
-    int dblayer_recovery_required;
-    int dblayer_enable_transactions;
-    int dblayer_txn_wait; /* Default is "off" (DB_TXN_NOWAIT) but for
-                                     * support purpose it could be helpful to set
-                                     * "on" so that backend hang on deadlock */
-    int dblayer_durable_transactions;
-    int dblayer_checkpoint_interval;
-    int dblayer_circular_logging;
-    uint32_t dblayer_page_size;       /* db page size if configured,
-                                     * otherwise default to DBLAYER_PAGESIZE */
-    uint32_t dblayer_index_page_size; /* db index page size if configured,
-                                     * otherwise default to
-                                     * DBLAYER_INDEX_PAGESIZE */
-    int dblayer_idl_divisor;          /* divide page size by this to get IDL
-                                     * size */
-    uint64_t dblayer_logfile_size;    /* How large can one logfile be ? */
-    uint64_t dblayer_logbuf_size;     /* how large log buffer can be */
+    /* common params for all backend implementations */
     int dblayer_file_mode;            /* pmode for files we create */
-    int dblayer_verbose;              /* Get libdb to exhale debugging info */
-    int dblayer_debug;                /* Will libdb emit debugging info into
-                                     * our log ? */
-    int dblayer_trickle_percentage;
-    int dblayer_cache_config; /* Special cache configurations
-                                     * e.g. force file-based mpool */
-    int dblayer_lib_version;
-    int dblayer_spin_count;         /* DB Mutex spin count, 0 == use default */
-    int dblayer_named_regions;      /* Should the regions be named sections,
-                                     * or backed by files ? */
-    int dblayer_private_mem;        /* private memory will be used for
-                                     * allocation of regions and mutexes */
-    int dblayer_private_import_mem; /* private memory will be used for
-                                     * allocation of regions and mutexes for
-                                     * import */
-    long dblayer_shm_key;           /* base segment ID for named regions */
-    int db_debug_checkpointing;     /* Enable debugging messages from
-                                     * checkpointing */
     int dblayer_bad_stuff_happened; /* Means that something happened (e.g. out
-                                     * of disk space) such that the guardian
-                                     * file must not be written on shutdown */
-    perfctrs_private *perf_private; /* Private data for performance counters
-                                     * code */
-    int dblayer_stop_threads;       /* Used to signal to threads that they
-                                     * should stop ASAP */
-    PRInt32 dblayer_thread_count;   /* Tells us how many threads are running,
-                                     * used to figure out when they're all
-                                     * stopped */
-    PRLock *thread_count_lock;      /* lock for thread_count_cv */
-    PRCondVar *thread_count_cv;     /* condition variable for housekeeping thread shutdown */
-    int dblayer_lockdown;           /* use DB_LOCKDOWN */
-#define BDB_LOCK_NB_MIN 10000
-    int dblayer_lock_config;
-    int dblayer_previous_lock_config;  /* Max lock count when we last shut down--
-                                      * used to determine if we delete the mpool */
-    u_int32_t dblayer_deadlock_policy; /* i.e. the atype to DB_ENV->lock_detect in deadlock_threadmain */
-    int dblayer_compactdb_interval;    /* interval to execute compact id2entry dbs */
+                                     * of disk space)*/
+    int dblayer_idl_divisor;          /* divide page size by this to get IDL size */
+                                      /* this is legacy and should go away, but it is not BDB specific */
+
+    /* backend implementation specific data */
+    void *dblayer_env;              /* specific database environment */
+
+    /* functions to be provided by backend and assigned during backend init */
+    dblayer_start_fn_t *dblayer_start_fn;
+    dblayer_close_fn_t *dblayer_close_fn;
+    dblayer_instance_start_fn_t *dblayer_instance_start_fn;
+    dblayer_backup_fn_t *dblayer_backup_fn;
+    dblayer_verify_fn_t *dblayer_verify_fn;
+    dblayer_db_size_fn_t *dblayer_db_size_fn;
+    dblayer_ldif2db_fn_t *dblayer_ldif2db_fn;
+    dblayer_db2ldif_fn_t *dblayer_db2ldif_fn;
+    dblayer_db2index_fn_t *dblayer_db2index_fn;
+    dblayer_cleanup_fn_t *dblayer_cleanup_fn;
+    dblayer_upgradedn_fn_t *dblayer_upgradedn_fn;
+    dblayer_upgradedb_fn_t *dblayer_upgradedb_fn;
+    dblayer_restore_fn_t *dblayer_restore_fn;
+    dblayer_txn_begin_fn_t *dblayer_txn_begin_fn;
+    dblayer_txn_commit_fn_t *dblayer_txn_commit_fn;
+    dblayer_txn_abort_fn_t *dblayer_txn_abort_fn;
+    dblayer_get_info_fn_t *dblayer_get_info_fn;
+    dblayer_set_info_fn_t *dblayer_set_info_fn;
+    dblayer_back_ctrl_fn_t *dblayer_back_ctrl_fn;
+    dblayer_get_db_fn_t *dblayer_get_db_fn;
+    dblayer_delete_db_fn_t *dblayer_delete_db_fn;
+    dblayer_rm_db_file_fn_t *dblayer_rm_db_file_fn;
+    dblayer_import_fn_t *dblayer_import_fn;
+    dblayer_load_dse_fn_t *dblayer_load_dse_fn;
+    dblayer_config_get_fn_t *dblayer_config_get_fn;
+    dblayer_config_set_fn_t *dblayer_config_set_fn;
+    instance_config_set_fn_t *instance_config_set_fn;
+    instance_config_entry_callback_fn_t *instance_add_config_fn;
+    instance_config_entry_callback_fn_t *instance_postadd_config_fn;
+    instance_config_entry_callback_fn_t *instance_del_config_fn;
+    instance_config_entry_callback_fn_t *instance_postdel_config_fn;
+    instance_cleanup_fn_t *instance_cleanup_fn;
+    instance_create_fn_t *instance_create_fn;
+    instance_search_callback_fn_t *instance_search_callback_fn;
+    dblayer_auto_tune_fn_t *dblayer_auto_tune_fn;
 };
 
-void dblayer_log_print(const DB_ENV *dbenv, const char *prefix, const char *buffer);
+#define DBLAYER_PRIV_SET_DATA_DIR 0x1
 
-int dblayer_db_remove(dblayer_private_env *env, char const path[], char const dbName[]);
+void dblayer_init_pvt_txn(void);
+void dblayer_push_pvt_txn(back_txn *txn);
+back_txn *dblayer_get_pvt_txn(void);
+void dblayer_pop_pvt_txn(void);
+
+void dblayer_log_print(const DB_ENV *dbenv, const char *prefix, const char *buffer);
 
 int dblayer_delete_indices(ldbm_instance *inst);
 
-/* Helper functions in dbhelp.c */
-
-/* Make an environment to be used for isolated recovery (e.g. during a partial restore operation) */
-int dblayer_make_private_recovery_env(char *db_home_dir, dblayer_private *priv, DB_ENV **env);
-/* Make an environment to be used for simple non-transacted database operations, e.g. fixup during upgrade */
-int dblayer_make_private_simple_env(char *db_home_dir, DB_ENV **env);
-/* Copy a database file, preserving all its contents (used to reset the LSNs in the file in order to move
- * it from one transacted environment to another.
- */
-int dblayer_copy_file_resetlsns(char *home_dir, char *source_file_name, char *destination_file_name, int overwrite, dblayer_private *priv, ldbm_instance *inst);
-/* Turn on the various logging and debug options for DB */
-void dblayer_set_env_debugging(DB_ENV *pEnv, dblayer_private *priv);
 
 /* Return the last four characters of a string; used for comparing extensions. */
 char *last_four_chars(const char *s);

+ 4 - 5
ldap/servers/slapd/back-ldbm/dbsize.c

@@ -17,18 +17,17 @@
  */
 
 #include "back-ldbm.h"
+#include "dblayer.h"
 
 /* TODO: make this a 64-bit return value */
 int
 ldbm_db_size(Slapi_PBlock *pb)
 {
     struct ldbminfo *li;
-    unsigned int size; /* TODO: make this a 64-bit return value */
-    int rc;
+    dblayer_private *priv;
 
     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
-    rc = dblayer_database_size(li, &size);     /* TODO: make this a 64-bit return value */
-    slapi_pblock_set(pb, SLAPI_DBSIZE, &size); /* TODO: make this a 64-bit return value */
+    priv = (dblayer_private *)li->li_dblayer_private;
+    return priv->dblayer_db_size_fn(pb);
 
-    return rc;
 }

+ 2 - 209
ldap/servers/slapd/back-ldbm/dbverify.c

@@ -15,219 +15,12 @@
 #include "back-ldbm.h"
 #include "dblayer.h"
 
-static int
-dbverify_ext(ldbm_instance *inst, int verbose)
-{
-    char dbdir[MAXPATHLEN];
-    char *filep = NULL;
-    PRDir *dirhandle = NULL;
-    PRDirEntry *direntry = NULL;
-    DB *dbp = NULL;
-    size_t tmplen = 0;
-    size_t filelen = 0;
-    int rval = 1;
-    int rval_main = 0;
-    struct ldbminfo *li = inst->inst_li;
-    dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
-    struct dblayer_private_env *pEnv = priv->dblayer_env;
-
-    dbdir[sizeof(dbdir) - 1] = '\0';
-    PR_snprintf(dbdir, sizeof(dbdir), "%s/%s", inst->inst_parent_dir_name,
-                inst->inst_dir_name);
-    if ('\0' != dbdir[sizeof(dbdir) - 1]) /* overflown */
-    {
-        slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                      "db path too long: %s/%s\n",
-                      inst->inst_parent_dir_name, inst->inst_dir_name);
-        return 1;
-    }
-    tmplen = strlen(dbdir);
-    filep = dbdir + tmplen;
-    filelen = sizeof(dbdir) - tmplen;
-
-    /* run dbverify on each each db file */
-    dirhandle = PR_OpenDir(dbdir);
-    if (!dirhandle) {
-        slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                      "PR_OpenDir (%s) failed (%d): %s\n",
-                      dbdir, PR_GetError(), slapd_pr_strerror(PR_GetError()));
-        return 1;
-    }
-    while (NULL !=
-           (direntry = PR_ReadDir(dirhandle, PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
-        /* struct attrinfo *ai = NULL; */
-        dbp = NULL;
-
-        if (!direntry->name) {
-            break;
-        }
-        if (!strstr(direntry->name, LDBM_FILENAME_SUFFIX)) /* non db file */
-        {
-            continue;
-        }
-        if (sizeof(direntry->name) + 2 > filelen) {
-            slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                          "db path too long: %s/%s\n",
-                          dbdir, direntry->name);
-            continue;
-        }
-        PR_snprintf(filep, filelen, "/%s", direntry->name);
-        rval = db_create(&dbp, pEnv->dblayer_DB_ENV, 0);
-        if (0 != rval) {
-            slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                          "Unable to create id2entry db file %d\n", rval);
-            return rval;
-        }
-
-#define VLVPREFIX "vlv#"
-        if (0 != strncmp(direntry->name, ID2ENTRY, strlen(ID2ENTRY))) {
-            struct attrinfo *ai = NULL;
-            char *p = NULL;
-            p = strstr(filep, LDBM_FILENAME_SUFFIX); /* since already checked,
-                                                        it must have it */
-            if (p)
-                *p = '\0';
-            ainfo_get(inst->inst_be, filep + 1, &ai);
-            if (p)
-                *p = '.';
-            if (ai->ai_key_cmp_fn) {
-                dbp->app_private = (void *)ai->ai_key_cmp_fn;
-                dbp->set_bt_compare(dbp, dblayer_bt_compare);
-            }
-            if (idl_get_idl_new()) {
-                rval = dbp->set_pagesize(dbp,
-                                         (priv->dblayer_index_page_size == 0) ? DBLAYER_INDEX_PAGESIZE : priv->dblayer_index_page_size);
-            } else {
-                rval = dbp->set_pagesize(dbp,
-                                         (priv->dblayer_page_size == 0) ? DBLAYER_PAGESIZE : priv->dblayer_page_size);
-            }
-            if (0 != rval) {
-                slapi_log_err(SLAPI_LOG_ERR, "DB verify",
-                              "Unable to set pagesize flags to db (%d)\n", rval);
-                return rval;
-            }
-            if (0 == strncmp(direntry->name, VLVPREFIX, strlen(VLVPREFIX))) {
-                rval = dbp->set_flags(dbp, DB_RECNUM);
-                if (0 != rval) {
-                    slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                                  "Unable to set RECNUM flag to vlv index (%d)\n", rval);
-                    return rval;
-                }
-            } else if (idl_get_idl_new()) {
-                rval = dbp->set_flags(dbp, DB_DUP | DB_DUPSORT);
-                if (0 != rval) {
-                    slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                                  "Unable to set DUP flags to db (%d)\n", rval);
-                    return rval;
-                }
-
-                if (ai->ai_dup_cmp_fn) {
-                    /* If set, use the special dup compare callback */
-                    rval = dbp->set_dup_compare(dbp, ai->ai_dup_cmp_fn);
-                } else {
-                    rval = dbp->set_dup_compare(dbp, idl_new_compare_dups);
-                }
-
-                if (0 != rval) {
-                    slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                                  "Unable to set dup_compare to db (%d)\n", rval);
-                    return rval;
-                }
-            }
-        }
-#undef VLVPREFIX
-        rval = dbp->verify(dbp, dbdir, NULL, NULL, 0);
-        if (0 == rval) {
-            if (verbose) {
-                slapi_log_err(SLAPI_LOG_INFO, "dbverify_ext",
-                              "%s: ok\n", dbdir);
-            }
-        } else {
-            slapi_log_err(SLAPI_LOG_ERR, "dbverify_ext",
-                          "verify failed(%d): %s\n", rval, dbdir);
-        }
-        rval_main |= rval;
-        *filep = '\0';
-    }
-    PR_CloseDir(dirhandle);
-
-    return rval_main;
-}
-
 int
 ldbm_back_dbverify(Slapi_PBlock *pb)
 {
     struct ldbminfo *li = NULL;
-    Object *inst_obj = NULL;
-    ldbm_instance *inst = NULL;
-    int verbose = 0;
-    int rval = 1;
-    int rval_main = 0;
-    char **instance_names = NULL;
-    char *dbdir = NULL;
-
-    slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_dbverify", "Verifying db files...\n");
-    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_names);
-    slapi_pblock_get(pb, SLAPI_SEQ_TYPE, &verbose);
     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
-    slapi_pblock_get(pb, SLAPI_DBVERIFY_DBDIR, &dbdir);
-    ldbm_config_load_dse_info(li);
-    ldbm_config_internal_set(li, CONFIG_DB_TRANSACTION_LOGGING, "off");
-
-    /* no write needed; choose EXPORT MODE */
-    if (0 != dblayer_start(li, DBLAYER_EXPORT_MODE)) {
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_dbverify",
-                      "dbverify: Failed to init database\n");
-        return rval;
-    }
-
-    /* server is up */
-    slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_dbverify", "server is up\n");
-    if (instance_names) /* instance is specified */
-    {
-        char **inp = NULL;
-        for (inp = instance_names; inp && *inp; inp++) {
-            inst = ldbm_instance_find_by_name(li, *inp);
-            if (inst) {
-                if (dbdir) {
-                    /* verifying backup */
-                    slapi_ch_free_string(&inst->inst_parent_dir_name);
-                    inst->inst_parent_dir_name = slapi_ch_strdup(dbdir);
-                }
-                rval_main |= dbverify_ext(inst, verbose);
-            } else {
-                rval_main |= 1; /* no such instance */
-            }
-        }
-    } else /* all instances */
-    {
-        for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
-             inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
-            inst = (ldbm_instance *)object_get_data(inst_obj);
-            /* check if an import/restore is already ongoing... */
-            if (instance_set_busy(inst) != 0) {
-                /* standalone, only.  never happens */
-                slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_dbverify",
-                              "Backend '%s' is already in the middle of "
-                              "another task and cannot be disturbed.\n",
-                              inst->inst_name);
-                continue; /* skip this instance and go to the next*/
-            }
-            if (dbdir) {
-                /* verifying backup */
-                slapi_ch_free_string(&inst->inst_parent_dir_name);
-                inst->inst_parent_dir_name = slapi_ch_strdup(dbdir);
-            }
-            rval_main |= dbverify_ext(inst, verbose);
-        }
-    }
-
-    /* close the database down again */
-    rval = dblayer_post_close(li, DBLAYER_EXPORT_MODE);
-    if (0 != rval) {
-        slapi_log_err(SLAPI_LOG_ERR,
-                      "ldbm_back_dbverify", "Failed to close database\n");
-    }
+    dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
 
-    return rval_main;
+    return priv->dblayer_verify_fn(pb);
 }

+ 30 - 4
ldap/servers/slapd/back-ldbm/idl.c

@@ -14,6 +14,7 @@
 /* idl.c - ldap id list handling routines */
 
 #include "back-ldbm.h"
+#include "dblayer.h"
 
 /*
  * Disable idl locking since it causes unbreakable deadlock.
@@ -38,6 +39,30 @@ static int idl_tune = DEFAULT_IDL_TUNE; /* tuning parameters for IDL code */
 #define IDL_TUNE_BSEARCH 1              /* do a binary search when inserting into an IDL */
 #define IDL_TUNE_NOPAD 2                /* Don't pad IDLs with space at the end */
 
+/* If still needed, a proper solution has to be found;
+ * just moved here to clean up dblayer.
+ */
+
+static int32_t
+idl_old_get_optimal_block_size(backend *be)
+{
+    dblayer_private *priv = NULL;
+    uint32_t *page_size = NULL;
+    PR_ASSERT(NULL != be);
+
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    priv = (dblayer_private *)li->li_dblayer_private;
+    PR_ASSERT(NULL != priv);
+
+    priv->dblayer_get_info_fn(be, BACK_INFO_DB_PAGESIZE, (void **)&page_size);
+    if (priv->dblayer_idl_divisor == 0) {
+        return *page_size - DB_EXTN_PAGE_HEADER_SIZE;
+    } else {
+        return *page_size / priv->dblayer_idl_divisor;
+    }
+}
+
+
 void
 idl_old_set_tune(int val)
 {
@@ -64,9 +89,10 @@ idl_old_get_allidslimit(struct attrinfo *a)
 }
 
 static void
-idl_init_maxids(struct ldbminfo *li, idl_private *priv)
+idl_init_maxids(backend *be, idl_private *priv)
 {
-    const size_t blksize = dblayer_get_optimal_block_size(li);
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    const size_t blksize = idl_old_get_optimal_block_size(be);
 
     if (0 == li->li_allidsthreshold) {
         li->li_allidsthreshold = DEFAULT_ALLIDSTHRESHOLD;
@@ -577,7 +603,7 @@ idl_old_insert_key(
     }
 
     if (0 == a->ai_idl->idl_maxids) {
-        idl_init_maxids(li, a->ai_idl);
+        idl_init_maxids(be, a->ai_idl);
     }
 
     idl_Wlock_list(a->ai_idl, key);
@@ -1038,7 +1064,7 @@ idl_old_store_block(
     IDList *master_block = NULL;
 
     if (0 == a->ai_idl->idl_maxids) {
-        idl_init_maxids(li, a->ai_idl);
+        idl_init_maxids(be, a->ai_idl);
     }
 
     /* First, is it an ALLIDS block ? */

+ 0 - 705
ldap/servers/slapd/back-ldbm/import-merge.c

@@ -1,705 +0,0 @@
-/** BEGIN COPYRIGHT BLOCK
- * Copyright (C) 2001 Sun Microsystems, Inc. Used by permission.
- * Copyright (C) 2005 Red Hat, Inc.
- * All rights reserved.
- *
- * License: GPL (version 3 or any later version).
- * See LICENSE for details.
- * END COPYRIGHT BLOCK **/
-
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-/*
- * this is a bunch of routines for merging groups of db files together --
- * currently it's only used for imports (when we import into several small
- * db sets for speed, then merge them).
- */
-
-#include "back-ldbm.h"
-#include "import.h"
-
-struct _import_merge_thang
-{
-    int type;
-#define IMPORT_MERGE_THANG_IDL 1 /* Values for type */
-#define IMPORT_MERGE_THANG_VLV 2
-    union
-    {
-        IDList *idl;  /* if type == IMPORT_MERGE_THANG_IDL */
-        DBT vlv_data; /* if type == IMPORT_MERGE_THANG_VLV */
-    } payload;
-};
-typedef struct _import_merge_thang import_merge_thang;
-
-struct _import_merge_queue_entry
-{
-    int *file_referenced_list;
-    import_merge_thang thang;
-    DBT key;
-    struct _import_merge_queue_entry *next;
-};
-typedef struct _import_merge_queue_entry import_merge_queue_entry;
-
-static int
-import_merge_get_next_thang(backend *be, DBC *cursor, DB *db, import_merge_thang *thang, DBT *key, int type)
-{
-    int ret = 0;
-    DBT value = {0};
-
-    value.flags = DB_DBT_MALLOC;
-    key->flags = DB_DBT_MALLOC;
-
-    thang->type = type;
-    if (IMPORT_MERGE_THANG_IDL == type) {
-    /* IDL case */
-    around:
-        ret = cursor->c_get(cursor, key, &value, DB_NEXT_NODUP);
-        if (0 == ret) {
-            /* Check that we've not reached the beginning of continuation
-             * blocks */
-            if (CONT_PREFIX != ((char *)key->data)[0]) {
-                /* If not, read the IDL using idl_fetch() */
-                key->flags = DB_DBT_REALLOC;
-                ret = NEW_IDL_NO_ALLID;
-                thang->payload.idl = idl_fetch(be, db, key, NULL, NULL, &ret);
-                PR_ASSERT(NULL != thang->payload.idl);
-            } else {
-                slapi_ch_free(&(value.data));
-                slapi_ch_free(&(key->data));
-                key->flags = DB_DBT_MALLOC;
-                goto around; /* Just skip these */
-            }
-            slapi_ch_free(&(value.data));
-        } else {
-            if (DB_NOTFOUND == ret) {
-                /* This means that we're at the end of the file */
-                ret = EOF;
-            }
-        }
-    } else {
-        /* VLV case */
-        ret = cursor->c_get(cursor, key, &value, DB_NEXT);
-        if (0 == ret) {
-            thang->payload.vlv_data = value;
-            thang->payload.vlv_data.flags = 0;
-            key->flags = 0;
-        } else {
-            if (DB_NOTFOUND == ret) {
-                /* This means that we're at the end of the file */
-                ret = EOF;
-            }
-        }
-    }
-
-    return ret;
-}
-
-static import_merge_queue_entry *
-import_merge_make_new_queue_entry(import_merge_thang *thang, DBT *key, int fileno, int passes)
-{
-    /* Make a new entry */
-    import_merge_queue_entry *new_entry = (import_merge_queue_entry *)slapi_ch_calloc(1, sizeof(import_merge_queue_entry));
-
-    new_entry->key = *key;
-    new_entry->thang = *thang;
-    new_entry->file_referenced_list =
-        (int *)slapi_ch_calloc(passes, sizeof(fileno));
-
-    (new_entry->file_referenced_list)[fileno] = 1;
-    return new_entry;
-}
-
-/* Put an IDL onto the priority queue */
-static int
-import_merge_insert_input_queue(backend *be, import_merge_queue_entry **queue, int fileno, DBT *key, import_merge_thang *thang, int passes)
-{
-    /* Walk the list, looking for a key value which is greater than or equal
-     * to the presented key */
-    /* If an equal key is found, compute the union of the IDLs and store that
-     * back in the queue entry */
-    /* If a key greater than is found, or no key greater than is found, insert
-     * a new queue entry */
-    import_merge_queue_entry *current_entry = NULL;
-    import_merge_queue_entry *previous_entry = NULL;
-
-    PR_ASSERT(NULL != thang);
-    if (NULL == *queue) {
-        /* Queue was empty--- put ourselves at the head */
-        *queue = import_merge_make_new_queue_entry(thang, key, fileno, passes);
-        if (NULL == *queue) {
-            return -1;
-        }
-    } else {
-        for (current_entry = *queue; current_entry != NULL;
-             current_entry = current_entry->next) {
-            int cmp = strcmp(key->data, current_entry->key.data);
-
-            if (0 == cmp) {
-                if (IMPORT_MERGE_THANG_IDL == thang->type) { /* IDL case */
-                    IDList *idl = thang->payload.idl;
-                    /* Equal --- merge into the stored IDL, add file ID
-                     * to the list */
-                    IDList *new_idl =
-                        idl_union(be, current_entry->thang.payload.idl, idl);
-
-                    idl_free(&(current_entry->thang.payload.idl));
-                    idl_free(&idl);
-                    current_entry->thang.payload.idl = new_idl;
-                    /* Add this file id into the entry's referenced list */
-                    (current_entry->file_referenced_list)[fileno] = 1;
-                    /* Because we merged the entries, we no longer need the
-                     * key, so free it */
-                    slapi_ch_free(&(key->data));
-                    goto done;
-                } else {
-                    /* VLV case, we can see exact keys, this is not a bug ! */
-                    /* We want to ensure that they key read most recently is
-                     * put later in the queue than any others though */
-                }
-            } else {
-                if (cmp < 0) {
-                    /* We compare smaller than the stored key, so we should
-                     * insert ourselves before this entry */
-                    break;
-                } else {
-                    /* We compare greater than this entry, so we should keep
-                     * going */;
-                }
-            }
-            previous_entry = current_entry;
-        }
-
-        /* Now insert */
-        {
-            import_merge_queue_entry *new_entry =
-                import_merge_make_new_queue_entry(thang, key, fileno, passes);
-
-            if (NULL == new_entry) {
-                return -1;
-            }
-
-            /* If not, then we must need to insert ourselves after the last
-             * entry */
-            new_entry->next = current_entry;
-            if (NULL == previous_entry) {
-                *queue = new_entry;
-            } else {
-                previous_entry->next = new_entry;
-            }
-        }
-    }
-
-done:
-    return 0;
-}
-
-static int
-import_merge_remove_input_queue(backend *be, import_merge_queue_entry **queue, import_merge_thang *thang, DBT *key, DBC **input_cursors, DB **input_files, int passes)
-{
-    import_merge_queue_entry *head = NULL;
-    int file_referenced = 0;
-    int i = 0;
-    int ret = 0;
-
-    PR_ASSERT(NULL != queue);
-    head = *queue;
-    if (head == NULL) {
-        /* Means we've exhausted the queue---we're done */
-        return EOF;
-    }
-    /* Remove the head of the queue */
-    *queue = head->next;
-    /* Get the IDL */
-    *thang = head->thang;
-    *key = head->key;
-    PR_ASSERT(NULL != thang);
-    /* Walk the list of referenced files, reading in the next IDL from each
-     * one to the queue */
-    for (i = 0; i < passes; i++) {
-        import_merge_thang new_thang = {0};
-        DBT new_key = {0};
-
-        file_referenced = (head->file_referenced_list)[i];
-        if (file_referenced) {
-            ret = import_merge_get_next_thang(be, input_cursors[i],
-                                              input_files[i], &new_thang, &new_key, thang->type);
-            if (0 != ret) {
-                if (EOF == ret) {
-                    /* Means that we walked off the end of the list,
-                     * do nothing */
-                    ret = 0;
-                } else {
-                    /* Some other error */
-                    break;
-                }
-            } else {
-                /* This function is responsible for any freeing needed */
-                import_merge_insert_input_queue(be, queue, i, &new_key,
-                                                &new_thang, passes);
-            }
-        }
-    }
-    slapi_ch_free((void **)&(head->file_referenced_list));
-    slapi_ch_free((void **)&head);
-
-    return ret;
-}
-
-static int
-import_merge_open_input_cursors(DB **files, int passes, DBC ***cursors)
-{
-    int i = 0;
-    int ret = 0;
-    *cursors = (DBC **)slapi_ch_calloc(passes, sizeof(DBC *));
-    if (NULL == *cursors) {
-        return -1;
-    }
-
-    for (i = 0; i < passes; i++) {
-        DB *pDB = files[i];
-        DBC *pDBC = NULL;
-        if (NULL != pDB) {
-            /* Try to open a cursor onto the file */
-            ret = pDB->cursor(pDB, NULL, &pDBC, 0);
-            if (0 != ret) {
-                break;
-            } else {
-                (*cursors)[i] = pDBC;
-            }
-        }
-    }
-
-    return ret;
-}
-
-static int
-import_count_merge_input_files(ldbm_instance *inst,
-                               char *indexname,
-                               int passes,
-                               int *number_found,
-                               int *pass_number)
-{
-    int i = 0;
-    int found_one = 0;
-
-    *number_found = 0;
-    *pass_number = 0;
-
-    for (i = 0; i < passes; i++) {
-        int fd;
-        char *filename = slapi_ch_smprintf("%s/%s.%d%s", inst->inst_dir_name, indexname, i + 1,
-                                           LDBM_FILENAME_SUFFIX);
-
-        if (NULL == filename) {
-            return -1;
-        }
-
-        fd = dblayer_open_huge_file(filename, O_RDONLY, 0);
-        slapi_ch_free((void **)&filename);
-        if (fd >= 0) {
-            close(fd);
-            if (found_one == 0) {
-                *pass_number = i + 1;
-            }
-            found_one = 1;
-            (*number_found)++;
-        } else {
-            ; /* Not finding a file is OK */
-        }
-    }
-
-    return 0;
-}
-
-static int
-import_open_merge_input_files(backend *be, IndexInfo *index_info, int passes, DB ***input_files, int *number_found, int *pass_number)
-{
-    int i = 0;
-    int ret = 0;
-    int found_one = 0;
-
-    *number_found = 0;
-    *pass_number = 0;
-    *input_files = (DB **)slapi_ch_calloc(passes, sizeof(DB *));
-    if (NULL == *input_files) {
-        /* Memory allocation error */
-        return -1;
-    }
-    for (i = 0; i < passes; i++) {
-        DB *pDB = NULL;
-        char *filename = slapi_ch_smprintf("%s.%d", index_info->name, i + 1);
-
-        if (NULL == filename) {
-            return -1;
-        }
-
-        if (vlv_isvlv(filename)) {
-            /* not sure why the file would be marked as a vlv index but
-           not the index configuration . . . but better make sure
-           the new code works with the old semantics */
-            int saved_mask = index_info->ai->ai_indexmask;
-            index_info->ai->ai_indexmask |= INDEX_VLV;
-            ret = dblayer_open_file(be, filename, 0, index_info->ai, &pDB);
-            index_info->ai->ai_indexmask = saved_mask;
-        } else {
-            ret = dblayer_open_file(be, filename, 0, index_info->ai, &pDB);
-        }
-
-        slapi_ch_free((void **)&filename);
-        if (0 == ret) {
-            if (found_one == 0) {
-                *pass_number = i + 1;
-            }
-            found_one = 1;
-            (*number_found)++;
-            (*input_files)[i] = pDB;
-        } else {
-            if (ENOENT == ret) {
-                ret = 0; /* Not finding a file is OK */
-            } else {
-                break;
-            }
-        }
-    }
-
-    return ret;
-}
-
-/* Performs the n-way merge on one file */
-static int
-import_merge_one_file(ImportWorkerInfo *worker, int passes, int *key_count)
-{
-    ldbm_instance *inst = worker->job->inst;
-    backend *be = inst->inst_be;
-    DB *output_file = NULL;
-    int ret = 0;
-    int preclose_ret = 0;
-    int number_found = 0;
-    int pass_number = 0;
-    DB **input_files = NULL;
-    DBC **input_cursors = NULL;
-
-    PR_ASSERT(NULL != inst);
-
-    /* Try to open all the input files.
-       If we can't open a file, we assume that is
-       because there was no data in it. */
-    ret = import_count_merge_input_files(inst, worker->index_info->name,
-                                         passes, &number_found, &pass_number);
-    if (0 != ret) {
-        goto error;
-    }
-    /* If there were no input files, then we're finished ! */
-    if (0 == number_found) {
-        ret = 0;
-        goto error;
-    }
-    /* Special-case where there's only one input file---just rename it */
-    if (1 == number_found) {
-        char *newname = NULL;
-        char *oldname = NULL;
-
-        ret = import_make_merge_filenames(inst->inst_dir_name,
-                                          worker->index_info->name, pass_number, &oldname, &newname);
-        if (0 != ret) {
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
-                              "Failed making filename in merge");
-            goto error;
-        }
-        ret = PR_Rename(newname, oldname);
-        if (0 != ret) {
-            PRErrorCode prerr = PR_GetError();
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
-                              "Failed to rename file \"%s\" to \"%s\" "
-                              "in merge, " SLAPI_COMPONENT_NAME_NSPR " error %d (%s)",
-                              oldname, newname, prerr, slapd_pr_strerror(prerr));
-            slapi_ch_free((void **)&newname);
-            slapi_ch_free((void **)&oldname);
-            goto error;
-        }
-        slapi_ch_free((void **)&newname);
-        slapi_ch_free((void **)&oldname);
-        *key_count = -1;
-    } else {
-        /* We really need to merge */
-        import_merge_queue_entry *merge_queue = NULL;
-        DBT key = {0};
-        import_merge_thang thang = {0};
-        int i = 0;
-        int not_finished = 1;
-        int vlv_index = (INDEX_VLV == worker->index_info->ai->ai_indexmask);
-
-#if 0
-    /* Close and re-open regions, bugs otherwise */
-    ret = dblayer_close(inst->inst_li, DBLAYER_IMPORT_MODE);
-    if (0 != ret) {
-        if (ENOSPC == ret) {
-        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "FAILED: NO DISK SPACE LEFT");
-        } else {
-        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 8 %d", ret);
-        }
-            goto error;
-    }
-    ret = dblayer_start(inst->inst_li, DBLAYER_IMPORT_MODE);
-    if (0 != ret) {
-        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 9");
-            goto error;
-    }
-    ret = dblayer_instance_start(be, DBLAYER_IMPORT_MODE);
-    if (0 != ret) {
-        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 9A");
-            goto error;
-    }
-#else
-        /* we have reason to believe that it's okay to leave the region files
-         * open in db3.x, since they track which files are opened and closed.
-         * if we had to close the region files, we'd have to take down the
-         * whole backend and defeat the purpose of an online import ---
-         * baaad medicine.
-         */
-        ret = dblayer_instance_close(be);
-        if (0 != ret) {
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 8i %d\n", ret);
-            goto error;
-        }
-        ret = dblayer_instance_start(be, DBLAYER_IMPORT_MODE);
-        if (0 != ret) {
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 8j %d\n", ret);
-            goto error;
-        }
-#endif
-
-        ret = import_open_merge_input_files(be, worker->index_info,
-                                            passes, &input_files, &number_found, &pass_number);
-        if (0 != ret) {
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 10");
-            goto error;
-        }
-
-        ret = dblayer_open_file(be, worker->index_info->name, 1,
-                                worker->index_info->ai, &output_file);
-        if (0 != ret) {
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "Failed to open output file for "
-                                                                                   "index %s in merge",
-                              worker->index_info->name);
-            goto error;
-        }
-
-        /* OK, so we now have input and output files open and can proceed to
-     * merge */
-        /* We want to pre-fill the input IDL queue */
-        /* Open cursors onto the input files */
-        ret = import_merge_open_input_cursors(input_files, passes,
-                                              &input_cursors);
-        if (0 != ret) {
-            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 2 %s %d",
-                              worker->index_info->name, ret);
-            goto error;
-        }
-
-        /* Now read from the first location in each file and insert into the
-     * queue */
-        for (i = 0; i < passes; i++)
-            if (input_files[i]) {
-                import_merge_thang prime_thang = {0};
-
-                /* Read an IDL from the file */
-                ret = import_merge_get_next_thang(be, input_cursors[i],
-                                                  input_files[i], &prime_thang, &key,
-                                                  vlv_index ? IMPORT_MERGE_THANG_VLV : IMPORT_MERGE_THANG_IDL);
-                if (0 != ret) {
-                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 1 %s %d",
-                                      worker->index_info->name, ret);
-                    goto error;
-                }
-                /* Put it on the queue */
-                ret = import_merge_insert_input_queue(be, &merge_queue, i, &key,
-                                                      &prime_thang, passes);
-                if (0 != ret) {
-                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 0 %s",
-                                      worker->index_info->name);
-                    goto error;
-                }
-            }
-
-        /* We now have a pre-filled queue, so we may now proceed to remove the
-       head entry and write it to the output file, and repeat this process
-       until we've finished reading all the input data */
-        while (not_finished && (0 == ret)) {
-            ret = import_merge_remove_input_queue(be, &merge_queue, &thang,
-                                                  &key, input_cursors, input_files, passes);
-            if (0 != ret) {
-                /* Have we finished cleanly ? */
-                if (EOF == ret) {
-                    not_finished = 0;
-                } else {
-                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 3 %s, %d",
-                                      worker->index_info->name, ret);
-                }
-            } else {
-                /* Write it out */
-                (*key_count)++;
-                if (vlv_index) {
-                    /* Write the vlv index */
-                    ret = output_file->put(output_file, NULL, &key,
-                                           &(thang.payload.vlv_data), 0);
-                    slapi_ch_free(&(thang.payload.vlv_data.data));
-                    thang.payload.vlv_data.data = NULL;
-                } else {
-                    /* Write the IDL index */
-                    ret = idl_store_block(be, output_file, &key,
-                                          thang.payload.idl, NULL, worker->index_info->ai);
-                    /* Free the key we got back from the queue */
-                    idl_free(&(thang.payload.idl));
-                    thang.payload.idl = NULL;
-                }
-                slapi_ch_free(&(key.data));
-                key.data = NULL;
-                if (0 != ret) {
-                    /* Failed to write--- most obvious cause being out of
-                   disk space, let's make sure that we at least print a
-                   sensible error message right here. The caller should
-                   really handle this properly, but we're always bad at
-                   this. */
-                    if (ret == DB_RUNRECOVERY || ret == ENOSPC) {
-                        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
-                                          "OUT OF SPACE ON DISK, failed writing index file %s",
-                                          worker->index_info->name);
-                    } else {
-                        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
-                                          "Failed to write index file %s, errno=%d (%s)\n",
-                                          worker->index_info->name, errno,
-                                          dblayer_strerror(errno));
-                    }
-                }
-            }
-        }
-        preclose_ret = ret;
-        /* Now close the files */
-        dblayer_close_file(&output_file);
-        /* Close the cursors */
-        /* Close and delete the files */
-        for (i = 0; i < passes; i++) {
-            DBC *cursor = input_cursors[i];
-            DB *db = input_files[i];
-            if (NULL != db) {
-                PR_ASSERT(NULL != cursor);
-                ret = cursor->c_close(cursor);
-                if (0 != ret) {
-                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 4");
-                }
-                ret = dblayer_close_file(&db);
-                if (0 != ret) {
-                    import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 5");
-                }
-                /* Now make the filename and delete the file */
-                {
-                    char *newname = NULL;
-                    char *oldname = NULL;
-                    ret = import_make_merge_filenames(inst->inst_dir_name,
-                                                      worker->index_info->name, i + 1, &oldname, &newname);
-                    if (0 != ret) {
-                        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 6");
-                    } else {
-                        ret = PR_Delete(newname);
-                        if (0 != ret) {
-                            import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file", "MERGE FAIL 7");
-                        }
-                        slapi_ch_free((void **)&newname);
-                        slapi_ch_free((void **)&oldname);
-                    }
-                }
-            }
-        }
-        if (preclose_ret != 0)
-            ret = preclose_ret;
-    }
-    if (EOF == ret) {
-        ret = 0;
-    }
-
-error:
-    slapi_ch_free((void **)&input_cursors);
-    slapi_ch_free((void **)&input_files);
-    if (ret) {
-        import_log_notice(worker->job, SLAPI_LOG_ERR, "import_merge_one_file",
-                          "%s: Import merge failed. "
-                          "If this is an online-import, shutdown the server "
-                          "and try the offline command line import (ldif2db)",
-                          inst->inst_name);
-    }
-    return ret;
-}
-
-/********** the real deal here: **********/
-
-/* Our mission here is as follows:
- * for each index job except entrydn and id2entry:
- *     open all the pass files
- *     open a new output file
- *     iterate cursors over all of the input files picking each distinct
- *         key and combining the input IDLs into a merged IDL. Put that
- *         IDL to the output file.
- */
-int
-import_mega_merge(ImportJob *job)
-{
-    ImportWorkerInfo *current_worker = NULL;
-    int ret = 0;
-    time_t beginning = 0;
-    time_t end = 0;
-    int passes = job->current_pass;
-
-    if (1 == job->number_indexers) {
-        import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge",
-                          "Beginning %d-way merge of one file...", passes);
-    } else {
-        import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge",
-                          "Beginning %d-way merge of up to %lu files...",
-                          passes, (long unsigned int)job->number_indexers);
-    }
-
-    beginning = slapi_current_utc_time();
-    /* Iterate over the files */
-    for (current_worker = job->worker_list;
-         (ret == 0) && (current_worker != NULL);
-         current_worker = current_worker->next) {
-        /* We need to ignore the primary index */
-        if ((current_worker->work_type != FOREMAN) &&
-            (current_worker->work_type != PRODUCER)) {
-            time_t file_beginning = 0;
-            time_t file_end = 0;
-            int key_count = 0;
-
-            file_beginning = slapi_current_utc_time();
-            ret = import_merge_one_file(current_worker, passes, &key_count);
-            file_end = slapi_current_utc_time();
-            if (key_count == 0) {
-                import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "No files to merge for \"%s\".",
-                                  current_worker->index_info->name);
-            } else {
-                if (-1 == key_count) {
-                    import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merged \"%s\": Simple merge - "
-                                                                                "file renamed.",
-                                      current_worker->index_info->name);
-                } else {
-                    import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merged \"%s\": %d keys merged "
-                                                                                "in %ld seconds.",
-                                      current_worker->index_info->name,
-                                      key_count, file_end - file_beginning);
-                }
-            }
-        }
-    }
-
-    end = slapi_current_utc_time();
-    if (0 == ret) {
-        int seconds_to_merge = end - beginning;
-        import_log_notice(job, SLAPI_LOG_INFO, "import_mega_merge", "Merging completed in %d seconds.",
-                          seconds_to_merge);
-    }
-
-    return ret;
-}

Файловите разлики са ограничени, защото са твърде много
+ 1 - 969
ldap/servers/slapd/back-ldbm/import.c


+ 1 - 6
ldap/servers/slapd/back-ldbm/import.h

@@ -203,14 +203,13 @@ struct _import_worker_info
 /* import.c */
 int import_fifo_validate_capacity_or_expand(ImportJob *job, size_t entrysize);
 FifoItem *import_fifo_fetch(ImportJob *job, ID id, int worker);
-void import_free_job(ImportJob *job);
 void import_log_notice(ImportJob *job, int log_level, char *subsystem, char *format, ...);
+void import_free_job(ImportJob *job);
 void import_abort_all(ImportJob *job, int wait_for_them);
 int import_entry_belongs_here(Slapi_Entry *e, backend *be);
 int import_make_merge_filenames(char *directory, char *indexname, int pass, char **oldname, char **newname);
 void import_main(void *arg);
 int import_main_offline(void *arg);
-int ldbm_back_ldif2ldbm_deluxe(Slapi_PBlock *pb);
 
 /* import-merge.c */
 int import_mega_merge(ImportJob *job);
@@ -218,8 +217,6 @@ int import_mega_merge(ImportJob *job);
 /* ldif2ldbm.c */
 void reset_progress(void);
 void report_progress(int count, int done);
-int add_op_attrs(Slapi_PBlock *pb, struct ldbminfo *li, struct backentry *ep, int *status);
-int update_subordinatecounts(backend *be, ImportJob *job, DB_TXN *txn);
 
 /* import-threads.c */
 void import_producer(void *param);
@@ -228,5 +225,3 @@ void upgradedn_producer(void *param);
 void import_foreman(void *param);
 void import_worker(void *param);
 
-/* ancestorid.c */
-int ldbm_ancestorid_create_index(backend *be, ImportJob *job);

+ 0 - 21
ldap/servers/slapd/back-ldbm/init.c

@@ -70,16 +70,6 @@ ldbm_back_init(Slapi_PBlock *pb)
     /* Initialize the set of instances. */
     li->li_instance_set = objset_new(&ldbm_back_instance_set_destructor);
 
-    /* initialize dblayer  */
-    if (dblayer_init(li)) {
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "dblayer_init failed\n");
-        goto fail;
-    }
-
-    /* Fill in the fields of the ldbminfo and the dblayer_private
-     * structures with some default values */
-    ldbm_config_setup_default(li);
-
     /* ask the factory to give us space in the Connection object
          * (only bulk import uses this)
          */
@@ -97,11 +87,6 @@ ldbm_back_init(Slapi_PBlock *pb)
     /* set plugin private pointer and initialize locks, etc. */
     rc = slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, (void *)li);
 
-    if ((li->li_dbcache_mutex = PR_NewLock()) == NULL) {
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "PR_NewLock failed\n");
-        goto fail;
-    }
-
     if ((li->li_shutdown_mutex = PR_NewLock()) == NULL) {
         slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "PR_NewLock failed\n");
         goto fail;
@@ -112,11 +97,6 @@ ldbm_back_init(Slapi_PBlock *pb)
         goto fail;
     }
 
-    if ((li->li_dbcache_cv = PR_NewCondVar(li->li_dbcache_mutex)) == NULL) {
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_init", "PR_NewCondVar failed\n");
-        goto fail;
-    }
-
     /* set all of the necessary database plugin callback functions */
     rc |= slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION,
                            (void *)SLAPI_PLUGIN_VERSION_03);
@@ -208,7 +188,6 @@ ldbm_back_init(Slapi_PBlock *pb)
     return (0);
 
 fail:
-    dblayer_terminate(li);
     ldbm_config_destroy(li);
     slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, NULL);
     return (-1);

+ 7 - 3
ldap/servers/slapd/back-ldbm/instance.c

@@ -12,6 +12,7 @@
 #endif
 
 #include "back-ldbm.h"
+#include "dblayer.h"
 
 /* Forward declarations */
 static void ldbm_instance_destructor(void **arg);
@@ -25,6 +26,7 @@ int
 ldbm_instance_create(backend *be, char *name)
 {
     struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
+    dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
     ldbm_instance *inst = NULL;
     int rc = 0;
 
@@ -100,6 +102,9 @@ ldbm_instance_create(backend *be, char *name)
     /* Initialize the fields with some default values. */
     ldbm_instance_config_setup_default(inst);
 
+    /* Call the backend implementation specific instance creation function */
+    priv->instance_create_fn(inst);
+
     /* Add this new instance to the the set of instances */
     {
         Object *instance_obj;
@@ -388,9 +393,8 @@ ldbm_instance_destructor(void **arg)
     PR_DestroyLock(inst->inst_config_mutex);
     slapi_ch_free_string(&inst->inst_dir_name);
     slapi_ch_free_string(&inst->inst_parent_dir_name);
-    /* These are removed in dblayer_terminate */
-    /* PR_DestroyMonitor(inst->inst_db_mutex); */
-    /* PR_DestroyLock(inst->inst_handle_list_mutex); */
+    PR_DestroyMonitor(inst->inst_db_mutex);
+    PR_DestroyLock(inst->inst_handle_list_mutex);
     PR_DestroyLock(inst->inst_nextid_mutex);
     PR_DestroyCondVar(inst->inst_indexer_cv);
     attrinfo_deletetree(inst);

Файловите разлики са ограничени, защото са твърде много
+ 176 - 962
ldap/servers/slapd/back-ldbm/ldbm_config.c


+ 7 - 1
ldap/servers/slapd/back-ldbm/ldbm_config.h

@@ -18,7 +18,8 @@ struct config_info;
 typedef struct config_info config_info;
 
 typedef int config_set_fn_t(void *arg, void *value, char *errorbuf, int phase, int apply);
-typedef void *config_get_fn_t(void *arg); /* The value for these is passed around as a
+typedef void *config_get_fn_t(void *arg);
+                                           /* The value for these is passed around as a
                                            * void *, the actual value should be gotten
                                            * by casting the void * as shown below. */
 #define CONFIG_TYPE_ONOFF 1     /* val = (int) value */
@@ -53,6 +54,7 @@ struct config_info
     int config_flags;
 };
 
+#define CONFIG_BACKEND_IMPLEMENT "nsslapd-backend-implement"
 #define CONFIG_INSTANCE "nsslapd-instance"
 #define CONFIG_LOOKTHROUGHLIMIT "nsslapd-lookthroughlimit"
 #define CONFIG_RANGELOOKTHROUGHLIMIT "nsslapd-rangelookthroughlimit"
@@ -145,11 +147,15 @@ struct config_info
 /* Some functions in ldbm_config.c used by ldbm_instance_config.c */
 int ldbm_config_add_dse_entries(struct ldbminfo *li, char **entries, char *string1, char *string2, char *string3, int flags);
 int ldbm_config_add_dse_entry(struct ldbminfo *li, char *entry, int flags);
+config_info *config_info_get(config_info *config_array, char *attr_name);
+void config_info_print_val(void *val, int type, char *buf);
 void ldbm_config_get(void *arg, config_info *config, char *buf);
 int ldbm_config_set(void *arg, char *attr_name, config_info *config_array, struct berval *bval, char *err_buf, int phase, int apply_mod, int mod_op);
 int ldbm_config_ignored_attr(char *attr_name);
 
 /* Functions in ldbm_instance_config.c used in ldbm_config.c */
+void ldbm_instance_config_get(ldbm_instance *inst, config_info *config, char *buf);
+int ldbm_instance_config_set(ldbm_instance *inst, char *attr_name, config_info *config_array, struct berval *bval, char *err_buf, int phase, int apply_mod, int mod_op);
 int ldbm_instance_config_load_dse_info(ldbm_instance *inst);
 int ldbm_instance_config_add_index_entry(ldbm_instance *inst, Slapi_Entry *e, int flags);
 int

+ 1 - 1
ldap/servers/slapd/back-ldbm/ldbm_index_config.c

@@ -186,7 +186,7 @@ ldbm_instance_index_config_delete_callback(Slapi_PBlock *pb,
         *returncode = LDAP_UNAVAILABLE;
         rc = SLAPI_DSE_CALLBACK_ERROR;
     } else {
-        if (dblayer_erase_index_file(inst->inst_be, ainfo, 0 /* do chkpt */)) {
+        if (dblayer_erase_index_file(inst->inst_be, ainfo, PR_TRUE, 0 /* do chkpt */)) {
             *returncode = LDAP_UNWILLING_TO_PERFORM;
             rc = SLAPI_DSE_CALLBACK_ERROR;
         }

+ 158 - 275
ldap/servers/slapd/back-ldbm/ldbm_instance_config.c

@@ -239,25 +239,6 @@ ldbm_instance_config_readonly_get(void *arg)
     return (void *)((uintptr_t)inst->inst_be->be_readonly);
 }
 
-static void *
-ldbm_instance_config_instance_dir_get(void *arg)
-{
-    ldbm_instance *inst = (ldbm_instance *)arg;
-
-    if (inst->inst_dir_name == NULL)
-        return slapi_ch_strdup("");
-    else if (inst->inst_parent_dir_name) {
-        int len = strlen(inst->inst_parent_dir_name) +
-                  strlen(inst->inst_dir_name) + 2;
-        char *full_inst_dir = (char *)slapi_ch_malloc(len);
-        PR_snprintf(full_inst_dir, len, "%s%c%s",
-                    inst->inst_parent_dir_name, get_sep(inst->inst_parent_dir_name),
-                    inst->inst_dir_name);
-        return full_inst_dir;
-    } else
-        return slapi_ch_strdup(inst->inst_dir_name);
-}
-
 static void *
 ldbm_instance_config_require_index_get(void *arg)
 {
@@ -266,49 +247,6 @@ ldbm_instance_config_require_index_get(void *arg)
     return (void *)((uintptr_t)inst->require_index);
 }
 
-static int
-ldbm_instance_config_instance_dir_set(void *arg,
-                                      void *value,
-                                      char *errorbuf __attribute__((unused)),
-                                      int phase __attribute__((unused)),
-                                      int apply)
-{
-    ldbm_instance *inst = (ldbm_instance *)arg;
-
-    if (!apply) {
-        return LDAP_SUCCESS;
-    }
-
-    if ((value == NULL) || (strlen(value) == 0)) {
-        inst->inst_dir_name = NULL;
-        inst->inst_parent_dir_name = NULL;
-    } else {
-        char *dir = (char *)value;
-        if (is_fullpath(dir)) {
-            char sep = get_sep(dir);
-            char *p = strrchr(dir, sep);
-            if (NULL == p) /* should not happens, tho */
-            {
-                inst->inst_parent_dir_name = NULL;
-                inst->inst_dir_name = rel2abspath(dir); /* normalize dir;
-                                                           strdup'ed in
-                                                           rel2abspath */
-            } else {
-                *p = '\0';
-                inst->inst_parent_dir_name = rel2abspath(dir); /* normalize dir;
-                                                                  strdup'ed in
-                                                                  rel2abspath */
-                inst->inst_dir_name = slapi_ch_strdup(p + 1);
-                *p = sep;
-            }
-        } else {
-            inst->inst_parent_dir_name = NULL;
-            inst->inst_dir_name = slapi_ch_strdup(dir);
-        }
-    }
-    return LDAP_SUCCESS;
-}
-
 static int
 ldbm_instance_config_readonly_set(void *arg,
                                   void *value,
@@ -369,7 +307,6 @@ static config_info ldbm_instance_config[] = {
     {CONFIG_INSTANCE_CACHEMEMSIZE, CONFIG_TYPE_UINT64, DEFAULT_CACHE_SIZE_STR, &ldbm_instance_config_cachememsize_get, &ldbm_instance_config_cachememsize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {CONFIG_INSTANCE_READONLY, CONFIG_TYPE_ONOFF, "off", &ldbm_instance_config_readonly_get, &ldbm_instance_config_readonly_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {CONFIG_INSTANCE_REQUIRE_INDEX, CONFIG_TYPE_ONOFF, "off", &ldbm_instance_config_require_index_get, &ldbm_instance_config_require_index_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
-    {CONFIG_INSTANCE_DIR, CONFIG_TYPE_STRING, NULL, &ldbm_instance_config_instance_dir_get, &ldbm_instance_config_instance_dir_set, CONFIG_FLAG_ALWAYS_SHOW},
     {CONFIG_INSTANCE_DNCACHEMEMSIZE, CONFIG_TYPE_UINT64, DEFAULT_DNCACHE_SIZE_STR, &ldbm_instance_config_dncachememsize_get, &ldbm_instance_config_dncachememsize_set, CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {NULL, 0, NULL, NULL, NULL, 0}};
 
@@ -383,6 +320,46 @@ ldbm_instance_config_setup_default(ldbm_instance *inst)
     }
 }
 
+
+/* Returns LDAP_SUCCESS on success */
+int
+ldbm_instance_config_set(ldbm_instance *inst, char *attr_name, config_info *config_array, struct berval *bval, char *err_buf, int phase, int apply_mod, int mod_op)
+{
+    config_info *config;
+    int rc = LDAP_SUCCESS;
+
+    config = config_info_get(config_array, attr_name);
+    if (NULL == config) {
+        struct ldbminfo *li = inst->inst_li;
+        dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
+        slapi_log_err(SLAPI_LOG_CONFIG, "ldbm_instance_config_set", "Unknown config attribute %s check db specific layer\n", attr_name);
+        slapi_create_errormsg(err_buf, SLAPI_DSE_RETURNTEXT_SIZE, "Unknown config attribute %s check db specific layer\n", attr_name);
+        rc = priv->instance_config_set_fn(inst, attr_name, apply_mod, mod_op, phase, bval);
+    } else {
+        rc = ldbm_config_set(inst, attr_name, config_array, bval, err_buf,phase, apply_mod, mod_op);
+    }
+
+    return rc;
+}
+
+void
+ldbm_instance_config_get(ldbm_instance *inst, config_info *config, char *buf)
+{
+    void *val = NULL;
+
+    if (config == NULL) {
+        buf[0] = '\0';
+        return;
+    }
+
+    val = config->config_get_fn((void *)inst);
+    config_info_print_val(val, config->config_type, buf);
+
+    if (config->config_type == CONFIG_TYPE_STRING) {
+        slapi_ch_free((void **)&val);
+    }
+}
+
 static int
 parse_ldbm_instance_entry(Slapi_Entry *e, char **instance_name)
 {
@@ -537,7 +514,7 @@ parse_ldbm_instance_config_entry(ldbm_instance *inst, Slapi_Entry *e, config_inf
         slapi_attr_first_value(attr, &sval);
         bval = (struct berval *)slapi_value_get_berval(sval);
 
-        if (ldbm_config_set((void *)inst, attr_name, config_array, bval,
+        if (ldbm_instance_config_set((void *)inst, attr_name, config_array, bval,
                             err_buf, CONFIG_PHASE_STARTUP, 1 /* apply */, LDAP_MOD_REPLACE) != LDAP_SUCCESS) {
             slapi_log_err(SLAPI_LOG_ERR, "parse_ldbm_instance_config_entry",
                           "Error with config attribute %s : %s\n",
@@ -764,6 +741,8 @@ ldbm_instance_search_config_entry_callback(Slapi_PBlock *pb __attribute__((unuse
     struct berval *vals[2];
     struct berval val;
     ldbm_instance *inst = (ldbm_instance *)arg;
+    struct ldbminfo *li = inst->inst_li;
+    dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
     config_info *config;
     int x;
     const Slapi_DN *suffix;
@@ -803,33 +782,15 @@ ldbm_instance_search_config_entry_callback(Slapi_PBlock *pb __attribute__((unuse
         slapi_entry_attr_replace(e, config->config_name, vals);
     }
 
+    /* NOTE (LK): need to extend with db specific attrs */
+    priv->instance_search_callback_fn(e, returncode, returntext, inst);
+
     PR_Unlock(inst->inst_config_mutex);
 
     *returncode = LDAP_SUCCESS;
     return SLAPI_DSE_CALLBACK_OK;
 }
 
-/* This function is used by the instance modify callback to add a new
- * suffix.  It return LDAP_SUCCESS on success.
- */
-int
-add_suffix(ldbm_instance *inst, struct berval **bvals, int apply_mod, char *returntext)
-{
-    Slapi_DN suffix;
-    int x;
-
-    returntext[0] = '\0';
-    for (x = 0; bvals[x]; x++) {
-        slapi_sdn_init_dn_byref(&suffix, bvals[x]->bv_val);
-        if (!slapi_be_issuffix(inst->inst_be, &suffix) && apply_mod) {
-            be_addsuffix(inst->inst_be, &suffix);
-        }
-        slapi_sdn_done(&suffix);
-    }
-
-    return LDAP_SUCCESS;
-}
-
 /*
  * Config. DSE callback for instance entry modifies.
  */
@@ -916,29 +877,6 @@ out:
     }
 }
 
-/* This function is used to set instance config attributes. It can be used as a
- * shortcut to doing an internal modify operation on the config DSE.
- */
-void
-ldbm_instance_config_internal_set(ldbm_instance *inst, char *attrname, char *value)
-{
-    char err_buf[SLAPI_DSE_RETURNTEXT_SIZE];
-    struct berval bval;
-
-    bval.bv_val = value;
-    bval.bv_len = strlen(value);
-
-    if (ldbm_config_set((void *)inst, attrname, ldbm_instance_config, &bval,
-                        err_buf, CONFIG_PHASE_INTERNAL, 1 /* apply */, LDAP_MOD_REPLACE) != LDAP_SUCCESS) {
-        slapi_log_err(SLAPI_LOG_CRIT,
-                      "ldbm_instance_config_internal_set",
-                      "Internal error setting instance config attr %s to %s: %s\n",
-                      attrname, value, err_buf);
-        exit(1);
-    }
-}
-
-
 static int
 ldbm_instance_generate(struct ldbminfo *li, char *instance_name, Slapi_Backend **ret_be)
 {
@@ -984,102 +922,6 @@ bail:
     return rc;
 }
 
-int
-ldbm_instance_postadd_instance_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
-                                              Slapi_Entry *entryBefore,
-                                              Slapi_Entry *entryAfter __attribute__((unused)),
-                                              int *returncode __attribute__((unused)),
-                                              char *returntext __attribute__((unused)),
-                                              void *arg)
-{
-    backend *be = NULL;
-    struct ldbm_instance *inst;
-    char *instance_name;
-    struct ldbminfo *li = (struct ldbminfo *)arg;
-    int rval = 0;
-
-    parse_ldbm_instance_entry(entryBefore, &instance_name);
-    rval = ldbm_instance_generate(li, instance_name, &be);
-    if (rval) {
-        slapi_log_err(SLAPI_LOG_ERR,
-                      "ldbm_instance_postadd_instance_entry_callback",
-                      "ldbm_instance_generate (%s) failed (%d)\n",
-                      instance_name, rval);
-    }
-
-    inst = ldbm_instance_find_by_name(li, instance_name);
-
-    /* Add default indexes */
-    ldbm_instance_create_default_user_indexes(inst);
-
-    /* Initialize and register callbacks for VLV indexes */
-    vlv_init(inst);
-
-    /* this is an ACTUAL ADD being done while the server is running!
-     * start up the appropriate backend...
-     */
-    rval = ldbm_instance_start(be);
-    if (0 != rval) {
-        slapi_log_err(SLAPI_LOG_ERR,
-                      "ldbm_instance_postadd_instance_entry_callback",
-                      "ldbm_instnace_start (%s) failed (%d)\n",
-                      instance_name, rval);
-    }
-
-    slapi_ch_free((void **)&instance_name);
-
-    /* instance must be fully ready before we call this */
-    slapi_mtn_be_started(be);
-
-    return SLAPI_DSE_CALLBACK_OK;
-}
-
-int
-ldbm_instance_add_instance_entry_callback(Slapi_PBlock *pb,
-                                          Slapi_Entry *entryBefore,
-                                          Slapi_Entry *entryAfter __attribute__((unused)),
-                                          int *returncode,
-                                          char *returntext,
-                                          void *arg)
-{
-    char *instance_name;
-    struct ldbm_instance *inst = NULL;
-    struct ldbminfo *li = (struct ldbminfo *)arg;
-    int rc = 0;
-
-    parse_ldbm_instance_entry(entryBefore, &instance_name);
-
-    /* Make sure we don't create two instances with the same name. */
-    inst = ldbm_instance_find_by_name(li, instance_name);
-    if (inst != NULL) {
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_add_instance_entry_callback",
-                      "ldbm instance %s already exists\n", instance_name);
-        if (returntext != NULL)
-            PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "An ldbm instance with the name %s already exists\n",
-                        instance_name);
-        if (returncode != NULL)
-            *returncode = LDAP_UNWILLING_TO_PERFORM;
-        slapi_ch_free((void **)&instance_name);
-        return SLAPI_DSE_CALLBACK_ERROR;
-    }
-
-    if (pb == NULL) {
-        /* called during startup -- do the rest now */
-        rc = ldbm_instance_generate(li, instance_name, NULL);
-        if (!rc) {
-            inst = ldbm_instance_find_by_name(li, instance_name);
-            rc = ldbm_instance_create_default_user_indexes(inst);
-        }
-    }
-    /* if called during a normal ADD operation, the postadd callback
-     * will do the rest.
-     */
-
-    slapi_ch_free((void **)&instance_name);
-    return (rc == 0) ? SLAPI_DSE_CALLBACK_OK : SLAPI_DSE_CALLBACK_ERROR;
-}
-
-
 /* unregister the DSE callbacks on a backend -- this needs to be done when
  * deleting a backend, so that adding the same backend later won't cause
  * these expired callbacks to be called.
@@ -1181,6 +1023,107 @@ ldbm_instance_unregister_callbacks(ldbm_instance *inst)
 bail:
     slapi_ch_free_string(&dn);
 }
+int
+ldbm_instance_postadd_instance_entry_callback(Slapi_PBlock *pb __attribute__((unused)),
+                                              Slapi_Entry *entryBefore,
+                                              Slapi_Entry *entryAfter __attribute__((unused)),
+                                              int *returncode __attribute__((unused)),
+                                              char *returntext __attribute__((unused)),
+                                              void *arg)
+{
+    backend *be = NULL;
+    struct ldbm_instance *inst;
+    char *instance_name;
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    dblayer_private *priv = NULL;
+    int rval = 0;
+
+    parse_ldbm_instance_entry(entryBefore, &instance_name);
+    rval = ldbm_instance_generate(li, instance_name, &be);
+
+    inst = ldbm_instance_find_by_name(li, instance_name);
+
+    /* Add default indexes */
+    ldbm_instance_create_default_user_indexes(inst);
+
+    /* Initialize and register callbacks for VLV indexes */
+    vlv_init(inst);
+
+    /* this is an ACTUAL ADD being done while the server is running!
+     * start up the appropriate backend...
+     */
+    rval = ldbm_instance_start(be);
+    if (0 != rval) {
+        slapi_log_err(SLAPI_LOG_ERR,
+                      "ldbm_instance_postadd_instance_entry_callback",
+                      "ldbm_instance_start (%s) failed (%d)\n",
+                      instance_name, rval);
+    }
+
+
+    /* call the backend implementation specific callbacks */
+    priv = (dblayer_private *)li->li_dblayer_private;
+    priv->instance_postadd_config_fn(li, inst);
+
+    slapi_ch_free((void **)&instance_name);
+
+    /* instance must be fully ready before we call this */
+    slapi_mtn_be_started(be);
+
+    return SLAPI_DSE_CALLBACK_OK;
+}
+
+int
+ldbm_instance_add_instance_entry_callback(Slapi_PBlock *pb,
+                                          Slapi_Entry *entryBefore,
+                                          Slapi_Entry *entryAfter __attribute__((unused)),
+                                          int *returncode,
+                                          char *returntext,
+                                          void *arg)
+{
+    char *instance_name;
+    struct ldbm_instance *inst = NULL;
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    dblayer_private *priv = NULL;
+    int rc = 0;
+
+    parse_ldbm_instance_entry(entryBefore, &instance_name);
+
+    /* Make sure we don't create two instances with the same name. */
+    inst = ldbm_instance_find_by_name(li, instance_name);
+    if (inst != NULL) {
+        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_instance_add_instance_entry_callback",
+                      "ldbm instance %s already exists\n", instance_name);
+        if (returntext != NULL)
+            PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE, "An ldbm instance with the name %s already exists\n",
+                        instance_name);
+        if (returncode != NULL)
+            *returncode = LDAP_UNWILLING_TO_PERFORM;
+        slapi_ch_free((void **)&instance_name);
+        return SLAPI_DSE_CALLBACK_ERROR;
+    }
+
+    if (pb == NULL) {
+        /* called during startup -- do the rest now */
+        rc = ldbm_instance_generate(li, instance_name, NULL);
+        if (!rc) {
+            inst = ldbm_instance_find_by_name(li, instance_name);
+            rc = ldbm_instance_create_default_user_indexes(inst);
+        }
+    }
+    /* if called during a normal ADD operation, the postadd callback
+     * will do the rest.
+     */
+
+
+    /* call the backend implementation specific callbacks */
+    priv = (dblayer_private *)li->li_dblayer_private;
+    priv->instance_add_config_fn(li, inst);
+
+    slapi_ch_free((void **)&instance_name);
+    return (rc == 0) ? SLAPI_DSE_CALLBACK_OK : SLAPI_DSE_CALLBACK_ERROR;
+}
+
 
 
 int
@@ -1194,6 +1137,7 @@ ldbm_instance_post_delete_instance_entry_callback(Slapi_PBlock *pb __attribute__
     char *instance_name;
     struct ldbminfo *li = (struct ldbminfo *)arg;
     struct ldbm_instance *inst = NULL;
+    dblayer_private *priv = NULL;
 
     parse_ldbm_instance_entry(entryBefore, &instance_name);
     inst = ldbm_instance_find_by_name(li, instance_name);
@@ -1219,79 +1163,12 @@ ldbm_instance_post_delete_instance_entry_callback(Slapi_PBlock *pb __attribute__
     if (entryrdn_get_switch()) { /* subtree-rename: on */
         cache_destroy_please(&inst->inst_dncache, CACHE_TYPE_DN);
     }
-    {
-        struct ldbminfo *li = (struct ldbminfo *)inst->inst_be->be_database->plg_private;
-        dblayer_private *priv = (dblayer_private *)li->li_dblayer_private;
-        struct dblayer_private_env *pEnv = priv->dblayer_env;
-        if (pEnv) {
-            PRDir *dirhandle = NULL;
-            char inst_dir[MAXPATHLEN * 2];
-            char *inst_dirp = NULL;
-
-            if (inst->inst_dir_name == NULL) {
-                dblayer_get_instance_data_dir(inst->inst_be);
-            }
-            inst_dirp = dblayer_get_full_inst_dir(li, inst,
-                                                  inst_dir, MAXPATHLEN * 2);
-            if (NULL != inst_dirp) {
-                dirhandle = PR_OpenDir(inst_dirp);
-                /* the db dir instance may have been removed already */
-                if (dirhandle) {
-                    PRDirEntry *direntry = NULL;
-                    char *dbp = NULL;
-                    char *p = NULL;
-                    while (NULL != (direntry = PR_ReadDir(dirhandle,
-                                                          PR_SKIP_DOT | PR_SKIP_DOT_DOT))) {
-                        int rc;
-                        if (!direntry->name)
-                            break;
-
-                        dbp = PR_smprintf("%s/%s", inst_dirp, direntry->name);
-                        if (NULL == dbp) {
-                            slapi_log_err(SLAPI_LOG_ERR,
-                                          "ldbm_instance_post_delete_instance_entry_callback",
-                                          "Failed to generate db path: %s/%s\n",
-                                          inst_dirp, direntry->name);
-                            break;
-                        }
-
-                        p = strstr(dbp, LDBM_FILENAME_SUFFIX);
-                        if (NULL != p &&
-                            strlen(p) == strlen(LDBM_FILENAME_SUFFIX)) {
-                            rc = dblayer_db_remove(pEnv, dbp, 0);
-                        } else {
-                            rc = PR_Delete(dbp);
-                        }
-                        PR_ASSERT(rc == 0);
-                        if (rc != 0) {
-                            slapi_log_err(SLAPI_LOG_ERR,
-                                          "ldbm_instance_post_delete_instance_entry_callback",
-                                          "Failed to delete %s, error %d\n", dbp, rc);
-                        }
-                        PR_smprintf_free(dbp);
-                    }
-                    PR_CloseDir(dirhandle);
-                }
-                /*
-                 * When a backend was removed, the db instance directory
-                 * was removed as well (See also bz463774).
-                 * In case DB_RECOVER_FATAL is set in the DB open after
-                 * the removal (e.g., in restore), the logs in the transaction
-                 * logs are replayed and compared with the contents of the DB
-                 * files.  At that time, if the db instance directory does not
-                 * exist, libdb returns FATAL error.  To prevent the problem,
-                 * we have to leave the empty directory. (bz597375)
-                 *
-                 * PR_RmDir(inst_dirp);
-                 */
-            } /* non-null dirhandle */
-            if (inst_dirp != inst_dir) {
-                slapi_ch_free_string(&inst_dirp);
-            }
-        } /* non-null pEnv */
-    }
+    /* call the backend implementation specific callbacks */
+    priv = (dblayer_private *)li->li_dblayer_private;
+    priv->instance_postdel_config_fn(li, inst);
 
     ldbm_instance_unregister_callbacks(inst);
+    vlv_close(inst);
     slapi_be_free(&inst->inst_be);
     ldbm_instance_destroy(inst);
     slapi_ch_free((void **)&instance_name);
@@ -1310,6 +1187,7 @@ ldbm_instance_delete_instance_entry_callback(Slapi_PBlock *pb __attribute__((unu
     char *instance_name = NULL;
     struct ldbminfo *li = (struct ldbminfo *)arg;
     struct ldbm_instance *inst = NULL;
+    dblayer_private *priv = NULL;
 
     parse_ldbm_instance_entry(entryBefore, &instance_name);
     inst = ldbm_instance_find_by_name(li, instance_name);
@@ -1351,6 +1229,11 @@ ldbm_instance_delete_instance_entry_callback(Slapi_PBlock *pb __attribute__((unu
     slapi_log_err(SLAPI_LOG_INFO, "ldbm_instance_delete_instance_entry_callback",
                   "Bringing %s offline...\n", instance_name);
     slapi_mtn_be_stopping(inst->inst_be);
+
+    /* call the backend implementation specific callbacks */
+    priv = (dblayer_private *)li->li_dblayer_private;
+    priv->instance_del_config_fn(li, inst);
+
     dblayer_instance_close(inst->inst_be);
     slapi_ch_free((void **)&instance_name);
 

Файловите разлики са ограничени, защото са твърде много
+ 8 - 1011
ldap/servers/slapd/back-ldbm/ldif2ldbm.c


+ 11 - 36
ldap/servers/slapd/back-ldbm/proto-back-ldbm.h

@@ -25,6 +25,7 @@ void ainfo_get(backend *be, char *type, struct attrinfo **at);
 void attr_masks(backend *be, char *type, int *indexmask, int *syntaxmask);
 void attr_masks_ex(backend *be, char *type, int *indexmask, int *syntaxmask, struct attrinfo **at);
 int attr_index_config(backend *be, char *fname, int lineno, Slapi_Entry *e, int init, int none);
+int db2index_add_indexed_attr(backend *be, char *attrString);
 int ldbm_compute_init(void);
 void attrinfo_deletetree(ldbm_instance *inst);
 void attr_create_empty(backend *be, char *type, struct attrinfo **ai);
@@ -72,21 +73,15 @@ struct backdn *dncache_find_id(struct cache *cache, ID id);
  * dblayer.c
  */
 int dblayer_init(struct ldbminfo *li);
-int dblayer_terminate(struct ldbminfo *li);
+int dblayer_setup(struct ldbminfo *li);
 int dblayer_start(struct ldbminfo *li, int dbmode);
 int dblayer_close(struct ldbminfo *li, int dbmode);
-void dblayer_pre_close(struct ldbminfo *li);
-int dblayer_post_close(struct ldbminfo *li, int dbmode);
 int dblayer_instance_close(backend *be);
 int dblayer_get_index_file(backend *be, struct attrinfo *a, DB **ppDB, int create);
 int dblayer_release_index_file(backend *be, struct attrinfo *a, DB *pDB);
-int dblayer_erase_index_file(backend *be, struct attrinfo *a, int no_force_chkpt);
-int dblayer_erase_index_file_nolock(backend *be, struct attrinfo *a, int no_force_chkpt);
+int dblayer_erase_index_file(backend *be, struct attrinfo *a, PRBool use_lock, int no_force_chkpt);
 int dblayer_get_id2entry(backend *be, DB **ppDB);
 int dblayer_release_id2entry(backend *be, DB *pDB);
-int dblayer_get_aux_id2entry(backend *be, DB **ppDB, DB_ENV **ppEnv, char **path);
-int dblayer_get_aux_id2entry_ext(backend *be, DB **ppDB, DB_ENV **ppEnv, char **path, int flags);
-int dblayer_release_aux_id2entry(backend *be, DB *pDB, DB_ENV *pEnv);
 int dblayer_txn_init(struct ldbminfo *li, back_txn *txn);
 int dblayer_txn_begin(backend *be, back_txnid parent_txn, back_txn *txn);
 int dblayer_txn_begin_ext(struct ldbminfo *li, back_txnid parent_txn, back_txn *txn, PRBool use_lock);
@@ -106,21 +101,15 @@ void dblayer_lock_backend(backend *be);
 int dblayer_plugin_begin(Slapi_PBlock *pb);
 int dblayer_plugin_commit(Slapi_PBlock *pb);
 int dblayer_plugin_abort(Slapi_PBlock *pb);
-int dblayer_memp_stat(struct ldbminfo *li, DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp);
-int dblayer_memp_stat_instance(ldbm_instance *inst, DB_MPOOL_STAT **gsp, DB_MPOOL_FSTAT ***fsp);
 int dblayer_backup(struct ldbminfo *li, char *destination_directory, Slapi_Task *task);
-int dblayer_restore(struct ldbminfo *li, char *source_directory, Slapi_Task *task, char *bename);
-int dblayer_copy_directory(struct ldbminfo *li, Slapi_Task *task, char *instance_dir, char *destination_dir, int restore, int *cnt, int indexonly, int resetlsns, int is_changelog);
+int dblayer_restore(struct ldbminfo *li, char *source_directory, Slapi_Task *task);
 int dblayer_copyfile(char *source, char *destination, int overwrite, int mode);
 int dblayer_delete_instance_dir(backend *be);
 int dblayer_delete_database(struct ldbminfo *li);
 int dblayer_database_size(struct ldbminfo *li, unsigned int *size);
-int dblayer_terminate(struct ldbminfo *li);
 int dblayer_close_indexes(backend *be);
 int dblayer_open_file(backend *be, char *indexname, int create, struct attrinfo *ai, DB **ppDB);
-int dblayer_close_file(DB **db);
 void dblayer_remember_disk_filled(struct ldbminfo *li);
-int dblayer_open_huge_file(const char *path, int oflag, int mode);
 int dblayer_instance_start(backend *be, int normal_mode);
 int dblayer_make_new_instance_data_dir(backend *be);
 int dblayer_get_instance_data_dir(backend *be);
@@ -129,28 +118,22 @@ PRInt64 db_atol(char *str, int *err);
 PRInt64 db_atoi(char *str, int *err);
 uint32_t db_strtoul(const char *str, int *err);
 uint64_t db_strtoull(const char *str, int *err);
-int dblayer_set_batch_transactions(void *arg, void *value, char *errorbuf, int phase, int apply);
-int dblayer_set_batch_txn_min_sleep(void *arg, void *value, char *errorbuf, int phase, int apply);
-int dblayer_set_batch_txn_max_sleep(void *arg, void *value, char *errorbuf, int phase, int apply);
-void *dblayer_get_batch_transactions(void *arg);
-void *dblayer_get_batch_txn_min_sleep(void *arg);
-void *dblayer_get_batch_txn_max_sleep(void *arg);
+int bdb_set_batch_transactions(void *arg, void *value, char *errorbuf, int phase, int apply);
+int bdb_set_batch_txn_min_sleep(void *arg, void *value, char *errorbuf, int phase, int apply);
+int bdb_set_batch_txn_max_sleep(void *arg, void *value, char *errorbuf, int phase, int apply);
+void *bdb_get_batch_transactions(void *arg);
+void *bdb_get_batch_txn_min_sleep(void *arg);
+void *bdb_get_batch_txn_max_sleep(void *arg);
 int dblayer_in_import(ldbm_instance *inst);
 
 int dblayer_update_db_ext(ldbm_instance *inst, char *oldext, char *newext);
-void dblayer_set_recovery_required(struct ldbminfo *li);
 
-char *dblayer_get_home_dir(struct ldbminfo *li, int *dbhome);
 char *dblayer_get_full_inst_dir(struct ldbminfo *li, ldbm_instance *inst, char *buf, int buflen);
-int check_and_set_import_cache(struct ldbminfo *li);
 
 int dblayer_db_uses_locking(DB_ENV *db_env);
 int dblayer_db_uses_transactions(DB_ENV *db_env);
 int dblayer_db_uses_mpool(DB_ENV *db_env);
 int dblayer_db_uses_logging(DB_ENV *db_env);
-int dblayer_bt_compare(DB *db, const DBT *dbt1, const DBT *dbt2);
-int dblayer_remove_env(struct ldbminfo *li);
-PRUint64 dblayer_get_id2entry_size(ldbm_instance *inst);
 
 int ldbm_back_get_info(Slapi_Backend *be, int cmd, void **info);
 int ldbm_back_set_info(Slapi_Backend *be, int cmd, void *info);
@@ -158,7 +141,6 @@ int ldbm_back_ctrl_info(Slapi_Backend *be, int cmd, void *info);
 
 int dblayer_is_restored(void);
 void dblayer_set_restored(void);
-int dblayer_restore_file_check(struct ldbminfo *li);
 int dblayer_restore_file_init(struct ldbminfo *li);
 void dblayer_restore_file_update(struct ldbminfo *li, char *directory);
 int dblayer_import_file_init(ldbm_instance *inst);
@@ -332,9 +314,6 @@ void import_subcount_stuff_init(import_subcount_stuff *stuff);
 void import_subcount_stuff_term(import_subcount_stuff *stuff);
 void import_configure_index_buffer_size(size_t size);
 size_t import_get_index_buffer_size(void);
-int ldbm_back_fetch_incl_excl(Slapi_PBlock *pb, char ***include, char ***exclude);
-void ldbm_back_free_incl_excl(char **include, char **exclude);
-int ldbm_back_ok_to_dump(const char *dn, char **include, char **exclude);
 int ldbm_back_wire_import(Slapi_PBlock *pb);
 void *factory_constructor(void *object, void *parent);
 void factory_destructor(void *extension, void *object, void *parent);
@@ -606,9 +585,7 @@ int ldbm_attribute_always_indexed(const char *attrtype);
 /*
  * dbversion.c
  */
-int dbversion_write(struct ldbminfo *li, const char *directory, const char *dataversion, PRUint32 flags);
 int dbversion_read(struct ldbminfo *li, const char *directory, char **ldbmversion, char **dataversion);
-int dbversion_exists(struct ldbminfo *li, const char *directory);
 
 /*
  * config_ldbm.c
@@ -616,8 +593,6 @@ int dbversion_exists(struct ldbminfo *li, const char *directory);
 int ldbm_config_load_dse_info(struct ldbminfo *li);
 void ldbm_config_setup_default(struct ldbminfo *li);
 void ldbm_config_internal_set(struct ldbminfo *li, char *attrname, char *value);
-void *ldbm_config_db_logdirectory_get_ext(void *arg);
-void ldbm_instance_config_internal_set(ldbm_instance *inst, char *attrname, char *value);
 void ldbm_instance_config_setup_default(ldbm_instance *inst);
 int ldbm_instance_postadd_instance_entry_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
 int ldbm_instance_add_instance_entry_callback(Slapi_PBlock *pb, Slapi_Entry *entryBefore, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
@@ -659,7 +634,7 @@ int ldbm_ancestorid_move_subtree(
  * import-threads.c
  */
 int dse_conf_backup(struct ldbminfo *li, char *destination_directory);
-int dse_conf_verify(struct ldbminfo *li, char *src_dir, char *bename);
+int dse_conf_verify(struct ldbminfo *li, char *src_dir);
 
 /*
  * ldbm_attrcrypt.c

+ 4 - 1
ldap/servers/slapd/back-ldbm/rmdb.c

@@ -19,11 +19,13 @@
  */
 
 #include "back-ldbm.h"
+#include "dblayer.h"
 
 int
 ldbm_back_rmdb(Slapi_PBlock *pb)
 {
     struct ldbminfo *li = NULL;
+    dblayer_private *priv;
     /* char            *directory = NULL;*/
     int return_value = -1;
     Slapi_Backend *be;
@@ -48,8 +50,9 @@ ldbm_back_rmdb(Slapi_PBlock *pb)
     }
 
     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
+    priv = (dblayer_private *)li->li_dblayer_private;
     /*    slapi_pblock_get( pb, SLAPI_SEQ_VAL, &directory );*/
-    return_value = dblayer_delete_database(li);
+    return_value = priv->dblayer_delete_db_fn(li);
 
     if (return_value == 0)
         be->be_state = BE_STATE_DELETED;

+ 34 - 269
ldap/servers/slapd/back-ldbm/start.c

@@ -16,6 +16,7 @@
  */
 
 #include "back-ldbm.h"
+#include "dblayer.h"
 
 
 static int initialized = 0;
@@ -26,263 +27,6 @@ ldbm_back_isinitialized()
     return initialized;
 }
 
-static int
-ldbm_back_start_autotune(struct ldbminfo *li)
-{
-    Object *inst_obj = NULL;
-    ldbm_instance *inst = NULL;
-    /* size_t is a platform unsigned int, IE uint64_t */
-    uint64_t total_cache_size = 0;
-    uint64_t entry_size = 0;
-    uint64_t dn_size = 0;
-    uint64_t zone_size = 0;
-    uint64_t import_size = 0;
-    uint64_t db_size = 0;
-    /* For clamping the autotune value to a 64Mb boundary */
-    uint64_t clamp_div = 0;
-    /* Backend count */
-    uint64_t backend_count = 0;
-
-    int_fast32_t autosize_percentage = 0;
-    int_fast32_t autosize_db_percentage_split = 0;
-    int_fast32_t import_percentage = 0;
-    util_cachesize_result issane;
-    char *msg = "";       /* This will be set by one of the two cache sizing paths below. */
-    char size_to_str[32]; /* big enough to hold %ld */
-
-
-    /* == Begin autotune == */
-
-    /*
-    * The process that we take here now defaults to autotune first, then override
-    * with manual values if so chosen.
-    *
-    * This means first off, we need to check for valid autosizing values.
-    * We then calculate what our system tuning would be. We clamp these to the
-    * nearest value. IE 487MB would be 510656512 bytes, so we clamp this to
-    * 536870912 bytes, aka 512MB. This is aligned to 64MB boundaries.
-    *
-    * Now that we have these values, we then check the values of dbcachesize
-    * and cachememsize. If they are 0, we set them to the auto-calculated value.
-    * If they are non-0, we skip the value.
-    *
-    * This way, we are really autotuning on "first run", and if the admin wants
-    * to up the values, they merely need to reset the value to 0, and let the
-    * server restart.
-    *
-    * wibrown 2017
-    */
-
-    /* sanity check the autosizing values,
-     no value or sum of values larger than 100.
-    */
-    backend_count = objset_size(li->li_instance_set);
-
-    /* If autosize == 0, set autosize_per to 10. */
-    if (li->li_cache_autosize <= 0) {
-        /* First, set our message. In the case autosize is 0, we calculate some
-         * sane defaults and populate these values, but it's only on first run.
-         */
-        msg = "This can be corrected by altering the values of nsslapd-dbcachesize, nsslapd-cachememsize and nsslapd-dncachememsize\n";
-        autosize_percentage = 10;
-    } else {
-        /* In this case we really are setting the values each start up, so
-         * change the msg.
-         */
-        msg = "This can be corrected by altering the values of nsslapd-cache-autosize, nsslapd-cache-autosize-split and nsslapd-dncachememsize\n";
-        autosize_percentage = li->li_cache_autosize;
-    }
-    /* Has to be less than 0, 0 means to disable I think */
-    if (li->li_import_cache_autosize < 0) {
-        import_percentage = 50;
-    } else {
-        import_percentage = li->li_import_cache_autosize;
-    }
-    /* This doesn't control the availability of the feature, so we can take the
-     * default from ldbm_config.c
-     */
-    if (li->li_cache_autosize_split == 0) {
-        autosize_db_percentage_split = 25;
-    } else {
-        autosize_db_percentage_split = li->li_cache_autosize_split;
-    }
-
-
-    /* Check the values are sane. */
-    if ((autosize_percentage > 100) || (import_percentage > 100) || (autosize_db_percentage_split > 100) ||
-        ((autosize_percentage > 0) && (import_percentage > 0) && (autosize_percentage + import_percentage > 100))) {
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Cache autosizing: bad settings, value or sum of values can not larger than 100.\n");
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "You should change nsslapd-cache-autosize + nsslapd-import-cache-autosize in dse.ldif to be less than 100.\n");
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Reasonable starting values are nsslapd-cache-autosize: 10, nsslapd-import-cache-autosize: -1.\n");
-        return SLAPI_FAIL_GENERAL;
-    }
-
-    /* Get our platform memory values. */
-    slapi_pal_meminfo *mi = spal_meminfo_get();
-    if (mi == NULL) {
-        slapi_log_err(SLAPI_LOG_CRIT, "ldbm_back_start", "Unable to determine system page limits\n");
-        return SLAPI_FAIL_GENERAL;
-    }
-
-    /* calculate the needed values */
-    zone_size = (autosize_percentage * mi->system_total_bytes) / 100;
-    /* This is how much we "might" use, lets check it's sane. */
-    /* In the case it is not, this will *reduce* the allocation */
-    issane = util_is_cachesize_sane(mi, &zone_size);
-    if (issane == UTIL_CACHESIZE_REDUCED) {
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Your autosized cache values have been reduced. Likely your nsslapd-cache-autosize percentage is too high.\n");
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s", msg);
-    }
-    /* It's valid, lets divide it up and set according to user prefs */
-    db_size = (autosize_db_percentage_split * zone_size) / 100;
-
-    /* Cap the DB size at 1.5G, as this doesn't help perf much more (lkrispen's advice) */
-    /* NOTE: Do we need a minimum DB size? */
-    if (db_size > (1536 * MEGABYTE)) {
-        db_size = (1536 * MEGABYTE);
-    }
-
-
-    /* NOTE: Because of how we workout entry_size, even if
-     * have autosize split to say ... 90% for dbcache, because
-     * we cap db_size, we use zone_size - db_size, meaning that entry
-     * cache still gets the remaining memory *even* though we didn't use it all.
-     * If we didn't do this, entry_cache would only get 10% of of the avail, even
-     * if db_size was caped at say 5% down from 90.
-     */
-    if (backend_count > 0) {
-        /* Number of entry cache pages per backend. */
-        entry_size = (zone_size - db_size) / backend_count;
-        /* Now split this into dn and entry */
-        dn_size = entry_size * 0.1;
-        entry_size = entry_size * 0.9;
-        /* Now, clamp this value to a 64mb boundary. */
-        /* Now divide the entry pages by this, and also mod. If mod != 0, we need
-         * to add 1 to the diveded number. This should give us:
-         * 510 * 1024 * 1024 == 510MB
-         * 534773760 bytes
-         * 130560 pages at 4096 pages.
-         * 16384 pages for 64Mb
-         * 130560 / 16384 = 7
-         * 130560 % 16384 = 15872 which is != 0
-         * therfore 7 + 1, aka 8 * 16384 = 131072 pages = 536870912 bytes = 512MB.
-         */
-        if (entry_size % (64 * MEGABYTE) != 0) {
-            /* If we want to clamp down, remove the "+1". This would change the above from 510mb -> 448mb. */
-            clamp_div = (entry_size / (64 * MEGABYTE)) + 1;
-            entry_size = clamp_div * (64 * MEGABYTE);
-        }
-        if (dn_size % (64 * MEGABYTE) != 0) {
-            /* If we want to clamp down, remove the "+1". This would change the above from 510mb -> 448mb. */
-            clamp_div = (dn_size / (64 * MEGABYTE)) + 1;
-            dn_size = clamp_div * (64 * MEGABYTE);
-        }
-    }
-
-    slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %" PRIu64 "k physical memory\n", mi->system_total_bytes / 1024);
-    slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "found %" PRIu64 "k available\n", mi->system_available_bytes / 1024);
-
-    /* We've now calculated the autotuning values. Do we need to apply it?
-     * we use the logic of "if size is 0, or autosize is > 0. This way three
-     * options can happen.
-     *
-     * First, during first run, dbcache is 0, and autosize is 0. So we apply
-     * the autotuned value ONLY on first run.
-     * Second, once the admin sets a value, or autotuning set a value, it sticks.
-     * Third, if the admin really does want autosizing to take effect every
-     * start up, we disregard the defined value.
-     */
-
-    /* First, check the dbcache */
-    if (li->li_dbcachesize == 0 || li->li_cache_autosize > 0) {
-        slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: db cache: %" PRIu64 "k\n", db_size / 1024);
-        if (db_size < (500 * MEGABYTE)) {
-            db_size = db_size / 1.25;
-        }
-        /* Have to set this value through text. */
-        sprintf(size_to_str, "%" PRIu64, db_size);
-        ldbm_config_internal_set(li, CONFIG_DBCACHESIZE, size_to_str);
-    }
-    total_cache_size += li->li_dbcachesize;
-
-    /* For each backend */
-    /*   apply the appropriate cache size if 0 */
-    if (backend_count > 0) {
-        li->li_cache_autosize_ec = entry_size;
-        li->li_dncache_autosize_ec = dn_size;
-    }
-
-    for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
-         inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
-
-        inst = (ldbm_instance *)object_get_data(inst_obj);
-        uint64_t cache_size = (uint64_t)cache_get_max_size(&(inst->inst_cache));
-        uint64_t dncache_size = (uint64_t)cache_get_max_size(&(inst->inst_dncache));
-
-        /* This is the point where we decide to apply or not.
-         * We have to check for the mincachesize as setting 0 resets
-         * to this value. This could cause an issue with a *tiny* install, but
-         * it's highly unlikely.
-         */
-        if (cache_size == 0 || cache_size == MINCACHESIZE || li->li_cache_autosize > 0) {
-            slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: %s entry cache (%" PRIu64 " total): %" PRIu64 "k\n", inst->inst_name, backend_count, entry_size / 1024);
-            cache_set_max_entries(&(inst->inst_cache), -1);
-            cache_set_max_size(&(inst->inst_cache), li->li_cache_autosize_ec, CACHE_TYPE_ENTRY);
-        }
-        if (dncache_size == 0 || dncache_size == MINCACHESIZE || li->li_cache_autosize > 0) {
-            slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: %s dn cache (%" PRIu64 " total): %" PRIu64 "k\n", inst->inst_name, backend_count, dn_size / 1024);
-            cache_set_max_entries(&(inst->inst_dncache), -1);
-            cache_set_max_size(&(inst->inst_dncache), li->li_dncache_autosize_ec, CACHE_TYPE_DN);
-        }
-        /* Refresh this value now. */
-        cache_size = (PRUint64)cache_get_max_size(&(inst->inst_cache));
-        db_size = dblayer_get_id2entry_size(inst);
-        if (cache_size < db_size) {
-            slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start",
-                          "%s: entry cache size %" PRIu64 " B is "
-                          "less than db size %" PRIu64 " B; "
-                          "We recommend to increase the entry cache size "
-                          "nsslapd-cachememsize.\n",
-                          inst->inst_name, cache_size, db_size);
-        }
-        total_cache_size += cache_size;
-        total_cache_size += dncache_size;
-    }
-    /* autosizing importCache */
-    if (li->li_import_cache_autosize > 0) {
-        /* Use import percentage here, as it's been corrected for -1 behaviour */
-        import_size = (import_percentage * mi->system_total_bytes) / 100;
-        issane = util_is_cachesize_sane(mi, &import_size);
-        if (issane == UTIL_CACHESIZE_REDUCED) {
-            slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Your autosized import cache values have been reduced. Likely your nsslapd-import-cache-autosize percentage is too high.\n");
-        }
-        /* We just accept the reduced allocation here. */
-        slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "cache autosizing: import cache: %" PRIu64 "k\n", import_size / 1024);
-
-        sprintf(size_to_str, "%" PRIu64, import_size);
-        ldbm_config_internal_set(li, CONFIG_IMPORT_CACHESIZE, size_to_str);
-    }
-
-    /* Finally, lets check that the total result is sane. */
-    slapi_log_err(SLAPI_LOG_NOTICE, "ldbm_back_start", "total cache size: %" PRIu64 " B; \n", total_cache_size);
-
-    issane = util_is_cachesize_sane(mi, &total_cache_size);
-    if (issane != UTIL_CACHESIZE_VALID) {
-        /* Right, it's time to panic */
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n");
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "In a future release this WILL prevent server start up. You MUST alter your configuration.\n");
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "Total entry cache size: %" PRIu64 " B; dbcache size: %" PRIu64 " B; available memory size: %" PRIu64 " B; \n",
-                      total_cache_size, (uint64_t)li->li_dbcachesize, mi->system_available_bytes);
-        slapi_log_err(SLAPI_LOG_WARNING, "ldbm_back_start", "%s\n", msg);
-        /* WB 2016 - This should be UNCOMMENTED in a future release */
-        /* return SLAPI_FAIL_GENERAL; */
-    }
-
-    spal_meminfo_destroy(mi);
-
-    /* == End autotune == */
-    return 0;
-}
 
 /*
  * Start the LDBM plugin, and all its instances.
@@ -291,17 +35,16 @@ int
 ldbm_back_start(Slapi_PBlock *pb)
 {
     struct ldbminfo *li;
-    char *home_dir = NULL;
     int action = 0;
     int retval = 0;
-
+    dblayer_private *priv = NULL;
     slapi_log_err(SLAPI_LOG_TRACE, "ldbm_back_start", "ldbm backend starting\n");
 
     slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &li);
 
-    /* parse the config file here */
-    if (0 != ldbm_config_load_dse_info(li)) {
-        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Loading database configuration failed\n");
+    /* initialize dblayer  */
+   if( dblayer_setup(li)) {
+        slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Failed to setup dblayer\n");
         return SLAPI_FAIL_GENERAL;
     }
 
@@ -353,17 +96,31 @@ ldbm_back_start(Slapi_PBlock *pb)
         ldbm_config_internal_set(li, CONFIG_DIRECTORY, "get default");
     }
 
-    retval = ldbm_back_start_autotune(li);
+    /* We are autotuning the caches. was: 
+     * retval = ldbm_back_start_autotune(li);
+     * This involves caches specific to instances managed in the ldbm layer
+     * and to caches specific to the db implementation.
+     * The cache usage and requirements of the db is not known here, also it
+     * might have impact on the sizing of the instance caches.
+     * Therefore this functionality is moved to the db_xxx layer.
+     * The latest autotune function was implemented only with BDB in mind
+     * so it should be safe to move it to db_bdb.
+     */ 
+    priv = (dblayer_private *)li->li_dblayer_private;
+    retval = priv->dblayer_auto_tune_fn(li);
     if (retval != 0) {
         slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Failed to set database tuning on backends\n");
         return SLAPI_FAIL_GENERAL;
     }
 
+    /* TBD do we want to go on with auto upgrades of old db versions and config 
+     * or take the opportunity to stop this ??
     retval = check_db_version(li, &action);
     if (0 != retval) {
         slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "db version is not supported\n");
         return SLAPI_FAIL_GENERAL;
     }
+     */
 
     if (action &
         (DBVERSION_UPGRADE_3_4 | DBVERSION_UPGRADE_4_4 | DBVERSION_UPGRADE_4_5)) {
@@ -391,20 +148,28 @@ ldbm_back_start(Slapi_PBlock *pb)
             return return_on_disk_full(li);
         else {
             if ((li->li_cache_autosize > 0) && (li->li_cache_autosize <= 100)) {
+                /* NOTE (LK): there are two problems with the following error message:
+                 * First it reports a dbcache size which might not be available for
+                 * all backend implementations.
+                 * Second, there are many error conditions in ldbm_instance_startall
+                 * which can result in retval != 0, so it might be misleading.
+                 * For now do not change it, use a generic function to get db config
+                 * params, but need to think about it
+                 *
                 slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Failed to allocate %" PRIu64 " byte dbcache.  "
                                                                 "Please reduce the value of %s and restart the server.\n",
                               li->li_dbcachesize, CONFIG_CACHE_AUTOSIZE);
+                 */
+                char dbcachesize[BUFSIZ];
+                priv->dblayer_config_get_fn(li, CONFIG_DBCACHESIZE, dbcachesize);
+                slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_start", "Failed to allocate %s byte dbcache.  "
+                                                                "Please reduce the value of %s and restart the server.\n",
+                              dbcachesize, CONFIG_CACHE_AUTOSIZE);
             }
             return SLAPI_FAIL_GENERAL;
         }
     }
 
-    /* write DBVERSION file if one does not exist */
-    home_dir = dblayer_get_home_dir(li, NULL);
-    if (!dbversion_exists(li, home_dir)) {
-        dbversion_write(li, home_dir, NULL, DBVERSION_ALL);
-    }
-
 
     /* this function is called every time new db is initialized   */
     /* currently it is called the 2nd time  when changelog db is  */

+ 3 - 3
ldap/servers/slapd/back-ldbm/vlv_srch.c

@@ -169,7 +169,7 @@ vlvSearch_init(struct vlvSearch *p, Slapi_PBlock *pb, const Slapi_Entry *e, ldbm
             /* switch context back to the DSE backend */
             slapi_pblock_set(pb, SLAPI_BACKEND, oldbe);
             if (oldbe) {
-                slapi_pblock_set(pb, SLAPI_PLUGIN, oldbe->be_database);
+                 slapi_pblock_set(pb, SLAPI_PLUGIN, oldbe->be_database);
             }
         }
 
@@ -496,7 +496,7 @@ vlvIndex_delete(struct vlvIndex **ppvs)
             }
         }
         internal_ldap_free_sort_keylist((*ppvs)->vlv_sortkey);
-        dblayer_erase_index_file_nolock((*ppvs)->vlv_be, (*ppvs)->vlv_attrinfo, 1 /* chkpt if not busy */);
+        dblayer_erase_index_file((*ppvs)->vlv_be, (*ppvs)->vlv_attrinfo, PR_FALSE, 1 /* chkpt if not busy */);
         attrinfo_delete(&((*ppvs)->vlv_attrinfo));
         slapi_ch_free((void **)&((*ppvs)->vlv_name));
         slapi_ch_free((void **)&((*ppvs)->vlv_filename));
@@ -698,7 +698,7 @@ vlvIndex_go_offline(struct vlvIndex *p, backend *be)
     p->vlv_enabled = 0;
     p->vlv_indexlength = 0;
     p->vlv_attrinfo->ai_indexmask |= INDEX_OFFLINE;
-    dblayer_erase_index_file_nolock(be, p->vlv_attrinfo, 1 /* chkpt if not busy */);
+    dblayer_erase_index_file(be, p->vlv_attrinfo, PR_FALSE, 1 /* chkpt if not busy */);
 }
 
 void

+ 2 - 4
ldap/servers/slapd/main.c

@@ -1347,7 +1347,7 @@ process_command_line(int argc, char **argv, struct main_config *mcfg)
         {"verbose", ArgNone, 'V'},
         {0, 0, 0}};
 
-    char *opts_archive2db = "vd:i:a:n:SD:qV";
+    char *opts_archive2db = "vd:i:a:SD:qV";
     struct opt_ext long_options_archive2db[] = {
         {"version", ArgNone, 'v'},
         {"debug", ArgRequired, 'd'},
@@ -1599,8 +1599,7 @@ process_command_line(int argc, char **argv, struct main_config *mcfg)
         case 'n': /* which backend to do ldif2db/bak2db for */
             if (mcfg->slapd_exemode == SLAPD_EXEMODE_LDIF2DB ||
                 mcfg->slapd_exemode == SLAPD_EXEMODE_UPGRADEDNFORMAT ||
-                mcfg->slapd_exemode == SLAPD_EXEMODE_DB2INDEX ||
-                mcfg->slapd_exemode == SLAPD_EXEMODE_ARCHIVE2DB) {
+                mcfg->slapd_exemode == SLAPD_EXEMODE_DB2INDEX) {
                 /* The -n argument will give the name of a backend instance. */
                 mcfg->cmd_line_instance_name = optarg_ext;
             } else if (mcfg->slapd_exemode == SLAPD_EXEMODE_DB2LDIF ||
@@ -2597,7 +2596,6 @@ slapd_exemode_archive2db(struct main_config *mcfg)
     slapi_pblock_set(pb, SLAPI_SEQ_VAL, mcfg->archive_name);
     int32_t task_flags = SLAPI_TASK_RUNNING_FROM_COMMANDLINE;
     slapi_pblock_set(pb, SLAPI_TASK_FLAGS, &task_flags);
-    slapi_pblock_set(pb, SLAPI_BACKEND_INSTANCE_NAME, mcfg->cmd_line_instance_name);
     return_value = (backend_plugin->plg_archive2db)(pb);
     slapi_pblock_destroy(pb);
     return return_value;

+ 8 - 1
ldap/servers/slapd/result.c

@@ -346,11 +346,18 @@ send_ldap_result_ext(
     ber_tag_t bind_method = 0;
     int internal_op;
     int i, rc, logit = 0;
+    char *pbtext;
 
     slapi_pblock_get(pb, SLAPI_BIND_METHOD, &bind_method);
     slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
     slapi_pblock_get(pb, SLAPI_CONNECTION, &conn);
 
+    if (text) {
+        pbtext = text;
+    } else {
+        slapi_pblock_get(pb, SLAPI_PB_RESULT_TEXT, &pbtext);
+    }
+
     if (operation == NULL) {
         slapi_log_err(SLAPI_LOG_ERR, "send_ldap_result_ext", "No operation found: slapi_search_internal_set_pb was incomplete (invalid 'base' ?)\n");
         return;
@@ -497,7 +504,7 @@ send_ldap_result_ext(
             err = LDAP_PARTIAL_RESULTS;
         }
         rc = ber_printf(ber, "{it{ess", operation->o_msgid, tag, err,
-                        matched ? matched : "", text ? text : "");
+                        matched ? matched : "", pbtext ? pbtext : "");
 
         /*
          * if this is an LDAPv3 ExtendedResponse to an ExtendedRequest,

+ 4 - 2
ldap/servers/slapd/slapi-plugin.h

@@ -7723,6 +7723,7 @@ int slapi_back_ctrl_info(Slapi_Backend *be, int cmd, void *info);
 enum
 {
     BACK_INFO_DBENV,               /* Get the dbenv */
+    BACK_INFO_DB_PAGESIZE,         /* Get the db page size */
     BACK_INFO_INDEXPAGESIZE,       /* Get the index page size */
     BACK_INFO_DBENV_OPENFLAGS,     /* Get the dbenv openflags */
     BACK_INFO_CRYPT_INIT,          /* Ctrl: clcrypt_init */
@@ -7730,8 +7731,9 @@ enum
     BACK_INFO_CRYPT_DECRYPT_VALUE, /* Ctrl: clcrypt_decrypt_value */
     BACK_INFO_DIRECTORY,           /* Get the directory path */
     BACK_INFO_LOG_DIRECTORY,       /* Get the txn log directory */
-    BACK_INFO_IS_ENTRYRDN,         /* Get the flag for entryrdn */
-    BACK_INFO_INDEX_KEY            /* Get the status of a key in an index */
+    BACK_INFO_INDEX_KEY,           /* Get the status of a key in an index */
+    BACK_INFO_DBHOME_DIRECTORY,    /* Get the dbhome directory */
+    BACK_INFO_IS_ENTRYRDN          /* Get the flag for entryrdn */
 };
 
 struct _back_info_index_key

+ 0 - 15
ldap/servers/slapd/task.c

@@ -1633,10 +1633,6 @@ task_restore_thread(void *arg)
     slapi_pblock_get(pb, SLAPI_SEQ_VAL, &seq_val);
     slapi_ch_free((void **)&seq_val);
 
-    char *instance_name = NULL;
-    slapi_pblock_get(pb, SLAPI_BACKEND_INSTANCE_NAME, &instance_name);
-    slapi_ch_free_string(&instance_name);
-
     slapi_pblock_destroy(pb);
     g_decr_active_threadcnt();
 }
@@ -1650,7 +1646,6 @@ task_restore_add(Slapi_PBlock *pb,
                  void *arg __attribute__((unused)))
 {
     Slapi_Backend *be = NULL;
-    const char *instance_name = NULL;
     const char *archive_dir = NULL;
     const char *my_database_type = NULL;
     const char *database_type = "ldbm database";
@@ -1679,8 +1674,6 @@ task_restore_add(Slapi_PBlock *pb,
     if (NULL != my_database_type)
         database_type = my_database_type;
 
-    instance_name = slapi_entry_attr_get_ref(e, "nsInstance");
-
     /* get backend that has archive2db and the database type matches.  */
     be = slapi_get_first_backend(&cookie);
     while (be) {
@@ -1726,14 +1719,9 @@ task_restore_add(Slapi_PBlock *pb,
         rv = SLAPI_DSE_CALLBACK_ERROR;
         goto out;
     }
-    char *pb_instance_name = NULL;
     char *seq_val = slapi_ch_strdup(archive_dir);
     slapi_pblock_set(mypb, SLAPI_SEQ_VAL, seq_val);
     slapi_pblock_set(mypb, SLAPI_PLUGIN, be->be_database);
-    if (NULL != instance_name) {
-        pb_instance_name = slapi_ch_strdup(instance_name);
-        slapi_pblock_set(mypb, SLAPI_BACKEND_INSTANCE_NAME, pb_instance_name);
-    }
     slapi_pblock_set(mypb, SLAPI_BACKEND_TASK, task);
     int32_t task_flags = SLAPI_TASK_RUNNING_AS_TASK;
     slapi_pblock_set(mypb, SLAPI_TASK_FLAGS, &task_flags);
@@ -1748,9 +1736,6 @@ task_restore_add(Slapi_PBlock *pb,
         *returncode = LDAP_OPERATIONS_ERROR;
         rv = SLAPI_DSE_CALLBACK_ERROR;
         slapi_ch_free((void **)&seq_val);
-        if (instance_name) {
-            slapi_ch_free_string(&pb_instance_name);
-        }
         slapi_pblock_destroy(mypb);
         goto out;
     }

+ 1 - 0
src/lib389/lib389/_constants.py

@@ -75,6 +75,7 @@ PW_DM = "password"
 DN_CONFIG = "cn=config"
 DN_LDBM = "cn=ldbm database,cn=plugins,cn=config"
 DN_CONFIG_LDBM = "cn=config,cn=ldbm database,cn=plugins,cn=config"
+DN_CONFIG_LDBM_BDB = "cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config"
 DN_USERROOT_LDBM = "cn=userRoot,cn=ldbm database,cn=plugins,cn=config"
 DN_SCHEMA = "cn=schema"
 DN_MONITOR = "cn=monitor"

+ 19 - 0
src/lib389/lib389/config.py

@@ -472,3 +472,22 @@ class LDBMConfig(DSLdapObject):
         self._rdn_attribute = 'cn'
         self._lint_functions = []
         self._protected = True
+
+
+class BDB_LDBMConfig(DSLdapObject):
+    """
+        Manage "cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config" including:
+        - Performance related tunings
+        - BDB specific DB backend settings
+
+        :param instance: An instance
+        :type instance: lib389.DirSrv
+    """
+
+    def __init__(self, conn):
+        super(BDB_LDBMConfig, self).__init__(instance=conn)
+        self._dn = DN_CONFIG_LDBM_BDB
+        config_compare_exclude = []
+        self._rdn_attribute = 'cn'
+        self._lint_functions = []
+        self._protected = True

Някои файлове не бяха показани, защото твърде много файлове са променени