Browse Source

Ticket 49055 - Clean up test tickets and suites

Description: Add all topology fixture imports
for all tickets and refactor them accordingly.
Fix PEP8 and some logic issues.
Optimize imports in tickets and suites.

https://fedorahosted.org/389/ticket/49055

Reviewed by: mreynolds (Thanks!)
Simon Pichugin 9 years ago
parent
commit
54e90366c1
100 changed files with 5106 additions and 9020 deletions
  1. 0 12
      dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py
  2. 1 10
      dirsrvtests/tests/suites/acl/acl_test.py
  3. 0 6
      dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py
  4. 2 11
      dirsrvtests/tests/suites/basic/basic_test.py
  5. 0 9
      dirsrvtests/tests/suites/betxns/betxn_test.py
  6. 0 9
      dirsrvtests/tests/suites/clu/clu_test.py
  7. 1 8
      dirsrvtests/tests/suites/config/config_test.py
  8. 0 9
      dirsrvtests/tests/suites/dna_plugin/dna_test.py
  9. 22 23
      dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
  10. 212 220
      dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py
  11. 6 10
      dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py
  12. 4 10
      dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py
  13. 11 18
      dirsrvtests/tests/suites/filter/filter_test.py
  14. 0 7
      dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py
  15. 0 10
      dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py
  16. 0 1
      dirsrvtests/tests/suites/ldapi/__init__.py
  17. 181 161
      dirsrvtests/tests/suites/memberof_plugin/memberof_test.py
  18. 0 9
      dirsrvtests/tests/suites/memory_leaks/range_search_test.py
  19. 4 8
      dirsrvtests/tests/suites/paged_results/paged_results_test.py
  20. 36 40
      dirsrvtests/tests/suites/paged_results/sss_control.py
  21. 1 8
      dirsrvtests/tests/suites/password/password_test.py
  22. 0 9
      dirsrvtests/tests/suites/password/pwdAdmin_test.py
  23. 0 8
      dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py
  24. 4 9
      dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py
  25. 1 6
      dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py
  26. 0 10
      dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py
  27. 0 9
      dirsrvtests/tests/suites/password/pwd_algo_test.py
  28. 0 7
      dirsrvtests/tests/suites/password/pwp_history_test.py
  29. 2 11
      dirsrvtests/tests/suites/replication/cleanallruv_test.py
  30. 4 5
      dirsrvtests/tests/suites/replication/tombstone_test.py
  31. 2 10
      dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py
  32. 33 39
      dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py
  33. 5 10
      dirsrvtests/tests/suites/schema/test_schema.py
  34. 0 52
      dirsrvtests/tests/tickets/finalizer.py
  35. 102 125
      dirsrvtests/tests/tickets/ticket1347760_test.py
  36. 27 67
      dirsrvtests/tests/tickets/ticket365_test.py
  37. 22 75
      dirsrvtests/tests/tickets/ticket397_test.py
  38. 29 74
      dirsrvtests/tests/tickets/ticket47313_test.py
  39. 17 63
      dirsrvtests/tests/tickets/ticket47384_test.py
  40. 45 86
      dirsrvtests/tests/tickets/ticket47431_test.py
  41. 71 184
      dirsrvtests/tests/tickets/ticket47462_test.py
  42. 118 196
      dirsrvtests/tests/tickets/ticket47490_test.py
  43. 65 169
      dirsrvtests/tests/tickets/ticket47536_test.py
  44. 30 77
      dirsrvtests/tests/tickets/ticket47553_test.py
  45. 22 65
      dirsrvtests/tests/tickets/ticket47560_test.py
  46. 48 137
      dirsrvtests/tests/tickets/ticket47573_test.py
  47. 26 118
      dirsrvtests/tests/tickets/ticket47619_test.py
  48. 15 60
      dirsrvtests/tests/tickets/ticket47640_test.py
  49. 103 217
      dirsrvtests/tests/tickets/ticket47653MMR_test.py
  50. 112 154
      dirsrvtests/tests/tickets/ticket47653_test.py
  51. 63 112
      dirsrvtests/tests/tickets/ticket47669_test.py
  52. 74 190
      dirsrvtests/tests/tickets/ticket47676_test.py
  53. 65 103
      dirsrvtests/tests/tickets/ticket47714_test.py
  54. 87 202
      dirsrvtests/tests/tickets/ticket47721_test.py
  55. 28 75
      dirsrvtests/tests/tickets/ticket47781_test.py
  56. 110 223
      dirsrvtests/tests/tickets/ticket47787_test.py
  57. 31 74
      dirsrvtests/tests/tickets/ticket47808_test.py
  58. 31 73
      dirsrvtests/tests/tickets/ticket47815_test.py
  59. 29 77
      dirsrvtests/tests/tickets/ticket47819_test.py
  60. 364 379
      dirsrvtests/tests/tickets/ticket47823_test.py
  61. 330 331
      dirsrvtests/tests/tickets/ticket47828_test.py
  62. 334 317
      dirsrvtests/tests/tickets/ticket47829_test.py
  63. 104 153
      dirsrvtests/tests/tickets/ticket47833_test.py
  64. 277 296
      dirsrvtests/tests/tickets/ticket47838_test.py
  65. 73 187
      dirsrvtests/tests/tickets/ticket47869MMR_test.py
  66. 27 122
      dirsrvtests/tests/tickets/ticket47871_test.py
  67. 81 125
      dirsrvtests/tests/tickets/ticket47900_test.py
  68. 16 63
      dirsrvtests/tests/tickets/ticket47910_test.py
  69. 46 88
      dirsrvtests/tests/tickets/ticket47920_test.py
  70. 28 70
      dirsrvtests/tests/tickets/ticket47921_test.py
  71. 102 137
      dirsrvtests/tests/tickets/ticket47927_test.py
  72. 36 80
      dirsrvtests/tests/tickets/ticket47931_test.py
  73. 40 82
      dirsrvtests/tests/tickets/ticket47937_test.py
  74. 24 71
      dirsrvtests/tests/tickets/ticket47950_test.py
  75. 8 54
      dirsrvtests/tests/tickets/ticket47953_test.py
  76. 30 71
      dirsrvtests/tests/tickets/ticket47963_test.py
  77. 5 109
      dirsrvtests/tests/tickets/ticket47966_test.py
  78. 10 57
      dirsrvtests/tests/tickets/ticket47970_test.py
  79. 16 60
      dirsrvtests/tests/tickets/ticket47973_test.py
  80. 75 122
      dirsrvtests/tests/tickets/ticket47976_test.py
  81. 233 280
      dirsrvtests/tests/tickets/ticket47980_test.py
  82. 50 97
      dirsrvtests/tests/tickets/ticket47981_test.py
  83. 142 255
      dirsrvtests/tests/tickets/ticket47988_test.py
  84. 72 110
      dirsrvtests/tests/tickets/ticket48005_test.py
  85. 6 48
      dirsrvtests/tests/tickets/ticket48013_test.py
  86. 27 67
      dirsrvtests/tests/tickets/ticket48026_test.py
  87. 82 135
      dirsrvtests/tests/tickets/ticket48109_test.py
  88. 3 47
      dirsrvtests/tests/tickets/ticket48170_test.py
  89. 166 205
      dirsrvtests/tests/tickets/ticket48194_test.py
  90. 50 94
      dirsrvtests/tests/tickets/ticket48212_test.py
  91. 37 83
      dirsrvtests/tests/tickets/ticket48214_test.py
  92. 28 142
      dirsrvtests/tests/tickets/ticket48226_test.py
  93. 76 117
      dirsrvtests/tests/tickets/ticket48228_test.py
  94. 8 56
      dirsrvtests/tests/tickets/ticket48233_test.py
  95. 15 59
      dirsrvtests/tests/tickets/ticket48234_test.py
  96. 22 69
      dirsrvtests/tests/tickets/ticket48252_test.py
  97. 19 57
      dirsrvtests/tests/tickets/ticket48265_test.py
  98. 68 179
      dirsrvtests/tests/tickets/ticket48266_test.py
  99. 43 81
      dirsrvtests/tests/tickets/ticket48270_test.py
  100. 31 85
      dirsrvtests/tests/tickets/ticket48272_test.py

+ 0 - 12
dirsrvtests/tests/suites/acctpolicy_plugin/accpol_test.py

@@ -1,16 +1,4 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-import ldif
-import ldap.modlist as modlist
-from ldif import LDIFParser, LDIFWriter
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 1 - 10
dirsrvtests/tests/suites/acl/acl_test.py

@@ -6,20 +6,11 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+from ldap.controls.simple import GetEffectiveRightsControl
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_m2
-from ldap.controls.simple import GetEffectiveRightsControl
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)

+ 0 - 6
dirsrvtests/tests/suites/attr_uniqueness_plugin/attr_uniqueness_test.py

@@ -6,13 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 2 - 11
dirsrvtests/tests/suites/basic/basic_test.py

@@ -6,19 +6,10 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
+from subprocess import check_output
+
 import ldap.sasl
-import logging
 import pytest
-import shutil
-from subprocess import check_output
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 0 - 9
dirsrvtests/tests/suites/betxns/betxn_test.py

@@ -6,17 +6,8 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
 import six
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 0 - 9
dirsrvtests/tests/suites/clu/clu_test.py

@@ -6,16 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 1 - 8
dirsrvtests/tests/suites/config/config_test.py

@@ -6,16 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.topologies import topology_m2
 

+ 0 - 9
dirsrvtests/tests/suites/dna_plugin/dna_test.py

@@ -6,16 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 22 - 23
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py

@@ -6,11 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import ldap
-import logging
-import pytest
 from random import sample
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st
@@ -34,15 +32,15 @@ def add_users(topology_st, users_num):
         users_list.append(USER_DN)
         try:
             topology_st.standalone.add_s(Entry((USER_DN, {
-                                             'objectclass': 'top person'.split(),
-                                             'objectclass': 'organizationalPerson',
-                                             'objectclass': 'inetorgperson',
-                                             'cn': USER_NAME,
-                                             'sn': USER_NAME,
-                                             'userpassword': 'pass%s' % num_ran,
-                                             'mail': '%[email protected]' % USER_NAME,
-                                             'uid': USER_NAME
-                                              })))
+                'objectclass': 'top person'.split(),
+                'objectclass': 'organizationalPerson',
+                'objectclass': 'inetorgperson',
+                'cn': USER_NAME,
+                'sn': USER_NAME,
+                'userpassword': 'pass%s' % num_ran,
+                'mail': '%[email protected]' % USER_NAME,
+                'uid': USER_NAME
+            })))
         except ldap.LDAPError as e:
             log.error('Failed to add user (%s): error (%s)' % (USER_DN,
                                                                e.message['desc']))
@@ -50,15 +48,15 @@ def add_users(topology_st, users_num):
 
 
 def search_users(topology_st):
-        try:
-            entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)', ['cn'])
-            for entry in entries:
-                if 'user1' in entry.data['cn']:
-                    log.info('Search found "user1"')
+    try:
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(cn=*)', ['cn'])
+        for entry in entries:
+            if 'user1' in entry.data['cn']:
+                log.info('Search found "user1"')
 
-        except ldap.LDAPError as e:
-            log.fatal('Search failed, error: ' + e.message['desc'])
-            raise e
+    except ldap.LDAPError as e:
+        log.fatal('Search failed, error: ' + e.message['desc'])
+        raise e
 
 
 def test_check_default(topology_st):
@@ -72,7 +70,7 @@ def test_check_default(topology_st):
     default = topology_st.standalone.config.get_attr_val(PLUGIN_TIMESTAMP)
 
     # Now check it should be ON by default
-    assert(default == "on")
+    assert (default == "on")
     log.debug(default)
 
 
@@ -81,7 +79,7 @@ def test_plugin_set_invalid(topology_st):
 
     log.info('test_plugin_set_invalid - Expect to fail with junk value')
     with pytest.raises(ldap.OPERATIONS_ERROR):
-        result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP,'JUNK')
+        result = topology_st.standalone.config.set(PLUGIN_TIMESTAMP, 'JUNK')
 
 
 def test_log_plugin_on(topology_st):
@@ -126,6 +124,7 @@ def test_log_plugin_off(topology_st):
     assert len(access_log_lines) > 0
     assert not topology_st.standalone.ds_access_log.match('^\[.+\d{9}.+\].+')
 
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode

+ 212 - 220
dirsrvtests/tests/suites/dynamic-plugins/plugin_tests.py

@@ -11,16 +11,8 @@ Created on Dec 09, 2014
 
 @author: mreynolds
 '''
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
 from lib389.tasks import *
 
 log = logging.getLogger(__name__)
@@ -146,16 +138,16 @@ def test_acctpolicy(inst, args=None):
     # Add the config entry
     try:
         inst.add_s(Entry((CONFIG_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'config',
-                          'alwaysrecordlogin': 'yes',
-                          'stateattrname': 'lastLoginTime'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'config',
+            'alwaysrecordlogin': 'yes',
+            'stateattrname': 'lastLoginTime'
+        })))
     except ldap.ALREADY_EXISTS:
         try:
             inst.modify_s(CONFIG_DN,
-                      [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
-                       (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
+                          [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+                           (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])
         except ldap.LDAPError as e:
             log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])
             assert False
@@ -171,10 +163,10 @@ def test_acctpolicy(inst, args=None):
     time.sleep(1)
     try:
         inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '1',
-                                 'cn': 'user 1',
-                                 'uid': 'user1',
-                                 'userpassword': 'password'})))
+                                     'sn': '1',
+                                     'cn': 'user 1',
+                                     'uid': 'user1',
+                                     'userpassword': 'password'})))
     except ldap.LDAPError as e:
         log.fatal('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
         assert False
@@ -308,7 +300,7 @@ def test_attruniq(inst, args=None):
                                      'cn': 'user 1',
                                      'uid': 'user1',
                                      'mail': '[email protected]',
-                                     'mailAlternateAddress' : '[email protected]',
+                                     'mailAlternateAddress': '[email protected]',
                                      'userpassword': 'password'})))
     except ldap.LDAPError as e:
         log.fatal('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
@@ -347,11 +339,11 @@ def test_attruniq(inst, args=None):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mail': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mail': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -369,7 +361,8 @@ def test_attruniq(inst, args=None):
                         'mailAlternateAddress')])
 
     except ldap.LDAPError as e:
-        log.error('test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc'])
+        log.error(
+            'test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc'])
         assert False
 
     ############################################################################
@@ -378,11 +371,11 @@ def test_attruniq(inst, args=None):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mail': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mail': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -395,11 +388,11 @@ def test_attruniq(inst, args=None):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mailAlternateAddress': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mailAlternateAddress': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -412,11 +405,11 @@ def test_attruniq(inst, args=None):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mail': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mail': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -429,11 +422,11 @@ def test_attruniq(inst, args=None):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mailAlternateAddress': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mailAlternateAddress': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -488,9 +481,9 @@ def test_automember(inst, args=None):
     # Add the automember group
     try:
         inst.add_s(Entry((GROUP_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'group'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'group'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to add group: error ' + e.message['desc'])
         assert False
@@ -498,9 +491,9 @@ def test_automember(inst, args=None):
     # Add ou=branch1
     try:
         inst.add_s(Entry((BRANCH1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'branch1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'branch1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to add branch1: error ' + e.message['desc'])
         assert False
@@ -508,9 +501,9 @@ def test_automember(inst, args=None):
     # Add ou=branch2
     try:
         inst.add_s(Entry((BRANCH2_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'branch2'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'branch2'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to add branch2: error ' + e.message['desc'])
         assert False
@@ -518,13 +511,13 @@ def test_automember(inst, args=None):
     # Add the automember config entry
     try:
         inst.add_s(Entry((CONFIG_DN, {
-                          'objectclass': 'top autoMemberDefinition'.split(),
-                          'cn': 'config',
-                          'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX,
-                          'autoMemberFilter': 'objectclass=top',
-                          'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX,
-                          'autoMemberGroupingAttr': 'member:dn'
-                          })))
+            'objectclass': 'top autoMemberDefinition'.split(),
+            'cn': 'config',
+            'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX,
+            'autoMemberFilter': 'objectclass=top',
+            'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX,
+            'autoMemberGroupingAttr': 'member:dn'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to add config entry: error ' + e.message['desc'])
         assert False
@@ -536,9 +529,9 @@ def test_automember(inst, args=None):
     # Add a user that should get added to the group
     try:
         inst.add_s(Entry((BUSER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to add user: error ' + e.message['desc'])
         assert False
@@ -574,9 +567,9 @@ def test_automember(inst, args=None):
     # Add a user that should get added to the group
     try:
         inst.add_s(Entry((BUSER2_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user2'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user2'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to user to branch2: error ' + e.message['desc'])
         assert False
@@ -602,9 +595,9 @@ def test_automember(inst, args=None):
     # Add an entry that should be picked up by automember - verify it is not(yet)
     try:
         inst.add_s(Entry((BUSER3_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user3'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user3'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])
         assert False
@@ -627,9 +620,9 @@ def test_automember(inst, args=None):
     # Add the task
     try:
         inst.add_s(Entry((TASK_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
-                          'filter': 'objectclass=top'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,
+            'filter': 'objectclass=top'})))
     except ldap.LDAPError as e:
         log.fatal('test_automember: Failed to add task: error ' + e.message['desc'])
         assert False
@@ -730,15 +723,15 @@ def test_dna(inst, args=None):
 
     try:
         inst.add_s(Entry((CONFIG_DN, {
-                          'objectclass': 'top dnaPluginConfig'.split(),
-                          'cn': 'config',
-                          'dnatype': 'uidNumber',
-                          'dnafilter': '(objectclass=top)',
-                          'dnascope': DEFAULT_SUFFIX,
-                          'dnaMagicRegen': '-1',
-                          'dnaMaxValue': '50000',
-                          'dnaNextValue': '1'
-                          })))
+            'objectclass': 'top dnaPluginConfig'.split(),
+            'cn': 'config',
+            'dnatype': 'uidNumber',
+            'dnafilter': '(objectclass=top)',
+            'dnascope': DEFAULT_SUFFIX,
+            'dnaMagicRegen': '-1',
+            'dnaMaxValue': '50000',
+            'dnaNextValue': '1'
+        })))
     except ldap.ALREADY_EXISTS:
         try:
             inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),
@@ -756,9 +749,9 @@ def test_dna(inst, args=None):
 
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_dna: Failed to user1: error ' + e.message['desc'])
         assert False
@@ -872,18 +865,18 @@ def test_linkedattrs(inst, args=None):
     # Add test entries
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((USER2_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user2'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user2'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_linkedattrs: Failed to user1: error ' + e.message['desc'])
         assert False
@@ -891,11 +884,11 @@ def test_linkedattrs(inst, args=None):
     # Add the linked attrs config entry
     try:
         inst.add_s(Entry((CONFIG_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'config',
-                          'linkType': 'directReport',
-                          'managedType': 'manager'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'config',
+            'linkType': 'directReport',
+            'managedType': 'manager'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_linkedattrs: Failed to add config entry: error ' + e.message['desc'])
         assert False
@@ -1033,9 +1026,9 @@ def test_linkedattrs(inst, args=None):
     TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config'
     try:
         inst.add_s(Entry(('cn=task-' + str(int(time.time())) + ',cn=fixup linked attributes,cn=tasks,cn=config', {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'basedn': DEFAULT_SUFFIX,
-                          'filter': 'objectclass=top'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'basedn': DEFAULT_SUFFIX,
+            'filter': 'objectclass=top'})))
     except ldap.LDAPError as e:
         log.fatal('test_linkedattrs: Failed to add task: error ' + e.message['desc'])
         assert False
@@ -1123,29 +1116,29 @@ def test_memberof(inst, args=None):
     # Add our test entries
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((GROUP_DN, {
-                          'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
-                          'cn': 'group',
-                          'member': USER1_DN
-                          })))
+            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+            'cn': 'group',
+            'member': USER1_DN
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add group: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((SHARED_CONFIG_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'memberofgroupattr': 'member',
-                          'memberofattr': 'memberof'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'memberofgroupattr': 'member',
+            'memberofattr': 'memberof'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to shared config entry: error ' + e.message['desc'])
         assert False
@@ -1250,19 +1243,19 @@ def test_memberof(inst, args=None):
 
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((GROUP_DN, {
-                          'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
-                          'cn': 'group',
-                          'member': USER1_DN
-                          })))
+            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+            'cn': 'group',
+            'member': USER1_DN
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add group: error ' + e.message['desc'])
         assert False
@@ -1303,7 +1296,7 @@ def test_memberof(inst, args=None):
         inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'memberofgroupattr', 'uniquemember')])
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to set shared plugin entry(uniquemember): error '
-            + e.message['desc'])
+                  + e.message['desc'])
         assert False
 
     try:
@@ -1433,9 +1426,9 @@ def test_memberof(inst, args=None):
     TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
     try:
         inst.add_s(Entry((TASK_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'basedn': DEFAULT_SUFFIX + "bad",
-                          'filter': 'objectclass=top'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'basedn': DEFAULT_SUFFIX + "bad",
+            'filter': 'objectclass=top'})))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add task(bad dn): error ' +
                   e.message['desc'])
@@ -1453,9 +1446,9 @@ def test_memberof(inst, args=None):
     TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
     try:
         inst.add_s(Entry((TASK_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'basedn': "bad",
-                          'filter': 'objectclass=top'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'basedn': "bad",
+            'filter': 'objectclass=top'})))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add task(invalid dn syntax): ' +
                   e.message['desc'])
@@ -1474,9 +1467,9 @@ def test_memberof(inst, args=None):
     TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
     try:
         inst.add_s(Entry((TASK_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'basedn': DEFAULT_SUFFIX,
-                          'filter': '(objectclass=top'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'basedn': DEFAULT_SUFFIX,
+            'filter': '(objectclass=top'})))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add task(bad filter: error ' +
                   e.message['desc'])
@@ -1499,9 +1492,9 @@ def test_memberof(inst, args=None):
     TASK_DN = 'cn=task-' + str(int(time.time())) + ',' + DN_MBO_TASK
     try:
         inst.add_s(Entry((TASK_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'basedn': DEFAULT_SUFFIX,
-                          'filter': 'objectclass=top'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'basedn': DEFAULT_SUFFIX,
+            'filter': 'objectclass=top'})))
     except ldap.LDAPError as e:
         log.fatal('test_memberof: Failed to add task: error ' + e.message['desc'])
         assert False
@@ -1585,8 +1578,8 @@ def test_mep(inst, args=None):
     # Add our org units
     try:
         inst.add_s(Entry((PEOPLE_OU, {
-                   'objectclass': 'top extensibleObject'.split(),
-                   'ou': 'people'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'people'})))
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError as e:
@@ -1595,8 +1588,8 @@ def test_mep(inst, args=None):
 
     try:
         inst.add_s(Entry((GROUP_OU, {
-                   'objectclass': 'top extensibleObject'.split(),
-                   'ou': 'people'})))
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'people'})))
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError as e:
@@ -1606,12 +1599,12 @@ def test_mep(inst, args=None):
     # Add the template entry
     try:
         inst.add_s(Entry((TEMPLATE_DN, {
-                   'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
-                   'cn': 'MEP Template',
-                   'mepRDNAttr': 'cn',
-                   'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
-                   'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
-                   })))
+            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
+            'cn': 'MEP Template',
+            'mepRDNAttr': 'cn',
+            'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
+            'mepMappedAttr': 'cn: $cn|uid: $cn|gidNumber: $uidNumber'.split('|')
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_mep: Failed to add template entry: error ' + e.message['desc'])
         assert False
@@ -1619,13 +1612,13 @@ def test_mep(inst, args=None):
     # Add the config entry
     try:
         inst.add_s(Entry((CONFIG_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'config',
-                          'originScope': PEOPLE_OU,
-                          'originFilter': 'objectclass=posixAccount',
-                          'managedBase': GROUP_OU,
-                          'managedTemplate': TEMPLATE_DN
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'config',
+            'originScope': PEOPLE_OU,
+            'originFilter': 'objectclass=posixAccount',
+            'managedBase': GROUP_OU,
+            'managedTemplate': TEMPLATE_DN
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_mep: Failed to add config entry: error ' + e.message['desc'])
         assert False
@@ -1637,13 +1630,13 @@ def test_mep(inst, args=None):
     # Add an entry that meets the MEP scope
     try:
         inst.add_s(Entry((USER_DN, {
-                          'objectclass': 'top posixAccount extensibleObject'.split(),
-                          'uid': 'user1',
-                          'cn': 'user1',
-                          'uidNumber': '1',
-                          'gidNumber': '1',
-                          'homeDirectory': '/home/user1'
-                          })))
+            'objectclass': 'top posixAccount extensibleObject'.split(),
+            'uid': 'user1',
+            'cn': 'user1',
+            'uidNumber': '1',
+            'gidNumber': '1',
+            'homeDirectory': '/home/user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_mep: Failed to user1: error ' + e.message['desc'])
         assert False
@@ -1662,12 +1655,12 @@ def test_mep(inst, args=None):
     # Add a new template entry
     try:
         inst.add_s(Entry((TEMPLATE_DN2, {
-                   'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
-                   'cn': 'MEP Template2',
-                   'mepRDNAttr': 'uid',
-                   'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
-                   'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|')
-                   })))
+            'objectclass': 'top mepTemplateEntry extensibleObject'.split(),
+            'cn': 'MEP Template2',
+            'mepRDNAttr': 'uid',
+            'mepStaticAttr': 'objectclass: posixGroup|objectclass: extensibleObject'.split('|'),
+            'mepMappedAttr': 'cn: $uid|uid: $cn|gidNumber: $gidNumber'.split('|')
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_mep: Failed to add template entry2: error ' + e.message['desc'])
         assert False
@@ -1686,13 +1679,13 @@ def test_mep(inst, args=None):
     # Add an entry that meets the MEP scope
     try:
         inst.add_s(Entry((USER_DN2, {
-                          'objectclass': 'top posixAccount extensibleObject'.split(),
-                          'uid': 'user 1',
-                          'cn': 'user 1',
-                          'uidNumber': '1',
-                          'gidNumber': '1',
-                          'homeDirectory': '/home/user2'
-                          })))
+            'objectclass': 'top posixAccount extensibleObject'.split(),
+            'uid': 'user 1',
+            'cn': 'user 1',
+            'uidNumber': '1',
+            'gidNumber': '1',
+            'homeDirectory': '/home/user2'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_mep: Failed to user2: error ' + e.message['desc'])
         assert False
@@ -1800,8 +1793,8 @@ def test_passthru(inst, args=None):
     # Create the top of the tree
     try:
         passthru_inst.add_s(Entry((PASS_SUFFIX2, {
-                          'objectclass': 'top domain'.split(),
-                          'dc': 'pass2'})))
+            'objectclass': 'top domain'.split(),
+            'dc': 'pass2'})))
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError as e:
@@ -1812,10 +1805,10 @@ def test_passthru(inst, args=None):
     # Add user to suffix1
     try:
         passthru_inst.add_s(Entry((PASSTHRU_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'admin',
-                          'userpassword': 'password'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'admin',
+            'userpassword': 'password'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_passthru: Failed to admin1: error ' + e.message['desc'])
         passthru_inst.delete()
@@ -1824,10 +1817,10 @@ def test_passthru(inst, args=None):
     # Add user to suffix 2
     try:
         passthru_inst.add_s(Entry((PASSTHRU_DN2, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'admin2',
-                          'userpassword': 'password'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'admin2',
+            'userpassword': 'password'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_passthru: Failed to admin2 : error ' + e.message['desc'])
         passthru_inst.delete()
@@ -1952,29 +1945,29 @@ def test_referint(inst, args=None):
     # Add some users and a group
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((USER2_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user2'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user2'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add user2: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((GROUP_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'group',
-                          'member': USER1_DN,
-                          'uniquemember': USER2_DN
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'group',
+            'member': USER1_DN,
+            'uniquemember': USER2_DN
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add group: error ' + e.message['desc'])
         assert False
@@ -1991,12 +1984,12 @@ def test_referint(inst, args=None):
     # Add shared config entry
     try:
         inst.add_s(Entry((SHARED_CONFIG_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'referint-membership-attr': 'member',
-                          'referint-update-delay': '0',
-                          'referint-logfile': REFERINT_LOGFILE,
-                          'referint-logchanges': '0'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'referint-membership-attr': 'member',
+            'referint-update-delay': '0',
+            'referint-logfile': REFERINT_LOGFILE,
+            'referint-logchanges': '0'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to shared config entry: error ' + e.message['desc'])
         assert False
@@ -2069,29 +2062,29 @@ def test_referint(inst, args=None):
 
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((USER2_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user2'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user2'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add user2: error ' + e.message['desc'])
         assert False
 
     try:
         inst.add_s(Entry((GROUP_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'group',
-                          'member': USER1_DN,
-                          'uniquemember': USER2_DN
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'group',
+            'member': USER1_DN,
+            'uniquemember': USER2_DN
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add group: error ' + e.message['desc'])
         assert False
@@ -2121,7 +2114,7 @@ def test_referint(inst, args=None):
         inst.modify_s(SHARED_CONFIG_DN, [(ldap.MOD_REPLACE, 'referint-membership-attr', 'uniquemember')])
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to set shared plugin entry(uniquemember): error '
-            + e.message['desc'])
+                  + e.message['desc'])
         assert False
 
     # Delete a user
@@ -2162,9 +2155,9 @@ def test_referint(inst, args=None):
     # Add test user
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_referint: Failed to add user1: error ' + e.message['desc'])
         assert False
@@ -2259,9 +2252,9 @@ def test_retrocl(inst, args=None):
     # Add a user
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_retrocl: Failed to add user1: error ' + e.message['desc'])
         assert False
@@ -2299,7 +2292,7 @@ def test_retrocl(inst, args=None):
         entry = inst.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, '(changenumber=*)')
         if len(entry) != entry_count:
             log.fatal('test_retrocl: changelog incorrectly updated - change count: '
-                + str(len(entry)) + ' - expected 1')
+                      + str(len(entry)) + ' - expected 1')
             assert False
     except ldap.LDAPError as e:
         log.fatal('test_retrocl: Unable to search retro changelog: ' + e.message['desc'])
@@ -2351,10 +2344,10 @@ def test_rootdn(inst, args=None):
     # Add an user and aci to open up cn=config
     try:
         inst.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1',
-                          'userpassword': 'password'
-                          })))
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1',
+            'userpassword': 'password'
+        })))
     except ldap.LDAPError as e:
         log.fatal('test_rootdn: Failed to add user1: error ' + e.message['desc'])
         assert False
@@ -2480,4 +2473,3 @@ def test_all_plugins(inst, args=None):
         func(inst, args)
 
     return
-

+ 6 - 10
dirsrvtests/tests/suites/dynamic-plugins/stress_tests.py

@@ -11,15 +11,11 @@ Created on Dec 16, 2014
 
 @author: mreynolds
 '''
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
 import threading
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
+
+import ldap
+from lib389 import DirSrv, Entry
 from lib389._constants import *
 from lib389.properties import *
 
@@ -114,8 +110,8 @@ class AddUsers(threading.Thread):
         if self.addToGroup:
             try:
                 conn.add_s(Entry((GROUP_DN,
-                    {'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
-                     'uid': 'user' + str(idx)})))
+                                  {'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+                                   'uid': 'user' + str(idx)})))
             except ldap.LDAPError as e:
                 if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
                     log.fatal('AddUsers: failed to add group (' + USER_DN + ') error: ' + e.message['desc'])
@@ -127,7 +123,7 @@ class AddUsers(threading.Thread):
             USER_DN = 'uid=' + self.rdnval + str(idx) + ',' + DEFAULT_SUFFIX
             try:
                 conn.add_s(Entry((USER_DN, {'objectclass': 'top extensibleObject'.split(),
-                           'uid': 'user' + str(idx)})))
+                                            'uid': 'user' + str(idx)})))
             except ldap.LDAPError as e:
                 if e == ldap.UNAVAILABLE or e == ldap.SERVER_DOWN:
                     log.fatal('AddUsers: failed to add (' + USER_DN + ') error: ' + e.message['desc'])

+ 4 - 10
dirsrvtests/tests/suites/dynamic-plugins/test_dynamic_plugins.py

@@ -11,20 +11,14 @@ Created on Dec 09, 2014
 
 @author: mreynolds
 '''
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
 import logging
+
+import ldap.sasl
 import pytest
+from lib389.tasks import *
+
 import plugin_tests
 import stress_tests
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
 from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)

+ 11 - 18
dirsrvtests/tests/suites/filter/filter_test.py

@@ -6,16 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.topologies import topology_st
 
@@ -35,10 +28,10 @@ def test_filter_escaped(topology_st):
 
     try:
         topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '1',
-                                 'cn': 'test * me',
-                                 'uid': 'test_entry',
-                                 'userpassword': PASSWORD})))
+                                                       'sn': '1',
+                                                       'cn': 'test * me',
+                                                       'uid': 'test_entry',
+                                                       'userpassword': PASSWORD})))
     except ldap.LDAPError as e:
         log.fatal('test_filter_escaped: Failed to add test user ' + USER1_DN + ': error ' +
                   e.message['desc'])
@@ -46,10 +39,10 @@ def test_filter_escaped(topology_st):
 
     try:
         topology_st.standalone.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'test me',
-                                 'uid': 'test_entry2',
-                                 'userpassword': PASSWORD})))
+                                                       'sn': '2',
+                                                       'cn': 'test me',
+                                                       'uid': 'test_entry2',
+                                                       'userpassword': PASSWORD})))
     except ldap.LDAPError as e:
         log.fatal('test_filter_escaped: Failed to add test user ' + USER2_DN + ': error ' + e.message['desc'])
         assert False
@@ -61,7 +54,7 @@ def test_filter_escaped(topology_st):
             assert False
     except ldap.LDAPError as e:
         log.fatal('test_filter_escaped: Failed to search for user(%s), error: %s' %
-        (USER1_DN, e.message('desc')))
+                  (USER1_DN, e.message('desc')))
         assert False
 
     log.info('test_filter_escaped: PASSED')
@@ -77,7 +70,7 @@ def test_filter_search_original_attrs(topology_st):
 
     try:
         entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_BASE,
-                                             'objectclass=top', ['objectclass-EXTRA'])
+                                                'objectclass=top', ['objectclass-EXTRA'])
         if entry[0].hasAttr('objectclass-EXTRA'):
             log.fatal('test_filter_search_original_attrs: Entry does not have the original attribute')
             assert False

+ 0 - 7
dirsrvtests/tests/suites/filter/rfc3673_all_oper_attrs_test.py

@@ -6,14 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 
-import os
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 0 - 10
dirsrvtests/tests/suites/gssapi_repl/gssapi_repl_test.py

@@ -6,19 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
-from lib389.mit_krb5 import MitKrb5
 from lib389.topologies import topology_m2
 
 #########################################

+ 0 - 1
dirsrvtests/tests/suites/ldapi/__init__.py

@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-

File diff suppressed because it is too large
+ 181 - 161
dirsrvtests/tests/suites/memberof_plugin/memberof_test.py


+ 0 - 9
dirsrvtests/tests/suites/memory_leaks/range_search_test.py

@@ -6,16 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 4 - 8
dirsrvtests/tests/suites/paged_results/paged_results_test.py

@@ -6,21 +6,17 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import time
-import ldap
-import logging
-import pytest
 from random import sample
+
+import pytest
 from ldap.controls import SimplePagedResultsControl, GetEffectiveRightsControl
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st
+
 from sss_control import SSSRequestControl
 
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
 
 if DEBUGGING:
     logging.getLogger(__name__).setLevel(logging.DEBUG)

+ 36 - 40
dirsrvtests/tests/suites/paged_results/sss_control.py

@@ -11,14 +11,10 @@ __all__ = [
     'SSSResponseControl',
 ]
 
-
-import ldap
-from ldap.ldapobject import LDAPObject
 from ldap.controls import (RequestControl, ResponseControl,
-        KNOWN_RESPONSE_CONTROLS, DecodeControlTuples)
-
-from pyasn1.type import univ, namedtype, tag, namedval, constraint
+                           KNOWN_RESPONSE_CONTROLS)
 from pyasn1.codec.ber import encoder, decoder
+from pyasn1.type import univ, namedtype, tag, namedval, constraint
 
 
 #    SortKeyList ::= SEQUENCE OF SEQUENCE {
@@ -29,14 +25,14 @@ from pyasn1.codec.ber import encoder, decoder
 
 class SortKeyType(univ.Sequence):
     componentType = namedtype.NamedTypes(
-            namedtype.NamedType('attributeType', univ.OctetString()),
-            namedtype.OptionalNamedType('orderingRule',
-                  univ.OctetString().subtype(
-                    implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
-                  )
-                ),
-            namedtype.DefaultedNamedType('reverseOrder', univ.Boolean(False).subtype(
-                implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))))
+        namedtype.NamedType('attributeType', univ.OctetString()),
+        namedtype.OptionalNamedType('orderingRule',
+                                    univ.OctetString().subtype(
+                                        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
+                                    )
+                                    ),
+        namedtype.DefaultedNamedType('reverseOrder', univ.Boolean(False).subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))))
 
 
 class SortKeyListType(univ.SequenceOf):
@@ -50,11 +46,11 @@ class SSSRequestControl(RequestControl):
     controlType = '1.2.840.113556.1.4.473'
 
     def __init__(
-        self,
-        criticality=False,
-        ordering_rules=None,
+            self,
+            criticality=False,
+            ordering_rules=None,
     ):
-        RequestControl.__init__(self,self.controlType,criticality)
+        RequestControl.__init__(self, self.controlType, criticality)
         self.ordering_rules = ordering_rules
         if isinstance(ordering_rules, basestring):
             ordering_rules = [ordering_rules]
@@ -87,33 +83,33 @@ class SSSRequestControl(RequestControl):
 
 class SortResultType(univ.Sequence):
     componentType = namedtype.NamedTypes(
-            namedtype.NamedType('sortResult', univ.Enumerated().subtype(
-                namedValues=namedval.NamedValues(
-                        ('success', 0),
-                        ('operationsError', 1),
-                        ('timeLimitExceeded', 3),
-                        ('strongAuthRequired', 8),
-                        ('adminLimitExceeded', 11),
-                        ('noSuchAttribute', 16),
-                        ('inappropriateMatching', 18),
-                        ('insufficientAccessRights', 50),
-                        ('busy', 51),
-                        ('unwillingToPerform', 53),
-                        ('other', 80)),
-                subtypeSpec=univ.Enumerated.subtypeSpec + constraint.SingleValueConstraint(
-                        0, 1, 3, 8, 11, 16, 18, 50, 51, 53, 80))),
-            namedtype.OptionalNamedType('attributeType',
-                  univ.OctetString().subtype(
-                    implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
-                  )
-                ))
+        namedtype.NamedType('sortResult', univ.Enumerated().subtype(
+            namedValues=namedval.NamedValues(
+                ('success', 0),
+                ('operationsError', 1),
+                ('timeLimitExceeded', 3),
+                ('strongAuthRequired', 8),
+                ('adminLimitExceeded', 11),
+                ('noSuchAttribute', 16),
+                ('inappropriateMatching', 18),
+                ('insufficientAccessRights', 50),
+                ('busy', 51),
+                ('unwillingToPerform', 53),
+                ('other', 80)),
+            subtypeSpec=univ.Enumerated.subtypeSpec + constraint.SingleValueConstraint(
+                0, 1, 3, 8, 11, 16, 18, 50, 51, 53, 80))),
+        namedtype.OptionalNamedType('attributeType',
+                                    univ.OctetString().subtype(
+                                        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)
+                                    )
+                                    ))
 
 
 class SSSResponseControl(ResponseControl):
     controlType = '1.2.840.113556.1.4.474'
 
-    def __init__(self,criticality=False):
-        ResponseControl.__init__(self,self.controlType,criticality)
+    def __init__(self, criticality=False):
+        ResponseControl.__init__(self, self.controlType, criticality)
 
     def decodeControlValue(self, encoded):
         p, rest = decoder.decode(encoded, asn1Spec=SortResultType())

+ 1 - 8
dirsrvtests/tests/suites/password/password_test.py

@@ -6,16 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.topologies import topology_st
 

+ 0 - 9
dirsrvtests/tests/suites/password/pwdAdmin_test.py

@@ -6,16 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 0 - 8
dirsrvtests/tests/suites/password/pwdPolicy_attribute_test.py

@@ -6,15 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import subprocess
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 4 - 9
dirsrvtests/tests/suites/password/pwdPolicy_inherit_global_test.py

@@ -6,19 +6,14 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
+import subprocess
 import time
+
 import ldap
-import logging
 import pytest
-import shutil
-import subprocess
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
 from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.INFO)

+ 1 - 6
dirsrvtests/tests/suites/password/pwdPolicy_syntax_test.py

@@ -6,14 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.topologies import topology_st
 

+ 0 - 10
dirsrvtests/tests/suites/password/pwdPolicy_warning_test.py

@@ -6,18 +6,8 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-import subprocess
-from lib389 import DirSrv, Entry, tools, tasks
 from ldap.controls.ppolicy import PasswordPolicyControl
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 0 - 9
dirsrvtests/tests/suites/password/pwd_algo_test.py

@@ -6,16 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 0 - 7
dirsrvtests/tests/suites/password/pwp_history_test.py

@@ -6,14 +6,7 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import ldap
-import logging
 import pytest
-import time
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_st

+ 2 - 11
dirsrvtests/tests/suites/replication/cleanallruv_test.py

@@ -6,18 +6,9 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
 import threading
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389.repltools import ReplTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
 from lib389.utils import *
 from lib389.topologies import topology_m4

+ 4 - 5
dirsrvtests/tests/suites/replication/tombstone_test.py

@@ -27,9 +27,9 @@ def test_purge_success(topology_st):
     log.info("Add and then delete an entry to create a tombstone...")
     try:
         topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'entry1'})))
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'entry1'})))
     except ldap.LDAPError as e:
         log.error('Failed to add entry: {}'.format(e.message['desc']))
         assert False
@@ -43,7 +43,7 @@ def test_purge_success(topology_st):
     log.info('Search for tombstone entries...')
     try:
         entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                               '(objectclass=nsTombstone)')
+                                                  '(objectclass=nsTombstone)')
         assert entries
     except ldap.LDAPError as e:
         log.fatal('Search failed: {}'.format(e.message['desc']))
@@ -55,4 +55,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 2 - 10
dirsrvtests/tests/suites/replication/wait_for_async_feature_test.py

@@ -6,19 +6,11 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
+from collections import Counter
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
-from collections import Counter
 from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)

+ 33 - 39
dirsrvtests/tests/suites/rootdn_plugin/rootdn_plugin_test.py

@@ -6,18 +6,12 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
 import socket
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
+from lib389.tools import DirSrvTools
 from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
@@ -62,8 +56,8 @@ def test_rootdn_init(topology_st):
     #
     try:
         topology_st.standalone.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),
-                                 'uid': 'user1',
-                                 'userpassword': PASSWORD})))
+                                                       'uid': 'user1',
+                                                       'userpassword': PASSWORD})))
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_init: Failed to add test user ' + USER1_DN + ': error ' +
                   e.message['desc'])
@@ -104,7 +98,7 @@ def test_rootdn_access_specific_time(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', open_time),
-                                  (ldap.MOD_ADD, 'rootdn-close-time', close_time)])
+                                                    (ldap.MOD_ADD, 'rootdn-close-time', close_time)])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_specific_time: Failed to set (blocking) open/close times: error ' +
                   e.message['desc'])
@@ -134,7 +128,7 @@ def test_rootdn_access_specific_time(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', '2359')])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_specific_time: Failed to set (open) open/close times: error ' +
                   e.message['desc'])
@@ -152,7 +146,7 @@ def test_rootdn_access_specific_time(topology_st):
     #
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_DELETE, 'rootdn-open-time', None),
-                                                 (ldap.MOD_DELETE, 'rootdn-close-time', None)])
+                                                    (ldap.MOD_DELETE, 'rootdn-close-time', None)])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_specific_time: Failed to delete open and close time: error ' +
                   e.message['desc'])
@@ -198,7 +192,7 @@ def test_rootdn_access_day_of_week(topology_st):
     #
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
-                                     deny_days)])
+                                                     deny_days)])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
                   e.message['desc'])
@@ -228,7 +222,7 @@ def test_rootdn_access_day_of_week(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-days-allowed',
-                                     allow_days)])
+                                                     allow_days)])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_day_of_week: Failed to set the deny days: error ' +
                   e.message['desc'])
@@ -270,11 +264,11 @@ def test_rootdn_access_denied_ip(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
-                                                  'rootdn-deny-ip',
-                                                  '127.0.0.1'),
-                                                 (ldap.MOD_ADD,
-                                                  'rootdn-deny-ip',
-                                                  '::1')])
+                                                     'rootdn-deny-ip',
+                                                     '127.0.0.1'),
+                                                    (ldap.MOD_ADD,
+                                                     'rootdn-deny-ip',
+                                                     '::1')])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_denied_ip: Failed to set rootDN plugin config: error ' +
                   e.message['desc'])
@@ -346,11 +340,11 @@ def test_rootdn_access_denied_host(topology_st):
     localhost = DirSrvTools.getLocalhost()
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
-                                                  'rootdn-deny-host',
-                                                  hostname)])
+                                                     'rootdn-deny-host',
+                                                     hostname)])
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
-                                                  'rootdn-deny-host',
-                                                  localhost)])
+                                                     'rootdn-deny-host',
+                                                     localhost)])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_denied_host: Failed to set deny host: error ' +
                   e.message['desc'])
@@ -453,7 +447,7 @@ def test_rootdn_access_allowed_ip(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-allow-ip', '127.0.0.1'),
-                                  (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')])
+                                                    (ldap.MOD_ADD, 'rootdn-allow-ip', '::1')])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_allowed_ip: Failed to set allowed host: error ' +
                   e.message['desc'])
@@ -529,11 +523,11 @@ def test_rootdn_access_allowed_host(topology_st):
     localhost = DirSrvTools.getLocalhost()
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
-                                                  'rootdn-allow-host',
-                                                  localhost)])
+                                                     'rootdn-allow-host',
+                                                     localhost)])
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD,
-                                                  'rootdn-allow-host',
-                                                  hostname)])
+                                                     'rootdn-allow-host',
+                                                     hostname)])
     except ldap.LDAPError as e:
         log.fatal('test_rootdn_access_allowed_host: Failed to set allowed host: error ' +
                   e.message['desc'])
@@ -590,7 +584,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-open-time', '0000'),
-                                  (ldap.MOD_ADD, 'rootdn-open-time', '0001')])
+                                                    (ldap.MOD_ADD, 'rootdn-open-time', '0001')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
         assert False
     except ldap.LDAPError:
@@ -598,7 +592,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '-1'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: -1"')
         assert False
     except ldap.LDAPError:
@@ -606,7 +600,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '2400'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: 2400"')
         assert False
     except ldap.LDAPError:
@@ -614,7 +608,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', 'aaaaa'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', '0000')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-open-time: aaaaa"')
         assert False
     except ldap.LDAPError:
@@ -632,7 +626,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-close-time', '0000'),
-                                  (ldap.MOD_ADD, 'rootdn-close-time', '0001')])
+                                                    (ldap.MOD_ADD, 'rootdn-close-time', '0001')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add multiple "rootdn-open-time"')
         assert False
     except ldap.LDAPError:
@@ -640,7 +634,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', '-1')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: -1"')
         assert False
     except ldap.LDAPError:
@@ -648,7 +642,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', '2400')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: 2400"')
         assert False
     except ldap.LDAPError:
@@ -656,7 +650,7 @@ def test_rootdn_config_validate(topology_st):
 
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'rootdn-open-time', '0000'),
-                                  (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')])
+                                                    (ldap.MOD_REPLACE, 'rootdn-close-time', 'aaaaa')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add invalid "rootdn-close-time: aaaaa"')
         assert False
     except ldap.LDAPError:
@@ -667,7 +661,7 @@ def test_rootdn_config_validate(topology_st):
     #
     try:
         topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_ADD, 'rootdn-days-allowed', 'Mon'),
-                                  (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')])
+                                                    (ldap.MOD_ADD, 'rootdn-days-allowed', 'Tue')])
         log.fatal('test_rootdn_config_validate: Incorrectly allowed to add two "rootdn-days-allowed"')
         assert False
     except ldap.LDAPError:

+ 5 - 10
dirsrvtests/tests/suites/schema/test_schema.py

@@ -11,19 +11,14 @@ Created on Dec 18, 2013
 
 @author: rmeggins
 '''
-import os
-import sys
-import time
+import logging
+
 import ldap
+import pytest
 import six
 from ldap.cidict import cidict
 from ldap.schema import SubSchema
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
 from lib389._constants import *
-from lib389.properties import *
 from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
@@ -49,7 +44,7 @@ def ochasattr(subschema, oc, mustormay, attr, key):
         # look in parents
         for noroid in oc.sup:
             ocpar = subschema.get_obj(occlass, noroid)
-            assert(ocpar)
+            assert (ocpar)
             rc = ochasattr(subschema, ocpar, mustormay, attr, key)
             if rc:
                 break
@@ -101,7 +96,7 @@ def atgetparfield(subschema, at, field):
     v = None
     for nameoroid in at.sup:
         atpar = subschema.get_obj(attrclass, nameoroid)
-        assert(atpar)
+        assert (atpar)
         v = atpar.__dict__.get(field, atgetparfield(subschema, atpar, field))
         if v is not None:
             break

+ 0 - 52
dirsrvtests/tests/tickets/finalizer.py

@@ -1,52 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details. 
-# --- END COPYRIGHT BLOCK ---
-#
-'''
-Created on Nov 5, 2013
-
-@author: tbordaz
-'''
-import os
-import sys
-import time
-import ldap
-import logging
-import socket
-import time
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import DN_DM
-from lib389.properties import *
-
-log = logging.getLogger(__name__)
-
-def test_finalizer():
-    # for each defined instance, remove it
-    for args_instance in ALL_INSTANCES:
-        instance = DirSrv(verbose=True)
-        instance.allocate(args_instance)
-        if instance.exists():
-            instance.delete()
-
-        # remove any existing backup for this instance
-        instance.clearBackupFS()
-
-def run_isolated():
-    '''
-        run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
-        To run isolated without py.test, you need to
-            - set the installation prefix
-            - run this program
-    '''
-    test_finalizer()
-
-if __name__ == '__main__':
-    run_isolated()
-

+ 102 - 125
dirsrvtests/tests/tickets/ticket1347760_test.py

@@ -6,24 +6,17 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import ldap
-import logging
-import pytest
 from subprocess import Popen
-from lib389 import DirSrv, Entry
+
+import pytest
 from lib389.paths import Paths
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 CONFIG_DN = 'cn=config'
 BOU = 'BOU'
 BINDOU = 'ou=%s,%s' % (BOU, DEFAULT_SUFFIX)
@@ -40,43 +33,6 @@ GROUPOU = 'ou=groups,%s' % DEFAULT_SUFFIX
 BOGUSOU = 'ou=OU,%s' % DEFAULT_SUFFIX
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
 def pattern_accesslog(file, log_pattern):
     for i in range(5):
         try:
@@ -186,49 +142,49 @@ def check_op_result(server, op, dn, superior, exists, rc):
     log.info('PASSED\n')
 
 
-def test_ticket1347760(topology):
+def test_ticket1347760(topology_st):
     """
     Prevent revealing the entry info to whom has no access rights.
     """
     log.info('Testing Bug 1347760 - Information disclosure via repeated use of LDAP ADD operation, etc.')
 
     log.info('Disabling accesslog logbuffering')
-    topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', 'off')])
+    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-accesslog-logbuffering', 'off')])
 
     log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     log.info('Adding ou=%s a bind user belongs to.' % BOU)
-    topology.standalone.add_s(Entry((BINDOU, {
-                              'objectclass': 'top organizationalunit'.split(),
-                              'ou': BOU})))
+    topology_st.standalone.add_s(Entry((BINDOU, {
+        'objectclass': 'top organizationalunit'.split(),
+        'ou': BOU})))
 
     log.info('Adding a bind user.')
-    topology.standalone.add_s(Entry((BINDDN,
-                                     {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                      'cn': 'bind user',
-                                      'sn': 'user',
-                                      'userPassword': BINDPW})))
+    topology_st.standalone.add_s(Entry((BINDDN,
+                                        {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                         'cn': 'bind user',
+                                         'sn': 'user',
+                                         'userPassword': BINDPW})))
 
     log.info('Adding a test user.')
-    topology.standalone.add_s(Entry((TESTDN,
-                                     {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                      'cn': 'test user',
-                                      'sn': 'user',
-                                      'userPassword': TESTPW})))
+    topology_st.standalone.add_s(Entry((TESTDN,
+                                        {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                         'cn': 'test user',
+                                         'sn': 'user',
+                                         'userPassword': TESTPW})))
 
     log.info('Deleting aci in %s.' % DEFAULT_SUFFIX)
-    topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])
+    topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', None)])
 
     log.info('While binding as DM, acquire an access log path')
-    ds_paths = Paths(serverid=topology.standalone.serverid,
-                     instance=topology.standalone)
+    ds_paths = Paths(serverid=topology_st.standalone.serverid,
+                     instance=topology_st.standalone)
     file_path = ds_paths.access_log
 
     log.info('Bind case 1. the bind user has no rights to read the entry itself, bind should be successful.')
     log.info('Bind as {%s,%s} who has no access rights.' % (BINDDN, BINDPW))
     try:
-        topology.standalone.simple_bind_s(BINDDN, BINDPW)
+        topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
     except ldap.LDAPError as e:
         log.info('Desc ' + e.message['desc'])
         assert False
@@ -236,10 +192,11 @@ def test_ticket1347760(topology):
     file_obj = open(file_path, "r")
     log.info('Access log path: %s' % file_path)
 
-    log.info('Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
+    log.info(
+        'Bind case 2-1. the bind user does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
     log.info('Bind as {%s,%s} who does not exist.' % (BOGUSDN, 'bogus'))
     try:
-        topology.standalone.simple_bind_s(BOGUSDN, 'bogus')
+        topology_st.standalone.simple_bind_s(BOGUSDN, 'bogus')
     except ldap.LDAPError as e:
         log.info("Exception (expected): %s" % type(e).__name__)
         log.info('Desc ' + e.message['desc'])
@@ -253,10 +210,11 @@ def test_ticket1347760(topology):
             log.info('Cause found - %s' % cause)
     time.sleep(1)
 
-    log.info('Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
+    log.info(
+        'Bind case 2-2. the bind user\'s suffix does not exist, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
     log.info('Bind as {%s,%s} who does not exist.' % (BOGUSSUFFIX, 'bogus'))
     try:
-        topology.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus')
+        topology_st.standalone.simple_bind_s(BOGUSSUFFIX, 'bogus')
     except ldap.LDAPError as e:
         log.info("Exception (expected): %s" % type(e).__name__)
         log.info('Desc ' + e.message['desc'])
@@ -270,10 +228,11 @@ def test_ticket1347760(topology):
             log.info('Cause found - %s' % cause)
     time.sleep(1)
 
-    log.info('Bind case 2-3. the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
+    log.info(
+        'Bind case 2-3. the bind user\'s password is wrong, bind should fail with error %s' % ldap.INVALID_CREDENTIALS.__name__)
     log.info('Bind as {%s,%s} who does not exist.' % (BINDDN, 'bogus'))
     try:
-        topology.standalone.simple_bind_s(BINDDN, 'bogus')
+        topology_st.standalone.simple_bind_s(BINDDN, 'bogus')
     except ldap.LDAPError as e:
         log.info("Exception (expected): %s" % type(e).__name__)
         log.info('Desc ' + e.message['desc'])
@@ -291,121 +250,139 @@ def test_ticket1347760(topology):
     acival = '(targetattr="*")(version 3.0; acl "%s"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN)
     log.info('aci: %s' % acival)
     log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', acival)])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(BINDOU, [(ldap.MOD_ADD, 'aci', acival)])
     time.sleep(1)
 
     log.info('Bind case 3. the bind user has the right to read the entry itself, bind should be successful.')
     log.info('Bind as {%s,%s} which should be ok.\n' % (BINDDN, BINDPW))
-    topology.standalone.simple_bind_s(BINDDN, BINDPW)
+    topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
 
     log.info('The following operations are against the subtree the bind user %s has no rights.' % BINDDN)
     # Search
     exists = True
     rc = ldap.SUCCESS
-    log.info('Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc)
-    check_op_result(topology.standalone, 'search', TESTDN, None, exists, rc)
+    log.info(
+        'Search case 1. the bind user has no rights to read the search entry, it should return no search results with %s' % rc)
+    check_op_result(topology_st.standalone, 'search', TESTDN, None, exists, rc)
 
     exists = False
     rc = ldap.SUCCESS
-    log.info('Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'search', BOGUSDN, None, exists, rc)
+    log.info(
+        'Search case 2-1. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'search', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.SUCCESS
-    log.info('Search case 2-2. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'search', BOGUSDN2, None, exists, rc)
+    log.info(
+        'Search case 2-2. the search entry does not exist, the search should return no search results with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc)
 
     # Add
     exists = True
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'add', TESTDN, None, exists, rc)
+    log.info(
+        'Add case 1. the bind user has no rights AND the adding entry exists, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'add', BOGUSDN, None, exists, rc)
+    log.info(
+        'Add case 2-1. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'add', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'add', BOGUSDN2, None, exists, rc)
+    log.info(
+        'Add case 2-2. the bind user has no rights AND the adding entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'add', BOGUSDN2, None, exists, rc)
 
     # Modify
     exists = True
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modify', TESTDN, None, exists, rc)
+    log.info(
+        'Modify case 1. the bind user has no rights AND the modifying entry exists, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modify', TESTDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modify', BOGUSDN, None, exists, rc)
+    log.info(
+        'Modify case 2-1. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modify', BOGUSDN2, None, exists, rc)
+    log.info(
+        'Modify case 2-2. the bind user has no rights AND the modifying entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modify', BOGUSDN2, None, exists, rc)
 
     # Modrdn
     exists = True
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', TESTDN, None, exists, rc)
+    log.info(
+        'Modrdn case 1. the bind user has no rights AND the renaming entry exists, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modrdn', TESTDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modrdn case 2-1. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', BOGUSDN, None, exists, rc)
+    log.info(
+        'Modrdn case 2-1. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', BOGUSDN2, None, exists, rc)
+    log.info(
+        'Modrdn case 2-2. the bind user has no rights AND the renaming entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modrdn', BOGUSDN2, None, exists, rc)
 
     exists = True
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc)
+    log.info(
+        'Modrdn case 3. the bind user has no rights AND the node moving an entry to exists, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modrdn', TESTDN, GROUPOU, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
+    log.info(
+        'Modrdn case 4-1. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
+    log.info(
+        'Modrdn case 4-2. the bind user has no rights AND the node moving an entry to does not, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
 
     # Delete
     exists = True
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'delete', TESTDN, None, exists, rc)
+    log.info(
+        'Delete case 1. the bind user has no rights AND the deleting entry exists, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'delete', TESTDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'delete', BOGUSDN, None, exists, rc)
+    log.info(
+        'Delete case 2-1. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.INSUFFICIENT_ACCESS
-    log.info('Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'delete', BOGUSDN2, None, exists, rc)
+    log.info(
+        'Delete case 2-2. the bind user has no rights AND the deleting entry does not exist, it should fail with %s' % rc.__name__)
+    check_op_result(topology_st.standalone, 'delete', BOGUSDN2, None, exists, rc)
 
     log.info('EXTRA: Check no regressions')
     log.info('Adding aci for %s to %s.' % (BINDDN, DEFAULT_SUFFIX))
     acival = '(targetattr="*")(version 3.0; acl "%s-all"; allow(all) userdn = "ldap:///%s";)' % (BUID, BINDDN)
     log.info('Bind as {%s,%s}' % (DN_DM, PASSWORD))
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
     time.sleep(1)
 
     log.info('Bind as {%s,%s}.' % (BINDDN, BINDPW))
     try:
-        topology.standalone.simple_bind_s(BINDDN, BINDPW)
+        topology_st.standalone.simple_bind_s(BINDDN, BINDPW)
     except ldap.LDAPError as e:
         log.info('Desc ' + e.message['desc'])
         assert False
@@ -414,42 +391,42 @@ def test_ticket1347760(topology):
     exists = False
     rc = ldap.NO_SUCH_OBJECT
     log.info('Search case. the search entry does not exist, the search should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'search', BOGUSDN2, None, exists, rc)
+    check_op_result(topology_st.standalone, 'search', BOGUSDN2, None, exists, rc)
     file_obj.close()
 
     exists = True
     rc = ldap.ALREADY_EXISTS
     log.info('Add case. the adding entry already exists, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'add', TESTDN, None, exists, rc)
+    check_op_result(topology_st.standalone, 'add', TESTDN, None, exists, rc)
 
     exists = False
     rc = ldap.NO_SUCH_OBJECT
     log.info('Modify case. the modifying entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modify', BOGUSDN, None, exists, rc)
+    check_op_result(topology_st.standalone, 'modify', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.NO_SUCH_OBJECT
     log.info('Modrdn case 1. the renaming entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', BOGUSDN, None, exists, rc)
+    check_op_result(topology_st.standalone, 'modrdn', BOGUSDN, None, exists, rc)
 
     exists = False
     rc = ldap.NO_SUCH_OBJECT
     log.info('Modrdn case 2. the node moving an entry to does not, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
+    check_op_result(topology_st.standalone, 'modrdn', TESTDN, BOGUSOU, exists, rc)
 
     exists = False
     rc = ldap.NO_SUCH_OBJECT
     log.info('Delete case. the deleting entry does not exist, it should fail with %s' % rc.__name__)
-    check_op_result(topology.standalone, 'delete', BOGUSDN, None, exists, rc)
+    check_op_result(topology_st.standalone, 'delete', BOGUSDN, None, exists, rc)
 
     log.info('Inactivate %s' % BINDDN)
-    nsinactivate = '%s/sbin/ns-inactivate.pl' % topology.standalone.prefix
+    nsinactivate = '%s/sbin/ns-inactivate.pl' % topology_st.standalone.prefix
     p = Popen([nsinactivate, '-Z', 'standalone', '-D', DN_DM, '-w', PASSWORD, '-I', BINDDN])
-    assert(p.wait() == 0)
+    assert (p.wait() == 0)
 
     log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, BUID, ldap.UNWILLING_TO_PERFORM.__name__))
     try:
-        topology.standalone.simple_bind_s(BINDDN, BUID)
+        topology_st.standalone.simple_bind_s(BINDDN, BUID)
     except ldap.LDAPError as e:
         log.info("Exception (expected): %s" % type(e).__name__)
         log.info('Desc ' + e.message['desc'])
@@ -457,7 +434,7 @@ def test_ticket1347760(topology):
 
     log.info('Bind as {%s,%s} which should fail with %s.' % (BINDDN, 'bogus', ldap.INVALID_CREDENTIALS.__name__))
     try:
-        topology.standalone.simple_bind_s(BINDDN, 'bogus')
+        topology_st.standalone.simple_bind_s(BINDDN, 'bogus')
     except ldap.LDAPError as e:
         log.info("Exception (expected): %s" % type(e).__name__)
         log.info('Desc ' + e.message['desc'])

+ 27 - 67
dirsrvtests/tests/tickets/ticket365_test.py

@@ -7,56 +7,17 @@
 # --- END COPYRIGHT BLOCK ---
 #
 
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
 
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket365(topology):
+def test_ticket365(topology_st):
     '''
     Write your testcase here...
 
@@ -75,11 +36,11 @@ def test_ticket365(topology):
     # Add the test entry
     #
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'test_entry',
-                          'userpassword': 'password'
-                          })))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'test_entry',
+            'userpassword': 'password'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add test user: error ' + e.message['desc'])
         assert False
@@ -88,16 +49,16 @@ def test_ticket365(topology):
     # Enable the audit log
     #
     try:
-        topology.standalone.modify_s(DN_CONFIG,
-                                     [(ldap.MOD_REPLACE,
-                                       'nsslapd-auditlog-logging-enabled',
-                                       'on')])
+        topology_st.standalone.modify_s(DN_CONFIG,
+                                        [(ldap.MOD_REPLACE,
+                                          'nsslapd-auditlog-logging-enabled',
+                                          'on')])
     except ldap.LDAPError as e:
         log.fatal('Failed to enable audit log, error: ' + e.message['desc'])
         assert False
     '''
     try:
-        ent = topology.standalone.getEntry(DN_CONFIG, attrlist=[
+        ent = topology_st.standalone.getEntry(DN_CONFIG, attrlist=[
                     'nsslapd-instancedir',
                     'nsslapd-errorlog',
                     'nsslapd-accesslog',
@@ -108,8 +69,8 @@ def test_ticket365(topology):
     # Allow the unhashed password to be written to audit log
     #
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
-            'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+                                                     'nsslapd-auditlog-logging-hide-unhashed-pw', 'off')])
     except ldap.LDAPError as e:
         log.fatal('Failed to enable writing unhashed password to audit log, ' +
                   'error: ' + e.message['desc'])
@@ -119,9 +80,9 @@ def test_ticket365(topology):
     # Set new password, and check the audit log
     #
     try:
-        topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
-                                                'userpassword',
-                                                'mypassword')])
+        topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+                                                   'userpassword',
+                                                   'mypassword')])
     except ldap.LDAPError as e:
         log.fatal('Failed to enable writing unhashed password to audit log, ' +
                   'error: ' + e.message['desc'])
@@ -129,7 +90,7 @@ def test_ticket365(topology):
 
     # Check audit log
     time.sleep(1)
-    if not topology.standalone.searchAuditLog('unhashed#user#password: mypassword'):
+    if not topology_st.standalone.searchAuditLog('unhashed#user#password: mypassword'):
         log.fatal('failed to find unhashed password in auditlog')
         assert False
 
@@ -137,10 +98,10 @@ def test_ticket365(topology):
     # Hide unhashed password in audit log
     #
     try:
-        topology.standalone.modify_s(DN_CONFIG,
-            [(ldap.MOD_REPLACE,
-              'nsslapd-auditlog-logging-hide-unhashed-pw',
-              'on')])
+        topology_st.standalone.modify_s(DN_CONFIG,
+                                        [(ldap.MOD_REPLACE,
+                                          'nsslapd-auditlog-logging-hide-unhashed-pw',
+                                          'on')])
     except ldap.LDAPError as e:
         log.fatal('Failed to deny writing unhashed password to audit log, ' +
                   'error: ' + e.message['desc'])
@@ -151,9 +112,9 @@ def test_ticket365(topology):
     # Modify password, and check the audit log
     #
     try:
-        topology.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
-                                                'userpassword',
-                                                'hidepassword')])
+        topology_st.standalone.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+                                                   'userpassword',
+                                                   'hidepassword')])
     except ldap.LDAPError as e:
         log.fatal('Failed to enable writing unhashed password to audit log, ' +
                   'error: ' + e.message['desc'])
@@ -161,7 +122,7 @@ def test_ticket365(topology):
 
     # Check audit log
     time.sleep(1)
-    if topology.standalone.searchAuditLog('unhashed#user#password: hidepassword'):
+    if topology_st.standalone.searchAuditLog('unhashed#user#password: hidepassword'):
         log.fatal('Found unhashed password in auditlog')
         assert False
 
@@ -173,4 +134,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 22 - 75
dirsrvtests/tests/tickets/ticket397_test.py

@@ -1,17 +1,9 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
 USER_DN = 'uid=user,ou=People,%s' % DEFAULT_SUFFIX
 
 if DEBUGGING:
@@ -19,55 +11,9 @@ if DEBUGGING:
 else:
     logging.getLogger(__name__).setLevel(logging.INFO)
 
-
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    """The DS Topology Class"""
-    def __init__(self, standalone):
-        """Init"""
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    """Create DS Deployment"""
-
-    # Creating standalone instance ...
-    if DEBUGGING:
-        standalone = DirSrv(verbose=True)
-    else:
-        standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        """If we are debugging just stop the instances, otherwise remove
-        them
-        """
-        if DEBUGGING:
-            standalone.stop()
-        else:
-            standalone.delete()
-
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
 def _test_bind(inst, password):
     result = True
     userconn = ldap.initialize("ldap://%s:%s" % (HOST_STANDALONE, PORT_STANDALONE))
@@ -78,6 +24,7 @@ def _test_bind(inst, password):
         result = False
     return result
 
+
 def _test_algo(inst, algo_name):
     inst.config.set('passwordStorageScheme', algo_name)
 
@@ -86,14 +33,14 @@ def _test_algo(inst, algo_name):
 
     # Create the user with a password
     inst.add_s(Entry((
-                USER_DN, {
-                    'objectClass': 'top account simplesecurityobject'.split(),
-                     'uid': 'user',
-                     'userpassword': ['Secret123', ]
-                })))
+        USER_DN, {
+            'objectClass': 'top account simplesecurityobject'.split(),
+            'uid': 'user',
+            'userpassword': ['Secret123', ]
+        })))
 
     # Make sure when we read the userPassword field, it is the correct ALGO
-    pw_field = inst.search_s(USER_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['userPassword']  )[0]
+    pw_field = inst.search_s(USER_DN, ldap.SCOPE_BASE, '(objectClass=*)', ['userPassword'])[0]
 
     if DEBUGGING:
         print(pw_field.getValue('userPassword'))
@@ -101,29 +48,30 @@ def _test_algo(inst, algo_name):
     if algo_name != 'CLEAR':
         lalgo_name = algo_name.lower()
         lpw_algo_name = pw_field.getValue('userPassword').lower()
-        assert(lpw_algo_name.startswith("{%s}" % lalgo_name))
+        assert (lpw_algo_name.startswith("{%s}" % lalgo_name))
     # Now make sure a bind works
-    assert(_test_bind(inst, 'Secret123'))
+    assert (_test_bind(inst, 'Secret123'))
     # Bind with a wrong shorter password, should fail
-    assert(not _test_bind(inst, 'Wrong'))
+    assert (not _test_bind(inst, 'Wrong'))
     # Bind with a wrong longer password, should fail
-    assert(not _test_bind(inst, 'This is even more wrong'))
+    assert (not _test_bind(inst, 'This is even more wrong'))
     # Bind with a password that has the algo in the name
-    assert(not _test_bind(inst, '{%s}SomeValues....' % algo_name))
+    assert (not _test_bind(inst, '{%s}SomeValues....' % algo_name))
     # Bind with a wrong exact length password.
-    assert(not _test_bind(inst, 'Alsowrong'))
+    assert (not _test_bind(inst, 'Alsowrong'))
     # Bind with a subset password, should fail
-    assert(not _test_bind(inst, 'Secret'))
+    assert (not _test_bind(inst, 'Secret'))
     if algo_name != 'CRYPT':
         # Bind with a subset password that is 1 char shorter, to detect off by 1 in clear
-        assert(not _test_bind(inst, 'Secret12'))
+        assert (not _test_bind(inst, 'Secret12'))
         # Bind with a superset password, should fail
-        assert(not _test_bind(inst, 'Secret123456'))
+        assert (not _test_bind(inst, 'Secret123456'))
     # Delete the user
     inst.delete_s(USER_DN)
     # done!
 
-def test_397(topology):
+
+def test_397(topology_st):
     """
     Assert that all of our password algorithms correctly PASS and FAIL varying
     password conditions.
@@ -136,9 +84,9 @@ def test_397(topology):
 
     # Merge this to the password suite in the future
 
-    for algo in ('PBKDF2_SHA256', ):
+    for algo in ('PBKDF2_SHA256',):
         for i in range(0, 10):
-            _test_algo(topology.standalone, algo)
+            _test_algo(topology_st.standalone, algo)
 
     log.info('Test PASSED')
 
@@ -148,4 +96,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 29 - 74
dirsrvtests/tests/tickets/ticket47313_test.py

@@ -6,64 +6,20 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
-import time
+
+import ldap
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 ENTRY_NAME = 'test_entry'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47313_run(topology):
+def test_ticket47313_run(topology_st):
     """
         It adds 2 test entries
         Search with filters including subtype and !
@@ -71,14 +27,14 @@ def test_ticket47313_run(topology):
     """
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     # enable filter error logging
-    #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')]
-    #topology.standalone.modify_s(DN_CONFIG, mod)
+    # mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')]
+    # topology_st.standalone.modify_s(DN_CONFIG, mod)
 
-    topology.standalone.log.info("\n\n######################### ADD ######################\n")
+    topology_st.standalone.log.info("\n\n######################### ADD ######################\n")
 
     # Prepare the entry with cn;fr & cn;en
     entry_name_fr = '%s fr' % (ENTRY_NAME)
@@ -101,44 +57,44 @@ def test_ticket47313_run(topology):
     entry_en_only.setValues('cn', entry_name_en_only)
     entry_en_only.setValues('cn;en', entry_name_en)
 
-    topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both))
-    topology.standalone.add_s(entry_both)
+    topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both))
+    topology_st.standalone.add_s(entry_both)
 
-    topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only))
-    topology.standalone.add_s(entry_en_only)
+    topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only))
+    topology_st.standalone.add_s(entry_en_only)
 
-    topology.standalone.log.info("\n\n######################### SEARCH ######################\n")
+    topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n")
 
     # filter: (&(cn=test_entry en only)(!(cn=test_entry fr)))
     myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr)
-    topology.standalone.log.info("Try to search with filter %s" % myfilter)
-    ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
+    topology_st.standalone.log.info("Try to search with filter %s" % myfilter)
+    ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
     assert len(ents) == 1
     assert ents[0].sn == entry_name_en_only
-    topology.standalone.log.info("Found %s" % ents[0].dn)
+    topology_st.standalone.log.info("Found %s" % ents[0].dn)
 
     # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr)))
     myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr)
-    topology.standalone.log.info("Try to search with filter %s" % myfilter)
-    ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
+    topology_st.standalone.log.info("Try to search with filter %s" % myfilter)
+    ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
     assert len(ents) == 1
     assert ents[0].sn == entry_name_en_only
-    topology.standalone.log.info("Found %s" % ents[0].dn)
+    topology_st.standalone.log.info("Found %s" % ents[0].dn)
 
     # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en)))
     myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en)
-    topology.standalone.log.info("Try to search with filter %s" % myfilter)
-    ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
+    topology_st.standalone.log.info("Try to search with filter %s" % myfilter)
+    ents = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter)
     assert len(ents) == 0
-    topology.standalone.log.info("Found none")
+    topology_st.standalone.log.info("Found none")
 
-    topology.standalone.log.info("\n\n######################### DELETE ######################\n")
+    topology_st.standalone.log.info("\n\n######################### DELETE ######################\n")
 
-    topology.standalone.log.info("Try to delete  %s " % entry_dn_both)
-    topology.standalone.delete_s(entry_dn_both)
+    topology_st.standalone.log.info("Try to delete  %s " % entry_dn_both)
+    topology_st.standalone.delete_s(entry_dn_both)
 
-    topology.standalone.log.info("Try to delete  %s " % entry_dn_en_only)
-    topology.standalone.delete_s(entry_dn_en_only)
+    topology_st.standalone.log.info("Try to delete  %s " % entry_dn_en_only)
+    topology_st.standalone.delete_s(entry_dn_en_only)
 
     log.info('Testcase PASSED')
 
@@ -148,4 +104,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 17 - 63
dirsrvtests/tests/tickets/ticket47384_test.py

@@ -6,62 +6,16 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
 
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-        if os.geteuid() == 0:
-            os.system('setenforce 1')
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47384(topology):
+def test_ticket47384(topology_st):
     '''
     Test pluginpath validation: relative and absolute paths
 
@@ -76,29 +30,30 @@ def test_ticket47384(topology):
     os.system('setenforce 0')
 
     PLUGIN_DN = 'cn=%s,cn=plugins,cn=config' % PLUGIN_WHOAMI
-    tmp_dir = topology.standalone.get_tmp_dir()
-    plugin_dir = topology.standalone.get_plugin_dir()
+    tmp_dir = topology_st.standalone.get_tmp_dir()
+    plugin_dir = topology_st.standalone.get_plugin_dir()
 
     # Copy the library to our tmp directory
     try:
         shutil.copy('%s/libwhoami-plugin.so' % plugin_dir, tmp_dir)
     except IOError as e:
-        log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % (plugin_dir, tmp_dir, e.strerror))
+        log.fatal('Failed to copy %s/libwhoami-plugin.so to the tmp directory %s, error: %s' % (
+        plugin_dir, tmp_dir, e.strerror))
         assert False
     try:
         shutil.copy('%s/libwhoami-plugin.la' % plugin_dir, tmp_dir)
     except IOError as e:
         log.warn('Failed to copy ' + plugin_dir +
                  '/libwhoami-plugin.la to the tmp directory, error: '
-                  + e.strerror)
+                 + e.strerror)
 
     #
     # Test adding valid plugin paths
     #
     # Try using the absolute path to the current library
     try:
-        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
-                                     'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)])
+        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                                     'nsslapd-pluginPath', '%s/libwhoami-plugin' % plugin_dir)])
     except ldap.LDAPError as e:
         log.error('Failed to set valid plugin path (%s): error (%s)' %
                   ('%s/libwhoami-plugin' % plugin_dir, e.message['desc']))
@@ -106,8 +61,8 @@ def test_ticket47384(topology):
 
     # Try using new remote location
     try:
-        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
-                                     'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)])
+        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                                     'nsslapd-pluginPath', '%s/libwhoami-plugin' % tmp_dir)])
     except ldap.LDAPError as e:
         log.error('Failed to set valid plugin path (%s): error (%s)' %
                   ('%s/libwhoami-plugin' % tmp_dir, e.message['desc']))
@@ -115,8 +70,8 @@ def test_ticket47384(topology):
 
     # Set plugin path back to the default
     try:
-        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
-                                     'nsslapd-pluginPath', 'libwhoami-plugin')])
+        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                                     'nsslapd-pluginPath', 'libwhoami-plugin')])
     except ldap.LDAPError as e:
         log.error('Failed to set valid relative plugin path (%s): error (%s)' %
                   ('libwhoami-plugin' % tmp_dir, e.message['desc']))
@@ -126,8 +81,8 @@ def test_ticket47384(topology):
     # Test invalid path (no library present)
     #
     try:
-        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
-                                     'nsslapd-pluginPath', '/bin/libwhoami-plugin')])
+        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                                     'nsslapd-pluginPath', '/bin/libwhoami-plugin')])
         # No exception?! This is an error
         log.error('Invalid plugin path was incorrectly accepted by the server!')
         assert False
@@ -142,8 +97,8 @@ def test_ticket47384(topology):
     # Test invalid relative path (no library present)
     #
     try:
-        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
-                                     'nsslapd-pluginPath', '../libwhoami-plugin')])
+        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE,
+                                                     'nsslapd-pluginPath', '../libwhoami-plugin')])
         # No exception?! This is an error
         log.error('Invalid plugin path was incorrectly accepted by the server!')
         assert False
@@ -162,4 +117,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 45 - 86
dirsrvtests/tests/tickets/ticket47431_test.py

@@ -6,68 +6,27 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 DN_7BITPLUGIN = "cn=7-bit check,%s" % DN_PLUGIN
 ATTRS = ["uid", "mail", "userpassword", ",", SUFFIX, None]
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47431_0(topology):
+def test_ticket47431_0(topology_st):
     '''
     Enable 7 bit plugin
     '''
     log.info("Ticket 47431 - 0: Enable 7bit plugin...")
-    topology.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
+    topology_st.standalone.plugins.enable(name=PLUGIN_7_BIT_CHECK)
 
 
-def test_ticket47431_1(topology):
+def test_ticket47431_1(topology_st):
     '''
     nsslapd-pluginarg0: uid
     nsslapd-pluginarg1: mail
@@ -85,38 +44,39 @@ def test_ticket47431_1(topology):
 
     log.debug('modify_s %s' % DN_7BITPLUGIN)
     try:
-        topology.standalone.modify_s(DN_7BITPLUGIN,
-                                     [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "mail"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', "userpassword"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ","),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', SUFFIX)])
+        topology_st.standalone.modify_s(DN_7BITPLUGIN,
+                                        [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "mail"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', "userpassword"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', ","),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg4', SUFFIX)])
     except ValueError:
         log.error('modify failed: Some problem occured with a value that was provided')
         assert False
 
     arg2 = "nsslapd-pluginarg2: userpassword"
-    topology.standalone.stop(timeout=10)
-    dse_ldif = topology.standalone.confdir + '/dse.ldif'
+    topology_st.standalone.stop(timeout=10)
+    dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
     os.system('mv %s %s.47431' % (dse_ldif, dse_ldif))
-    os.system('sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % (arg2, dse_ldif, dse_ldif))
-    topology.standalone.start(timeout=10)
+    os.system(
+        'sed -e "s/\\(%s\\)/\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1\\n\\1/" %s.47431 > %s' % (
+        arg2, dse_ldif, dse_ldif))
+    topology_st.standalone.start(timeout=10)
 
-    cmdline = 'egrep -i "%s" %s' % (expected, topology.standalone.errlog)
+    cmdline = 'egrep -i "%s" %s' % (expected, topology_st.standalone.errlog)
     p = os.popen(cmdline, "r")
     line = p.readline()
     if line == "":
-        log.error('Expected error "%s" not logged in %s' % (expected, topology.standalone.errlog))
+        log.error('Expected error "%s" not logged in %s' % (expected, topology_st.standalone.errlog))
         assert False
     else:
         log.debug('line: %s' % line)
-        log.info('Expected error "%s" logged in %s' % (expected, topology.standalone.errlog))
-
+        log.info('Expected error "%s" logged in %s' % (expected, topology_st.standalone.errlog))
 
     log.info("Ticket 47431 - 1: done")
 
 
-def test_ticket47431_2(topology):
+def test_ticket47431_2(topology_st):
     '''
     nsslapd-pluginarg0: uid
     nsslapd-pluginarg0: mail
@@ -140,23 +100,23 @@ def test_ticket47431_2(topology):
     log.info("Ticket 47431 - 2: Check two values belonging to one arg is fixed...")
 
     try:
-        topology.standalone.modify_s(DN_7BITPLUGIN,
-                                     [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
-                                      (ldap.MOD_ADD, 'nsslapd-pluginarg0', "mail"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "userpassword"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', ","),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', SUFFIX),
-                                      (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)])
+        topology_st.standalone.modify_s(DN_7BITPLUGIN,
+                                        [(ldap.MOD_REPLACE, 'nsslapd-pluginarg0', "uid"),
+                                         (ldap.MOD_ADD, 'nsslapd-pluginarg0', "mail"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "userpassword"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg2', ","),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', SUFFIX),
+                                         (ldap.MOD_DELETE, 'nsslapd-pluginarg4', None)])
     except ValueError:
         log.error('modify failed: Some problem occured with a value that was provided')
         assert False
 
     # PLUGIN LOG LEVEL
-    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+    topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
-    cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog)
+    cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog)
     p = os.popen(cmdline, "r")
     i = 0
     while ATTRS[i]:
@@ -175,7 +135,7 @@ def test_ticket47431_2(topology):
     log.info("Ticket 47431 - 2: done")
 
 
-def test_ticket47431_3(topology):
+def test_ticket47431_3(topology_st):
     '''
     nsslapd-pluginarg1: uid
     nsslapd-pluginarg3: mail
@@ -199,27 +159,27 @@ def test_ticket47431_3(topology):
     log.info("Ticket 47431 - 3: Check missing args are fixed...")
 
     try:
-        topology.standalone.modify_s(DN_7BITPLUGIN,
-                                     [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "uid"),
-                                      (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', "mail"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', "userpassword"),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', ","),
-                                      (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', SUFFIX)])
+        topology_st.standalone.modify_s(DN_7BITPLUGIN,
+                                        [(ldap.MOD_DELETE, 'nsslapd-pluginarg0', None),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', "uid"),
+                                         (ldap.MOD_DELETE, 'nsslapd-pluginarg2', None),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg3', "mail"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg5', "userpassword"),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg7', ","),
+                                         (ldap.MOD_REPLACE, 'nsslapd-pluginarg9', SUFFIX)])
     except ValueError:
         log.error('modify failed: Some problem occured with a value that was provided')
         assert False
 
     # PLUGIN LOG LEVEL
-    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+    topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
 
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.47431' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.47431' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
+    topology_st.standalone.start(timeout=10)
 
-    cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology.standalone.errlog)
+    cmdline = 'egrep -i %s %s' % ("NS7bitAttr_Init", topology_st.standalone.errlog)
     p = os.popen(cmdline, "r")
     i = 0
     while ATTRS[i]:
@@ -242,4 +202,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 71 - 184
dirsrvtests/tests/tickets/ticket47462_test.py

@@ -6,25 +6,19 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
 from lib389.properties import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
 DES_PLUGIN = 'cn=DES,cn=Password Storage Schemes,cn=plugins,cn=config'
 AES_PLUGIN = 'cn=AES,cn=Password Storage Schemes,cn=plugins,cn=config'
 MMR_PLUGIN = 'cn=Multimaster Replication Plugin,cn=plugins,cn=config'
@@ -35,115 +29,7 @@ TEST_REPL_DN = 'cn=test repl,' + DEFAULT_SUFFIX
 DES2AES_TASK_DN = 'cn=convert,cn=des2aes,cn=tasks,cn=config'
 
 
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    AGMT_DN = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-    master1.agreement
-    if not AGMT_DN:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % AGMT_DN)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(AGMT_DN)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    # clear the tmp directory
-    master1.clearTmpDir(__file__)
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47462(topology):
+def test_ticket47462(topology_m2):
     """
         Test that AES properly replaces DES during an update/restart, and that
         replication also works correctly.
@@ -157,55 +43,55 @@ def test_ticket47462(topology):
     # Add an extra attribute to the DES plugin args
     #
     try:
-        topology.master1.modify_s(DES_PLUGIN,
-                      [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
+        topology_m2.ms["master1"].modify_s(DES_PLUGIN,
+                                           [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
     except ldap.LDAPError as e:
-            log.fatal('Failed to enable DES plugin, error: ' +
-                      e.message['desc'])
-            assert False
+        log.fatal('Failed to enable DES plugin, error: ' +
+                  e.message['desc'])
+        assert False
 
     try:
-        topology.master1.modify_s(DES_PLUGIN,
-                      [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')])
+        topology_m2.ms["master1"].modify_s(DES_PLUGIN,
+                                           [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')])
     except ldap.LDAPError as e:
-            log.fatal('Failed to reset DES plugin, error: ' +
-                      e.message['desc'])
-            assert False
+        log.fatal('Failed to reset DES plugin, error: ' +
+                  e.message['desc'])
+        assert False
 
     try:
-        topology.master1.modify_s(MMR_PLUGIN,
-                      [(ldap.MOD_DELETE,
-                        'nsslapd-plugin-depends-on-named',
-                        'AES')])
+        topology_m2.ms["master1"].modify_s(MMR_PLUGIN,
+                                           [(ldap.MOD_DELETE,
+                                             'nsslapd-plugin-depends-on-named',
+                                             'AES')])
 
     except ldap.NO_SUCH_ATTRIBUTE:
         pass
     except ldap.LDAPError as e:
-            log.fatal('Failed to reset MMR plugin, error: ' +
-                      e.message['desc'])
-            assert False
+        log.fatal('Failed to reset MMR plugin, error: ' +
+                  e.message['desc'])
+        assert False
 
     #
     # Delete the AES plugin
     #
     try:
-        topology.master1.delete_s(AES_PLUGIN)
+        topology_m2.ms["master1"].delete_s(AES_PLUGIN)
     except ldap.NO_SUCH_OBJECT:
         pass
     except ldap.LDAPError as e:
-            log.fatal('Failed to delete AES plugin, error: ' +
-                      e.message['desc'])
-            assert False
+        log.fatal('Failed to delete AES plugin, error: ' +
+                  e.message['desc'])
+        assert False
 
     # restart the server so we must use DES plugin
-    topology.master1.restart(timeout=10)
+    topology_m2.ms["master1"].restart(timeout=10)
 
     #
     # Get the agmt dn, and set the password
     #
     try:
-        entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE,
-                                          'objectclass=nsDS5ReplicationAgreement')
+        entry = topology_m2.ms["master1"].search_s('cn=config', ldap.SCOPE_SUBTREE,
+                                                   'objectclass=nsDS5ReplicationAgreement')
         if entry:
             agmt_dn = entry[0].dn
             log.info('Found agmt dn (%s)' % agmt_dn)
@@ -219,8 +105,8 @@ def test_ticket47462(topology):
 
     try:
         properties = {RA_BINDPW: "password"}
-        topology.master1.agreement.setProperties(None, agmt_dn, None,
-                                                 properties)
+        topology_m2.ms["master1"].agreement.setProperties(None, agmt_dn, None,
+                                                          properties)
         log.info('Successfully modified replication agreement')
     except ValueError:
         log.error('Failed to update replica agreement: ' + AGMT_DN)
@@ -230,17 +116,17 @@ def test_ticket47462(topology):
     # Check replication works with the new DES password
     #
     try:
-        topology.master1.add_s(Entry((USER1_DN,
-                                      {'objectclass': "top person".split(),
-                                       'sn': 'sn',
-                                       'description': 'DES value to convert',
-                                       'cn': 'test_user'})))
+        topology_m2.ms["master1"].add_s(Entry((USER1_DN,
+                                               {'objectclass': "top person".split(),
+                                                'sn': 'sn',
+                                                'description': 'DES value to convert',
+                                                'cn': 'test_user'})))
         loop = 0
         ent = None
         while loop <= 10:
             try:
-                ent = topology.master2.getEntry(USER1_DN, ldap.SCOPE_BASE,
-                                                "(objectclass=*)")
+                ent = topology_m2.ms["master2"].getEntry(USER1_DN, ldap.SCOPE_BASE,
+                                                         "(objectclass=*)")
                 break
             except ldap.NO_SUCH_OBJECT:
                 time.sleep(1)
@@ -258,7 +144,7 @@ def test_ticket47462(topology):
     # Add a backend (that has no entries)
     #
     try:
-        topology.master1.backend.create("o=empty", {BACKEND_NAME: "empty"})
+        topology_m2.ms["master1"].backend.create("o=empty", {BACKEND_NAME: "empty"})
     except ldap.LDAPError as e:
         log.fatal('Failed to create extra/empty backend: ' + e.message['desc'])
         assert False
@@ -266,16 +152,16 @@ def test_ticket47462(topology):
     #
     # Run the upgrade...
     #
-    topology.master1.upgrade('online')
-    topology.master1.restart()
-    topology.master2.restart()
+    topology_m2.ms["master1"].upgrade('online')
+    topology_m2.ms["master1"].restart()
+    topology_m2.ms["master2"].restart()
 
     #
     # Check that the restart converted existing DES credentials
     #
     try:
-        entry = topology.master1.search_s('cn=config', ldap.SCOPE_SUBTREE,
-                                          'nsDS5ReplicaCredentials=*')
+        entry = topology_m2.ms["master1"].search_s('cn=config', ldap.SCOPE_SUBTREE,
+                                                   'nsDS5ReplicaCredentials=*')
         if entry:
             val = entry[0].getValue('nsDS5ReplicaCredentials')
             if val.startswith('{AES-'):
@@ -297,11 +183,11 @@ def test_ticket47462(topology):
     # all the attributes.
     #
     try:
-        entry = topology.master1.search_s(AES_PLUGIN, ldap.SCOPE_BASE,
-                                          'objectclass=*')
+        entry = topology_m2.ms["master1"].search_s(AES_PLUGIN, ldap.SCOPE_BASE,
+                                                   'objectclass=*')
         if not entry[0].hasValue('nsslapd-pluginarg0', 'description') and \
-           not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \
-           not entry[0].hasValue('nsslapd-pluginarg2', 'description'):
+                not entry[0].hasValue('nsslapd-pluginarg1', 'description') and \
+                not entry[0].hasValue('nsslapd-pluginarg2', 'description'):
             log.fatal('The AES plugin did not have the DES attribute copied ' +
                       'over correctly')
             assert False
@@ -315,8 +201,8 @@ def test_ticket47462(topology):
     # Check that the MMR plugin was updated
     #
     try:
-        entry = topology.master1.search_s(MMR_PLUGIN, ldap.SCOPE_BASE,
-                                          'objectclass=*')
+        entry = topology_m2.ms["master1"].search_s(MMR_PLUGIN, ldap.SCOPE_BASE,
+                                                   'objectclass=*')
         if not entry[0].hasValue('nsslapd-plugin-depends-on-named', 'AES'):
             log.fatal('The MMR Plugin was not correctly updated')
             assert False
@@ -330,8 +216,8 @@ def test_ticket47462(topology):
     # Check that the DES plugin was correctly updated
     #
     try:
-        entry = topology.master1.search_s(DES_PLUGIN, ldap.SCOPE_BASE,
-                                          'objectclass=*')
+        entry = topology_m2.ms["master1"].search_s(DES_PLUGIN, ldap.SCOPE_BASE,
+                                                   'objectclass=*')
         if not entry[0].hasValue('nsslapd-pluginPath', 'libpbe-plugin'):
             log.fatal('The DES Plugin was not correctly updated')
             assert False
@@ -345,16 +231,16 @@ def test_ticket47462(topology):
     # Check replication one last time
     #
     try:
-        topology.master1.add_s(Entry((USER_DN,
-                                      {'objectclass': "top person".split(),
-                                       'sn': 'sn',
-                                       'cn': 'test_user'})))
+        topology_m2.ms["master1"].add_s(Entry((USER_DN,
+                                               {'objectclass': "top person".split(),
+                                                'sn': 'sn',
+                                                'cn': 'test_user'})))
         loop = 0
         ent = None
         while loop <= 10:
             try:
-                ent = topology.master2.getEntry(USER_DN, ldap.SCOPE_BASE,
-                                                "(objectclass=*)")
+                ent = topology_m2.ms["master2"].getEntry(USER_DN, ldap.SCOPE_BASE,
+                                                         "(objectclass=*)")
                 break
             except ldap.NO_SUCH_OBJECT:
                 time.sleep(1)
@@ -371,9 +257,9 @@ def test_ticket47462(topology):
     # Check the entry
     log.info('Entry before running task...')
     try:
-        entry = topology.master1.search_s(USER1_DN,
-                                          ldap.SCOPE_BASE,
-                                          'objectclass=*')
+        entry = topology_m2.ms["master1"].search_s(USER1_DN,
+                                                   ldap.SCOPE_BASE,
+                                                   'objectclass=*')
         if entry:
             print(str(entry))
         else:
@@ -388,27 +274,27 @@ def test_ticket47462(topology):
     # Test the DES2AES Task on USER1_DN
     #
     try:
-        topology.master1.add_s(Entry((DES2AES_TASK_DN,
-                                      {'objectclass': ['top',
-                                                       'extensibleObject'],
-                                       'suffix': DEFAULT_SUFFIX,
-                                       'cn': 'convert'})))
+        topology_m2.ms["master1"].add_s(Entry((DES2AES_TASK_DN,
+                                               {'objectclass': ['top',
+                                                                'extensibleObject'],
+                                                'suffix': DEFAULT_SUFFIX,
+                                                'cn': 'convert'})))
     except ldap.LDAPError as e:
         log.fatal('Failed to add task entry: ' + e.message['desc'])
         assert False
 
     # Wait for task
     task_entry = Entry(DES2AES_TASK_DN)
-    (done, exitCode) = topology.master1.tasks.checkTask(task_entry, True)
+    (done, exitCode) = topology_m2.ms["master1"].tasks.checkTask(task_entry, True)
     if exitCode:
         log.fatal("Error: des2aes task exited with %d" % (exitCode))
         assert False
 
     # Check the entry
     try:
-        entry = topology.master1.search_s(USER1_DN,
-                                          ldap.SCOPE_BASE,
-                                          'objectclass=*')
+        entry = topology_m2.ms["master1"].search_s(USER1_DN,
+                                                   ldap.SCOPE_BASE,
+                                                   'objectclass=*')
         if entry:
             val = entry[0].getValue('description')
             print(str(entry[0]))
@@ -426,6 +312,7 @@ def test_ticket47462(topology):
                   e.message['desc'])
         assert False
 
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode

+ 118 - 196
dirsrvtests/tests/tickets/ticket47490_test.py

@@ -11,16 +11,15 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
-import ldap
-import time
 import logging
-import pytest
 import re
-from lib389 import DirSrv, Entry
+import time
+
+import ldap
+import pytest
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m1c1
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -29,25 +28,16 @@ TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
 ENTRY_DN = "cn=test_entry, %s" % SUFFIX
 MUST_OLD = "(postalAddress $ preferredLocale)"
 MUST_NEW = "(postalAddress $ preferredLocale $ telexNumber)"
-MAY_OLD  = "(postalCode $ street)"
-MAY_NEW  = "(postalCode $ street $ postOfficeBox)"
-
-
-class TopologyMasterConsumer(object):
-    def __init__(self, master, consumer):
-        master.open()
-        self.master = master
+MAY_OLD = "(postalCode $ street)"
+MAY_NEW = "(postalCode $ street $ postOfficeBox)"
 
-        consumer.open()
-        self.consumer = consumer
 
-
-def _header(topology, label):
-    topology.master.log.info("\n\n###############################################")
-    topology.master.log.info("#######")
-    topology.master.log.info("####### %s" % label)
-    topology.master.log.info("#######")
-    topology.master.log.info("###################################################")
+def _header(topology_m1c1, label):
+    topology_m1c1.ms["master1"].log.info("\n\n###############################################")
+    topology_m1c1.ms["master1"].log.info("#######")
+    topology_m1c1.ms["master1"].log.info("####### %s" % label)
+    topology_m1c1.ms["master1"].log.info("#######")
+    topology_m1c1.ms["master1"].log.info("###################################################")
 
 
 def pattern_errorlog(file, log_pattern):
@@ -75,9 +65,9 @@ def pattern_errorlog(file, log_pattern):
 
 
 def _oc_definition(oid_ext, name, must=None, may=None):
-    oid  = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
     desc = 'To test ticket 47490'
-    sup  = 'person'
+    sup = 'person'
     if not must:
         must = MUST_OLD
     if not may:
@@ -99,7 +89,7 @@ def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None,
     instance.schema.add_schema('objectClasses', new_oc)
 
 
-def support_schema_learning(topology):
+def support_schema_learning(topology_m1c1):
     """
     with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn
     schema definitions when a replication occurs.
@@ -112,7 +102,7 @@ def support_schema_learning(topology):
     This function returns True if 47721 is fixed in the current release
     False else
     """
-    ent = topology.consumer.getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring'])
+    ent = topology_m1c1.cs["consumer1"].getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring'])
     if ent.hasAttr('nsslapd-versionstring'):
         val = ent.getValue('nsslapd-versionstring')
         version = val.split('/')[1].split('.')  # something like ['1', '3', '1', '23', 'final_fix']
@@ -130,7 +120,7 @@ def support_schema_learning(topology):
         return False
 
 
-def trigger_update(topology):
+def trigger_update(topology_m1c1):
     """
         It triggers an update on the supplier. This will start a replication
         session and a schema push
@@ -140,13 +130,14 @@ def trigger_update(topology):
     except AttributeError:
         trigger_update.value = 1
     replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_update.value))]
-    topology.master.modify_s(ENTRY_DN, replace)
+    topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace)
 
     # wait 10 seconds that the update is replicated
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
+            ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)",
+                                                         ['telephonenumber'])
             val = ent.telephonenumber or "0"
             if int(val) == trigger_update.value:
                 return
@@ -159,7 +150,7 @@ def trigger_update(topology):
             loop += 1
 
 
-def trigger_schema_push(topology):
+def trigger_schema_push(topology_m1c1):
     '''
     Trigger update to create a replication session.
     In case of 47721 is fixed and the replica needs to learn the missing definition, then
@@ -167,111 +158,35 @@ def trigger_schema_push(topology):
     push the schema (and the schemaCSN.
     This is why there is two updates and replica agreement is stopped/start (to create a second session)
     '''
-    agreements = topology.master.agreement.list(suffix=SUFFIX, consumer_host=topology.consumer.host, consumer_port=topology.consumer.port)
-    assert(len(agreements) == 1)
+    agreements = topology_m1c1.ms["master1"].agreement.list(suffix=SUFFIX,
+                                                            consumer_host=topology_m1c1.cs["consumer1"].host,
+                                                            consumer_port=topology_m1c1.cs["consumer1"].port)
+    assert (len(agreements) == 1)
     ra = agreements[0]
-    trigger_update(topology)
-    topology.master.agreement.pause(ra.dn)
-    topology.master.agreement.resume(ra.dn)
-    trigger_update(topology)
+    trigger_update(topology_m1c1)
+    topology_m1c1.ms["master1"].agreement.pause(ra.dn)
+    topology_m1c1.ms["master1"].agreement.resume(ra.dn)
+    trigger_update(topology_m1c1)
 
 
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER -> Consumer.
-    '''
-    master   = DirSrv(verbose=False)
-    consumer = DirSrv(verbose=False)
-
-    # Args for the master instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master.allocate(args_master)
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_CONSUMER_1
-    args_instance[SER_PORT] = PORT_CONSUMER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
-    args_consumer = args_instance.copy()
-    consumer.allocate(args_consumer)
-
-    # Get the status of the instance
-    instance_master = master.exists()
-    instance_consumer = consumer.exists()
-
-    # Remove all the instances
-    if instance_master:
-        master.delete()
-    if instance_consumer:
-        consumer.delete()
-
-    # Create the instances
-    master.create()
-    master.open()
-    consumer.create()
-    consumer.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
-    # Initialize the supplier->consumer
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-    master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
-    master.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master.testReplication(DEFAULT_SUFFIX, consumer):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master.delete()
-        consumer.delete()
-    request.addfinalizer(fin)
-    #
-    # Here we have two instances master and consumer
-    # with replication working.
-    return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47490_init(topology):
+def test_ticket47490_init(topology_m1c1):
     """
         Initialize the test environment
     """
-    log.debug("test_ticket47490_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
+    log.debug("test_ticket47490_init topology_m1c1 %r (master %r, consumer %r" % (
+    topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
     # the test case will check if a warning message is logged in the
     # error log of the supplier
-    topology.master.errorlog_file = open(topology.master.errlog, "r")
+    topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
 
     # This entry will be used to trigger attempt of schema push
-    topology.master.add_s(Entry((ENTRY_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn': 'test_entry',
-                                            'cn': 'test_entry'})))
+    topology_m1c1.ms["master1"].add_s(Entry((ENTRY_DN, {
+        'objectclass': "top person".split(),
+        'sn': 'test_entry',
+        'cn': 'test_entry'})))
 
 
-def test_ticket47490_one(topology):
+def test_ticket47490_one(topology_m1c1):
     """
         Summary: Extra OC Schema is pushed - no error
 
@@ -285,16 +200,17 @@ def test_ticket47490_one(topology):
             - consumer +masterNewOCA
 
     """
-    _header(topology, "Extra OC Schema is pushed - no error")
+    _header(topology_m1c1, "Extra OC Schema is pushed - no error")
 
-    log.debug("test_ticket47490_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
+    log.debug("test_ticket47490_one topology_m1c1 %r (master %r, consumer %r" % (
+    topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
     # update the schema of the supplier so that it is a superset of
     # consumer. Schema should be pushed
-    add_OC(topology.master, 2, 'masterNewOCA')
+    add_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA')
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was updated on the consumer
     log.debug("test_ticket47490_one master_schema_csn=%s", master_schema_csn)
@@ -303,12 +219,12 @@ def test_ticket47490_one(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     if res is not None:
         assert False
 
 
-def test_ticket47490_two(topology):
+def test_ticket47490_two(topology_m1c1):
     """
         Summary: Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)
 
@@ -323,25 +239,25 @@ def test_ticket47490_two(topology):
 
     """
 
-    _header(topology, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)")
+    _header(topology_m1c1, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)")
 
     # add this OC on consumer. Supplier will no push the schema
-    add_OC(topology.consumer, 1, 'consumerNewOCA')
+    add_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA')
 
     # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
     time.sleep(2)
-    add_OC(topology.master, 3, 'masterNewOCB')
+    add_OC(topology_m1c1.ms["master1"], 3, 'masterNewOCB')
 
     # now push the scheam
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was NOT updated on the consumer
     # with 47721, supplier learns the missing definition
     log.debug("test_ticket47490_two master_schema_csn=%s", master_schema_csn)
     log.debug("test_ticket47490_two consumer_schema_csn=%s", consumer_schema_csn)
-    if support_schema_learning(topology):
+    if support_schema_learning(topology_m1c1):
         assert master_schema_csn == consumer_schema_csn
     else:
         assert master_schema_csn != consumer_schema_csn
@@ -349,10 +265,10 @@ def test_ticket47490_two(topology):
     # Check the error log of the supplier does not contain an error
     # This message may happen during the learning phase
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
 
 
-def test_ticket47490_three(topology):
+def test_ticket47490_three(topology_m1c1):
     """
         Summary: Extra OC Schema is pushed - no error
 
@@ -366,16 +282,16 @@ def test_ticket47490_three(topology):
             - consumer +masterNewOCA +masterNewOCB +consumerNewOCA
 
     """
-    _header(topology, "Extra OC Schema is pushed - no error")
+    _header(topology_m1c1, "Extra OC Schema is pushed - no error")
 
     # Do an upate to trigger the schema push attempt
     # add this OC on consumer. Supplier will no push the schema
-    add_OC(topology.master, 1, 'consumerNewOCA')
+    add_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA')
 
     # now push the scheam
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was NOT updated on the consumer
     log.debug("test_ticket47490_three master_schema_csn=%s", master_schema_csn)
@@ -384,12 +300,12 @@ def test_ticket47490_three(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     if res is not None:
         assert False
 
 
-def test_ticket47490_four(topology):
+def test_ticket47490_four(topology_m1c1):
     """
         Summary: Same OC - extra MUST: Schema is pushed - no error
 
@@ -405,13 +321,14 @@ def test_ticket47490_four(topology):
                        +must=telexnumber
 
     """
-    _header(topology, "Same OC - extra MUST: Schema is pushed - no error")
+    _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error")
 
-    mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+    mod_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_OLD)
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was updated on the consumer
     log.debug("test_ticket47490_four master_schema_csn=%s", master_schema_csn)
@@ -420,12 +337,12 @@ def test_ticket47490_four(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     if res is not None:
         assert False
 
 
-def test_ticket47490_five(topology):
+def test_ticket47490_five(topology_m1c1):
     """
         Summary: Same OC - extra MUST: Schema is pushed - (fix for 47721)
 
@@ -444,26 +361,27 @@ def test_ticket47490_five(topology):
 
         Note: replication log is enabled to get more details
     """
-    _header(topology, "Same OC - extra MUST: Schema is pushed - (fix for 47721)")
+    _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - (fix for 47721)")
 
     # get more detail why it fails
-    topology.master.enableReplLogging()
+    topology_m1c1.ms["master1"].enableReplLogging()
 
     # add telenumber to 'consumerNewOCA' on the consumer
-    mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+    mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_OLD)
     # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
     time.sleep(2)
-    add_OC(topology.master, 4, 'masterNewOCC')
+    add_OC(topology_m1c1.ms["master1"], 4, 'masterNewOCC')
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was NOT updated on the consumer
     # with 47721, supplier learns the missing definition
     log.debug("test_ticket47490_five master_schema_csn=%s", master_schema_csn)
     log.debug("ctest_ticket47490_five onsumer_schema_csn=%s", consumer_schema_csn)
-    if support_schema_learning(topology):
+    if support_schema_learning(topology_m1c1):
         assert master_schema_csn == consumer_schema_csn
     else:
         assert master_schema_csn != consumer_schema_csn
@@ -471,10 +389,10 @@ def test_ticket47490_five(topology):
     # Check the error log of the supplier does not contain an error
     # This message may happen during the learning phase
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
 
 
-def test_ticket47490_six(topology):
+def test_ticket47490_six(topology_m1c1):
     """
         Summary: Same OC - extra MUST: Schema is pushed - no error
 
@@ -494,14 +412,15 @@ def test_ticket47490_six(topology):
 
         Note: replication log is enabled to get more details
     """
-    _header(topology, "Same OC - extra MUST: Schema is pushed - no error")
+    _header(topology_m1c1, "Same OC - extra MUST: Schema is pushed - no error")
 
     # add telenumber to 'consumerNewOCA' on the consumer
-    mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD)
+    mod_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_OLD)
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was NOT updated on the consumer
     log.debug("test_ticket47490_six master_schema_csn=%s", master_schema_csn)
@@ -511,12 +430,12 @@ def test_ticket47490_six(topology):
     # Check the error log of the supplier does not contain an error
     # This message may happen during the learning phase
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     if res is not None:
         assert False
 
 
-def test_ticket47490_seven(topology):
+def test_ticket47490_seven(topology_m1c1):
     """
         Summary: Same OC - extra MAY: Schema is pushed - no error
 
@@ -535,13 +454,14 @@ def test_ticket47490_seven(topology):
                        +must=telexnumber                   +must=telexnumber
                        +may=postOfficeBox
     """
-    _header(topology, "Same OC - extra MAY: Schema is pushed - no error")
+    _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error")
 
-    mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+    mod_OC(topology_m1c1.ms["master1"], 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_NEW)
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was updated on the consumer
     log.debug("test_ticket47490_seven master_schema_csn=%s", master_schema_csn)
@@ -550,12 +470,12 @@ def test_ticket47490_seven(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     if res is not None:
         assert False
 
 
-def test_ticket47490_eight(topology):
+def test_ticket47490_eight(topology_m1c1):
     """
         Summary: Same OC - extra MAY: Schema is pushed (fix for 47721)
 
@@ -576,23 +496,25 @@ def test_ticket47490_eight(topology):
                        +must=telexnumber                   +must=telexnumber
                        +may=postOfficeBox                  +may=postOfficeBox
     """
-    _header(topology, "Same OC - extra MAY: Schema is pushed (fix for 47721)")
+    _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed (fix for 47721)")
 
-    mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+    mod_OC(topology_m1c1.cs["consumer1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_NEW)
 
     # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s)
     time.sleep(2)
-    mod_OC(topology.master, 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW)
+    mod_OC(topology_m1c1.ms["master1"], 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD,
+           new_may=MAY_NEW)
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was not updated on the consumer
     # with 47721, supplier learns the missing definition
     log.debug("test_ticket47490_eight master_schema_csn=%s", master_schema_csn)
     log.debug("ctest_ticket47490_eight onsumer_schema_csn=%s", consumer_schema_csn)
-    if support_schema_learning(topology):
+    if support_schema_learning(topology_m1c1):
         assert master_schema_csn == consumer_schema_csn
     else:
         assert master_schema_csn != consumer_schema_csn
@@ -600,10 +522,10 @@ def test_ticket47490_eight(topology):
     # Check the error log of the supplier does not contain an error
     # This message may happen during the learning phase
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
 
 
-def test_ticket47490_nine(topology):
+def test_ticket47490_nine(topology_m1c1):
     """
         Summary: Same OC - extra MAY: Schema is pushed - no error
 
@@ -626,13 +548,14 @@ def test_ticket47490_nine(topology):
                        +must=telexnumber                   +must=telexnumber
                        +may=postOfficeBox                  +may=postOfficeBox +may=postOfficeBox
     """
-    _header(topology, "Same OC - extra MAY: Schema is pushed - no error")
+    _header(topology_m1c1, "Same OC - extra MAY: Schema is pushed - no error")
 
-    mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+    mod_OC(topology_m1c1.ms["master1"], 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_NEW)
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was updated on the consumer
     log.debug("test_ticket47490_nine master_schema_csn=%s", master_schema_csn)
@@ -641,7 +564,7 @@ def test_ticket47490_nine(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     if res is not None:
         assert False
 
@@ -653,4 +576,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 65 - 169
dirsrvtests/tests/tickets/ticket47536_test.py

@@ -6,21 +6,12 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import shlex
-import subprocess
-import ldap
-import logging
-import pytest
 import base64
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -35,108 +26,8 @@ M1SERVERCERT = 'Server-Cert1'
 M2SERVERCERT = 'Server-Cert2'
 M1LDAPSPORT = '41636'
 M2LDAPSPORT = '42636'
-
-
-class TopologyReplication(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    # Creating master 1...
-    master1 = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-    instance_master1 = master1.exists()
-    if instance_master1:
-        master1.delete()
-    master1.create()
-    master1.open()
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
-    # Creating master 2...
-    master2 = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-    instance_master2 = master2.exists()
-    if instance_master2:
-        master2.delete()
-    master2.create()
-    master2.open()
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Delete each instance in the end
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    #
-    # Create all the agreements
-    #
-    # Creating agreement from master 1 to master 2
-    properties = {RA_NAME:      r'meTo_%s:%s' % (master2.host, master2.port),
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    global m1_m2_agmt
-    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-    if not m1_m2_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m1_m2_agmt)
-
-    # Creating agreement from master 2 to master 1
-    properties = {RA_NAME:      r'meTo_%s:%s' % (master1.host, master1.port),
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    global m2_m1_agmt
-    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-    if not m2_m1_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m2_m1_agmt)
-
-    # Allow the replicas to get situated with the new agreements...
-    time.sleep(2)
-
-    global M1SUBJECT
-    M1SUBJECT = 'CN=%s,OU=389 Directory Server' % (master1.host)
-    global M2SUBJECT
-    M2SUBJECT = 'CN=%s,OU=390 Directory Server' % (master2.host)
-
-    #
-    # Initialize all the agreements
-    #
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(m1_m2_agmt)
-
-    # Check replication is working...
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    return TopologyReplication(master1, master2)
-
-
[email protected](scope="module")
+M1SUBJECT = 'CN={},OU=389 Directory Server'.format(HOST_MASTER_1)
+M2SUBJECT = 'CN={},OU=390 Directory Server'.format(HOST_MASTER_2)
 
 
 def add_entry(server, name, rdntmpl, start, num):
@@ -232,16 +123,16 @@ def doAndPrintIt(cmdline):
         assert False
 
 
-def create_keys_certs(topology):
+def create_keys_certs(topology_m2):
     log.info("\n######################### Creating SSL Keys and Certs ######################\n")
 
     global m1confdir
-    m1confdir = topology.master1.confdir
+    m1confdir = topology_m2.ms["master1"].confdir
     global m2confdir
-    m2confdir = topology.master2.confdir
+    m2confdir = topology_m2.ms["master2"].confdir
 
     log.info("##### shutdown master1")
-    topology.master1.stop(timeout=10)
+    topology_m2.ms["master1"].stop(timeout=10)
 
     log.info("##### Creating a password file")
     pwdfile = '%s/pwdfile.txt' % (m1confdir)
@@ -275,51 +166,55 @@ def create_keys_certs(topology):
     log.info("##### Create key3.db and cert8.db database (master1): %s" % cmdline)
     doAndPrintIt(cmdline)
 
-    cmdline = ['certutil', '-G', '-d', m1confdir, '-z',  noisefile, '-f', pwdfile]
+    cmdline = ['certutil', '-G', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
     log.info("##### Creating encryption key for CA (master1): %s" % cmdline)
-    #os.system('certutil -G -d %s -z %s -f %s' % (m1confdir, noisefile, pwdfile))
+    # os.system('certutil -G -d %s -z %s -f %s' % (m1confdir, noisefile, pwdfile))
     doAndPrintIt(cmdline)
 
     time.sleep(2)
 
     log.info("##### Creating self-signed CA certificate (master1) -- nickname %s" % CACERT)
-    os.system('( echo y ; echo ; echo y ) | certutil -S -n "%s" -s "%s" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (CACERT, ISSUER, m1confdir, noisefile, pwdfile))
+    os.system(
+        '( echo y ; echo ; echo y ) | certutil -S -n "%s" -s "%s" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (
+        CACERT, ISSUER, m1confdir, noisefile, pwdfile))
 
     global M1SUBJECT
-    cmdline = ['certutil', '-S', '-n', M1SERVERCERT, '-s', M1SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1001', '-v', '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+    cmdline = ['certutil', '-S', '-n', M1SERVERCERT, '-s', M1SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1001', '-v',
+               '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
     log.info("##### Creating Server certificate -- nickname %s: %s" % (M1SERVERCERT, cmdline))
     doAndPrintIt(cmdline)
 
     time.sleep(2)
 
     global M2SUBJECT
-    cmdline = ['certutil', '-S', '-n', M2SERVERCERT, '-s', M2SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1002', '-v', '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
+    cmdline = ['certutil', '-S', '-n', M2SERVERCERT, '-s', M2SUBJECT, '-c', CACERT, '-t', ',,', '-m', '1002', '-v',
+               '120', '-d', m1confdir, '-z', noisefile, '-f', pwdfile]
     log.info("##### Creating Server certificate -- nickname %s: %s" % (M2SERVERCERT, cmdline))
     doAndPrintIt(cmdline)
 
     time.sleep(2)
 
     log.info("##### start master1")
-    topology.master1.start(timeout=10)
+    topology_m2.ms["master1"].start(timeout=10)
 
     log.info("##### enable SSL in master1 with all ciphers")
-    enable_ssl(topology.master1, M1LDAPSPORT, M1SERVERCERT)
+    enable_ssl(topology_m2.ms["master1"], M1LDAPSPORT, M1SERVERCERT)
 
     cmdline = ['certutil', '-L', '-d', m1confdir]
     log.info("##### Check the cert db: %s" % cmdline)
     doAndPrintIt(cmdline)
 
     log.info("##### restart master1")
-    topology.master1.restart(timeout=10)
+    topology_m2.ms["master1"].restart(timeout=10)
 
     log.info("##### Check PEM files of master1 (before setting nsslapd-extract-pemfiles")
     check_pems(m1confdir, CACERT, M1SERVERCERT, M1SERVERCERT + '-Key', " not")
 
     log.info("##### Set on to nsslapd-extract-pemfiles")
-    topology.master1.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
+    topology_m2.ms["master1"].modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
 
     log.info("##### restart master1")
-    topology.master1.restart(timeout=10)
+    topology_m2.ms["master1"].restart(timeout=10)
 
     log.info("##### Check PEM files of master1 (after setting nsslapd-extract-pemfiles")
     check_pems(m1confdir, CACERT, M1SERVERCERT, M1SERVERCERT + '-Key', "")
@@ -339,7 +234,7 @@ def create_keys_certs(topology):
         assert False
 
     log.info("##### stop master2")
-    topology.master2.stop(timeout=10)
+    topology_m2.ms["master2"].stop(timeout=10)
 
     log.info("##### Initialize Cert DB for master2")
     cmdline = ['certutil', '-N', '-d', m2confdir, '-f', pwdfile]
@@ -358,41 +253,40 @@ def create_keys_certs(topology):
     os.system('chmod 400 %s' % m2pinfile)
 
     log.info("##### start master2")
-    topology.master2.start(timeout=10)
+    topology_m2.ms["master2"].start(timeout=10)
 
     log.info("##### enable SSL in master2 with all ciphers")
-    enable_ssl(topology.master2, M2LDAPSPORT, M2SERVERCERT)
+    enable_ssl(topology_m2.ms["master2"], M2LDAPSPORT, M2SERVERCERT)
 
     log.info("##### restart master2")
-    topology.master2.restart(timeout=10)
+    topology_m2.ms["master2"].restart(timeout=10)
 
     log.info("##### Check PEM files of master2 (before setting nsslapd-extract-pemfiles")
     check_pems(m2confdir, CACERT, M2SERVERCERT, M2SERVERCERT + '-Key', " not")
 
     log.info("##### Set on to nsslapd-extract-pemfiles")
-    topology.master2.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
+    topology_m2.ms["master2"].modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-extract-pemfiles', 'on')])
 
     log.info("##### restart master2")
-    topology.master2.restart(timeout=10)
+    topology_m2.ms["master2"].restart(timeout=10)
 
     log.info("##### Check PEM files of master2 (after setting nsslapd-extract-pemfiles")
     check_pems(m2confdir, CACERT, M2SERVERCERT, M2SERVERCERT + '-Key', "")
 
     log.info("##### restart master1")
-    topology.master1.restart(timeout=10)
-
+    topology_m2.ms["master1"].restart(timeout=10)
 
     log.info("\n######################### Creating SSL Keys and Certs Done ######################\n")
 
 
-def config_tls_agreements(topology):
+def config_tls_agreements(topology_m2):
     log.info("######################### Configure SSL/TLS agreements ######################")
     log.info("######################## master1 -- startTLS -> master2 #####################")
     log.info("##################### master1 <- tls_clientAuth -- master2 ##################")
 
     log.info("##### Update the agreement of master1")
-    global m1_m2_agmt
-    topology.master1.modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
+    m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
+    topology_m2.ms["master1"].modify_s(m1_m2_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS')])
 
     log.info("##### Add the cert to the repl manager on master1")
     global mytmp
@@ -402,8 +296,8 @@ def config_tls_agreements(topology):
     m2servercertstr = ''
     for l in m2sc.readlines():
         if ((l == "") or l.startswith('This file is auto-generated') or
-            l.startswith('Do not edit') or l.startswith('Issuer:') or
-            l.startswith('Subject:') or l.startswith('-----')):
+                l.startswith('Do not edit') or l.startswith('Issuer:') or
+                l.startswith('Subject:') or l.startswith('-----')):
             continue
         m2servercertstr = "%s%s" % (m2servercertstr, l.rstrip())
     m2sc.close()
@@ -411,17 +305,18 @@ def config_tls_agreements(topology):
     log.info('##### master2 Server Cert in base64 format: %s' % m2servercertstr)
 
     replmgr = defaultProperties[REPLICATION_BIND_DN]
-    rentry = topology.master1.search_s(replmgr, ldap.SCOPE_BASE, 'objectclass=*')
+    rentry = topology_m2.ms["master1"].search_s(replmgr, ldap.SCOPE_BASE, 'objectclass=*')
     log.info('##### Replication manager on master1: %s' % replmgr)
     oc = 'ObjectClass'
     log.info('      %s:' % oc)
     if rentry:
         for val in rentry[0].getValues(oc):
             log.info('                 : %s' % val)
-    topology.master1.modify_s(replmgr, [(ldap.MOD_ADD, oc, 'extensibleObject')])
+    topology_m2.ms["master1"].modify_s(replmgr, [(ldap.MOD_ADD, oc, 'extensibleObject')])
 
     global M2SUBJECT
-    topology.master1.modify_s(replmgr, [(ldap.MOD_ADD, 'userCertificate;binary', base64.b64decode(m2servercertstr)),
+    topology_m2.ms["master1"].modify_s(replmgr,
+                                       [(ldap.MOD_ADD, 'userCertificate;binary', base64.b64decode(m2servercertstr)),
                                         (ldap.MOD_ADD, 'description', M2SUBJECT)])
 
     log.info("##### Modify the certmap.conf on master1")
@@ -437,32 +332,32 @@ def config_tls_agreements(topology):
     os.system('chmod 440 %s' % m1certmap)
 
     log.info("##### Update the agreement of master2")
-    global m2_m1_agmt
-    topology.master2.modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS'),
-                                           (ldap.MOD_REPLACE, 'nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH')])
+    m2_m1_agmt = topology_m2.ms["master2_agmts"]["m2_m1"]
+    topology_m2.ms["master2"].modify_s(m2_m1_agmt, [(ldap.MOD_REPLACE, 'nsDS5ReplicaTransportInfo', 'TLS'),
+                                                    (ldap.MOD_REPLACE, 'nsDS5ReplicaBindMethod', 'SSLCLIENTAUTH')])
 
-    topology.master1.stop(10)
-    topology.master2.stop(10)
-    topology.master1.start(10)
-    topology.master2.start(10)
+    topology_m2.ms["master1"].stop(10)
+    topology_m2.ms["master2"].stop(10)
+    topology_m2.ms["master1"].start(10)
+    topology_m2.ms["master2"].start(10)
 
     log.info("\n######################### Configure SSL/TLS agreements Done ######################\n")
 
 
-def relocate_pem_files(topology):
+def relocate_pem_files(topology_m2):
     log.info("######################### Relocate PEM files on master1 ######################")
     mycacert = 'MyCA'
-    topology.master1.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'CACertExtractFile', mycacert)])
+    topology_m2.ms["master1"].modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'CACertExtractFile', mycacert)])
     myservercert = 'MyServerCert1'
     myserverkey = 'MyServerKey1'
-    topology.master1.modify_s(RSA_DN, [(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert),
-                                       (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)])
+    topology_m2.ms["master1"].modify_s(RSA_DN, [(ldap.MOD_REPLACE, 'ServerCertExtractFile', myservercert),
+                                                (ldap.MOD_REPLACE, 'ServerKeyExtractFile', myserverkey)])
     log.info("##### restart master1")
-    topology.master1.restart(timeout=10)
+    topology_m2.ms["master1"].restart(timeout=10)
     check_pems(m1confdir, mycacert, myservercert, myserverkey, "")
 
 
-def test_ticket47536(topology):
+def test_ticket47536(topology_m2):
     """
     Set up 2way MMR:
         master_1 ----- startTLS -----> master_2
@@ -477,44 +372,45 @@ def test_ticket47536(topology):
     """
     log.info("Ticket 47536 - Allow usage of OpenLDAP libraries that don't use NSS for crypto")
 
-    create_keys_certs(topology)
-    config_tls_agreements(topology)
+    create_keys_certs(topology_m2)
+    config_tls_agreements(topology_m2)
 
-    add_entry(topology.master1, 'master1', 'uid=m1user', 0, 5)
-    add_entry(topology.master2, 'master2', 'uid=m2user', 0, 5)
+    add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 0, 5)
+    add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 0, 5)
 
     time.sleep(1)
 
     log.info('##### Searching for entries on master1...')
-    entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+    entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
     assert 10 == len(entries)
 
     log.info('##### Searching for entries on master2...')
-    entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+    entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
     assert 10 == len(entries)
 
-    relocate_pem_files(topology)
+    relocate_pem_files(topology_m2)
 
-    add_entry(topology.master1, 'master1', 'uid=m1user', 10, 5)
-    add_entry(topology.master2, 'master2', 'uid=m2user', 10, 5)
+    add_entry(topology_m2.ms["master1"], 'master1', 'uid=m1user', 10, 5)
+    add_entry(topology_m2.ms["master2"], 'master2', 'uid=m2user', 10, 5)
 
     time.sleep(10)
 
     log.info('##### Searching for entries on master1...')
-    entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+    entries = topology_m2.ms["master1"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
     assert 20 == len(entries)
 
     log.info('##### Searching for entries on master2...')
-    entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
+    entries = topology_m2.ms["master2"].search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
     assert 20 == len(entries)
 
-    db2ldifpl = '%s/sbin/db2ldif.pl' % topology.master1.prefix
+    db2ldifpl = '%s/sbin/db2ldif.pl' % topology_m2.ms["master1"].prefix
     cmdline = [db2ldifpl, '-n', 'userRoot', '-Z', SERVERID_MASTER_1, '-D', DN_DM, '-w', PASSWORD]
     log.info("##### db2ldif.pl -- %s" % (cmdline))
     doAndPrintIt(cmdline)
 
     log.info("Ticket 47536 - PASSED")
 
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode

+ 30 - 77
dirsrvtests/tests/tickets/ticket47553_test.py

@@ -6,24 +6,14 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 CONTAINER_1_OU = 'test_ou_1'
 CONTAINER_2_OU = 'test_ou_2'
 CONTAINER_1 = 'ou=%s,dc=example,dc=com' % CONTAINER_1_OU
@@ -33,69 +23,32 @@ USER_PWD = 'Secret123'
 USER = 'cn=%s,%s' % (USER_CN, CONTAINER_1)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
 @pytest.fixture(scope="module")
-def env_setup(topology):
+def env_setup(topology_st):
     """Adds two containers, one user and two ACI rules"""
 
     try:
         log.info("Add a container: %s" % CONTAINER_1)
-        topology.standalone.add_s(Entry((CONTAINER_1,
-                                         {'objectclass': 'top',
-                                          'objectclass': 'organizationalunit',
-                                          'ou': CONTAINER_1_OU,
-                                          })))
+        topology_st.standalone.add_s(Entry((CONTAINER_1,
+                                            {'objectclass': 'top',
+                                             'objectclass': 'organizationalunit',
+                                             'ou': CONTAINER_1_OU,
+                                             })))
 
         log.info("Add a container: %s" % CONTAINER_2)
-        topology.standalone.add_s(Entry((CONTAINER_2,
-                                         {'objectclass': 'top',
-                                          'objectclass': 'organizationalunit',
-                                          'ou': CONTAINER_2_OU,
-                                          })))
+        topology_st.standalone.add_s(Entry((CONTAINER_2,
+                                            {'objectclass': 'top',
+                                             'objectclass': 'organizationalunit',
+                                             'ou': CONTAINER_2_OU,
+                                             })))
 
         log.info("Add a user: %s" % USER)
-        topology.standalone.add_s(Entry((USER,
-                                         {'objectclass': 'top person'.split(),
-                                          'cn': USER_CN,
-                                          'sn': USER_CN,
-                                          'userpassword': USER_PWD
-                                          })))
+        topology_st.standalone.add_s(Entry((USER,
+                                            {'objectclass': 'top person'.split(),
+                                             'cn': USER_CN,
+                                             'sn': USER_CN,
+                                             'userpassword': USER_PWD
+                                             })))
     except ldap.LDAPError as e:
         log.error('Failed to add object to database: %s' % e.message['desc'])
         assert False
@@ -109,17 +62,17 @@ def env_setup(topology):
     try:
         log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER,
                                                                CONTAINER_1))
-        topology.standalone.modify_s(CONTAINER_1, mod)
+        topology_st.standalone.modify_s(CONTAINER_1, mod)
 
         log.info("Add an ACI 'allow (all)' by %s to the %s" % (USER,
                                                                CONTAINER_2))
-        topology.standalone.modify_s(CONTAINER_2, mod)
+        topology_st.standalone.modify_s(CONTAINER_2, mod)
     except ldap.LDAPError as e:
         log.fatal('Failed to add ACI: error (%s)' % (e.message['desc']))
         assert False
 
 
-def test_ticket47553(topology, env_setup):
+def test_ticket47553(topology_st, env_setup):
     """Tests, that MODRDN operation is allowed,
     if user has ACI right '(all)' under superior entries,
     but doesn't have '(modrdn)'
@@ -127,7 +80,7 @@ def test_ticket47553(topology, env_setup):
 
     log.info("Bind as %s" % USER)
     try:
-        topology.standalone.simple_bind_s(USER, USER_PWD)
+        topology_st.standalone.simple_bind_s(USER, USER_PWD)
     except ldap.LDAPError as e:
         log.error('Bind failed for %s, error %s' % (USER, e.message['desc']))
         assert False
@@ -135,23 +88,23 @@ def test_ticket47553(topology, env_setup):
     log.info("User MODRDN operation from %s to %s" % (CONTAINER_1,
                                                       CONTAINER_2))
     try:
-        topology.standalone.rename_s(USER, "cn=%s" % USER_CN,
-                                     newsuperior=CONTAINER_2, delold=1)
+        topology_st.standalone.rename_s(USER, "cn=%s" % USER_CN,
+                                        newsuperior=CONTAINER_2, delold=1)
     except ldap.LDAPError as e:
         log.error('MODRDN failed for %s, error %s' % (USER, e.message['desc']))
         assert False
 
     try:
         log.info("Check there is no user in %s" % CONTAINER_1)
-        entries = topology.standalone.search_s(CONTAINER_1,
-                                               ldap.SCOPE_ONELEVEL,
-                                               'cn=%s' % USER_CN)
+        entries = topology_st.standalone.search_s(CONTAINER_1,
+                                                  ldap.SCOPE_ONELEVEL,
+                                                  'cn=%s' % USER_CN)
         assert not entries
 
         log.info("Check there is our user in %s" % CONTAINER_2)
-        entries = topology.standalone.search_s(CONTAINER_2,
-                                               ldap.SCOPE_ONELEVEL,
-                                               'cn=%s' % USER_CN)
+        entries = topology_st.standalone.search_s(CONTAINER_2,
+                                                  ldap.SCOPE_ONELEVEL,
+                                                  'cn=%s' % USER_CN)
         assert entries
     except ldap.LDAPError as e:
         log.fatal('Search failed, error: ' + e.message['desc'])

+ 22 - 65
dirsrvtests/tests/tickets/ticket47560_test.py

@@ -6,62 +6,20 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
 from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47560(topology):
+def test_ticket47560(topology_st):
     """
        This test case does the following:
           SETUP
@@ -90,19 +48,19 @@ def test_ticket47560(topology):
         """
         # enable/disable the mbo plugin
         if value == 'on':
-            topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+            topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
         else:
-            topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+            topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
 
         log.debug("-------------> _enable_disable_mbo(%s)" % value)
 
-        topology.standalone.stop(timeout=120)
+        topology_st.standalone.stop(timeout=120)
         time.sleep(1)
-        topology.standalone.start(timeout=120)
+        topology_st.standalone.start(timeout=120)
         time.sleep(3)
 
         # need to reopen a connection toward the instance
-        topology.standalone.open()
+        topology_st.standalone.open()
 
     def _test_ticket47560_setup():
         """
@@ -123,7 +81,7 @@ def test_ticket47560(topology):
         entry.setValues('objectclass', 'top', 'groupOfNames', 'inetUser')
         entry.setValues('cn', 'group')
         try:
-            topology.standalone.add_s(entry)
+            topology_st.standalone.add_s(entry)
         except ldap.ALREADY_EXISTS:
             log.debug("Entry %s already exists" % (group_DN))
 
@@ -133,12 +91,12 @@ def test_ticket47560(topology):
         entry.setValues('cn', 'member')
         entry.setValues('sn', 'member')
         try:
-            topology.standalone.add_s(entry)
+            topology_st.standalone.add_s(entry)
         except ldap.ALREADY_EXISTS:
             log.debug("Entry %s already exists" % (member_DN))
 
         replace = [(ldap.MOD_REPLACE, 'memberof', group_DN)]
-        topology.standalone.modify_s(member_DN, replace)
+        topology_st.standalone.modify_s(member_DN, replace)
 
         #
         # enable the memberof plugin and restart the instance
@@ -149,12 +107,12 @@ def test_ticket47560(topology):
         # check memberof attribute is still present
         #
         filt = 'uid=member'
-        ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
+        ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
         assert len(ents) == 1
         ent = ents[0]
-        #print ent
+        # print ent
         value = ent.getValue('memberof')
-        #print "memberof: %s" % (value)
+        # print "memberof: %s" % (value)
         assert value == group_DN
 
     def _test_ticket47560_teardown():
@@ -166,11 +124,11 @@ def test_ticket47560(topology):
         log.debug("-------- > _test_ticket47560_teardown\n")
         # remove the entries group_DN and member_DN
         try:
-            topology.standalone.delete_s(group_DN)
+            topology_st.standalone.delete_s(group_DN)
         except:
             log.warning("Entry %s fail to delete" % (group_DN))
         try:
-            topology.standalone.delete_s(member_DN)
+            topology_st.standalone.delete_s(member_DN)
         except:
             log.warning("Entry %s fail to delete" % (member_DN))
         #
@@ -178,7 +136,7 @@ def test_ticket47560(topology):
         #
         _enable_disable_mbo('off')
 
-    group_DN  = "cn=group,%s"   % (SUFFIX)
+    group_DN = "cn=group,%s" % (SUFFIX)
     member_DN = "uid=member,%s" % (SUFFIX)
 
     #
@@ -194,15 +152,15 @@ def test_ticket47560(topology):
     log.debug("-------- > Start ticket tests\n")
 
     filt = 'uid=member'
-    ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
+    ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
     assert len(ents) == 1
     ent = ents[0]
     log.debug("Unfixed entry %r\n" % ent)
 
     # run the fixup task
-    topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True})
+    topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True})
 
-    ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
+    ents = topology_st.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt)
     assert len(ents) == 1
     ent = ents[0]
     log.debug("Fixed entry %r\n" % ent)
@@ -228,4 +186,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 48 - 137
dirsrvtests/tests/tickets/ticket47573_test.py

@@ -11,17 +11,15 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
+import re
 import time
+
 import ldap
-import logging
 import pytest
-import re
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m1c1
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -30,19 +28,10 @@ TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
 ENTRY_DN = "cn=test_entry, %s" % SUFFIX
 
 MUST_OLD = "(postalAddress $ preferredLocale $ telexNumber)"
-MAY_OLD  = "(postalCode $ street)"
+MAY_OLD = "(postalCode $ street)"
 
 MUST_NEW = "(postalAddress $ preferredLocale)"
-MAY_NEW  = "(telexNumber $ postalCode $ street)"
-
-
-class TopologyMasterConsumer(object):
-    def __init__(self, master, consumer):
-        master.open()
-        self.master = master
-
-        consumer.open()
-        self.consumer = consumer
+MAY_NEW = "(telexNumber $ postalCode $ street)"
 
 
 def pattern_errorlog(file, log_pattern):
@@ -70,9 +59,9 @@ def pattern_errorlog(file, log_pattern):
 
 
 def _oc_definition(oid_ext, name, must=None, may=None):
-    oid  = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
     desc = 'To test ticket 47573'
-    sup  = 'person'
+    sup = 'person'
     if not must:
         must = MUST_OLD
     if not may:
@@ -94,7 +83,7 @@ def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None,
     instance.schema.add_schema('objectClasses', new_oc)
 
 
-def trigger_schema_push(topology):
+def trigger_schema_push(topology_m1c1):
     """
         It triggers an update on the supplier. This will start a replication
         session and a schema push
@@ -104,13 +93,14 @@ def trigger_schema_push(topology):
     except AttributeError:
         trigger_schema_push.value = 1
     replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))]
-    topology.master.modify_s(ENTRY_DN, replace)
+    topology_m1c1.ms["master1"].modify_s(ENTRY_DN, replace)
 
     # wait 10 seconds that the update is replicated
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.consumer.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
+            ent = topology_m1c1.cs["consumer1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)",
+                                                         ['telephonenumber'])
             val = ent.telephonenumber or "0"
             if int(val) == trigger_schema_push.value:
                 return
@@ -123,104 +113,24 @@ def trigger_schema_push(topology):
             loop += 1
 
 
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER -> Consumer.
-    '''
-    master   = DirSrv(verbose=False)
-    consumer = DirSrv(verbose=False)
-
-    # Args for the master instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master.allocate(args_master)
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_CONSUMER_1
-    args_instance[SER_PORT] = PORT_CONSUMER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
-    args_consumer = args_instance.copy()
-    consumer.allocate(args_consumer)
-
-    # Get the status of the instance
-    instance_master = master.exists()
-    instance_consumer = consumer.exists()
-
-    # Remove all the instances
-    if instance_master:
-        master.delete()
-    if instance_consumer:
-        consumer.delete()
-
-    # Create the instances
-    master.create()
-    master.open()
-    consumer.create()
-    consumer.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-    master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
-    master.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master.testReplication(DEFAULT_SUFFIX, consumer):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master.delete()
-        consumer.delete()
-    request.addfinalizer(fin)
-
-    # Here we have two instances master and consumer
-    # with replication working.
-    return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47573_init(topology):
+def test_ticket47573_init(topology_m1c1):
     """
         Initialize the test environment
     """
-    log.debug("test_ticket47573_init topology %r (master %r, consumer %r" %
-              (topology, topology.master, topology.consumer))
+    log.debug("test_ticket47573_init topology_m1c1 %r (master %r, consumer %r" %
+              (topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
     # the test case will check if a warning message is logged in the
     # error log of the supplier
-    topology.master.errorlog_file = open(topology.master.errlog, "r")
+    topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
 
     # This entry will be used to trigger attempt of schema push
-    topology.master.add_s(Entry((ENTRY_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn': 'test_entry',
-                                            'cn': 'test_entry'})))
+    topology_m1c1.ms["master1"].add_s(Entry((ENTRY_DN, {
+        'objectclass': "top person".split(),
+        'sn': 'test_entry',
+        'cn': 'test_entry'})))
 
 
-def test_ticket47573_one(topology):
+def test_ticket47573_one(topology_m1c1):
     """
         Summary: Add a custom OC with MUST and MAY
             MUST = postalAddress $ preferredLocale
@@ -231,17 +141,18 @@ def test_ticket47573_one(topology):
             - consumer +OCwithMayAttr
 
     """
-    log.debug("test_ticket47573_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer))
+    log.debug("test_ticket47573_one topology_m1c1 %r (master %r, consumer %r" % (
+    topology_m1c1, topology_m1c1.ms["master1"], topology_m1c1.cs["consumer1"]))
     # update the schema of the supplier so that it is a superset of
     # consumer. Schema should be pushed
     new_oc = _oc_definition(2, 'OCwithMayAttr',
-                            must = MUST_OLD,
-                            may  = MAY_OLD)
-    topology.master.schema.add_schema('objectClasses', new_oc)
+                            must=MUST_OLD,
+                            may=MAY_OLD)
+    topology_m1c1.ms["master1"].schema.add_schema('objectClasses', new_oc)
 
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was updated on the consumer
     log.debug("test_ticket47573_one master_schema_csn=%s", master_schema_csn)
@@ -250,11 +161,11 @@ def test_ticket47573_one(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     assert res is None
 
 
-def test_ticket47573_two(topology):
+def test_ticket47573_two(topology_m1c1):
     """
         Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute
 
@@ -266,12 +177,13 @@ def test_ticket47573_two(topology):
     """
 
     # Update the objectclass so that a MAY attribute is moved to MUST attribute
-    mod_OC(topology.master, 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW)
+    mod_OC(topology_m1c1.ms["master1"], 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD,
+           new_may=MAY_NEW)
 
     # now push the schema
-    trigger_schema_push(topology)
-    master_schema_csn = topology.master.schema.get_schema_csn()
-    consumer_schema_csn = topology.consumer.schema.get_schema_csn()
+    trigger_schema_push(topology_m1c1)
+    master_schema_csn = topology_m1c1.ms["master1"].schema.get_schema_csn()
+    consumer_schema_csn = topology_m1c1.cs["consumer1"].schema.get_schema_csn()
 
     # Check the schemaCSN was NOT updated on the consumer
     log.debug("test_ticket47573_two master_schema_csn=%s", master_schema_csn)
@@ -280,29 +192,29 @@ def test_ticket47573_two(topology):
 
     # Check the error log of the supplier does not contain an error
     regex = re.compile("must not be overwritten \(set replication log for additional info\)")
-    res = pattern_errorlog(topology.master.errorlog_file, regex)
+    res = pattern_errorlog(topology_m1c1.ms["master1"].errorlog_file, regex)
     assert res is None
 
 
-def test_ticket47573_three(topology):
+def test_ticket47573_three(topology_m1c1):
     '''
         Create an entry with OCwithMayAttr OC
     '''
     # Check replication is working fine
     dn = "cn=ticket47573, %s" % SUFFIX
-    topology.master.add_s(Entry((dn,
-                                 {'objectclass': "top person OCwithMayAttr".split(),
-                                  'sn':               'test_repl',
-                                  'cn':               'test_repl',
-                                  'postalAddress':    'here',
-                                  'preferredLocale':  'en',
-                                  'telexNumber':      '12$us$21',
-                                  'postalCode':       '54321'})))
+    topology_m1c1.ms["master1"].add_s(Entry((dn,
+                                             {'objectclass': "top person OCwithMayAttr".split(),
+                                              'sn': 'test_repl',
+                                              'cn': 'test_repl',
+                                              'postalAddress': 'here',
+                                              'preferredLocale': 'en',
+                                              'telexNumber': '12$us$21',
+                                              'postalCode': '54321'})))
     loop = 0
     ent = None
     while loop <= 10:
         try:
-            ent = topology.consumer.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m1c1.cs["consumer1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             break
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
@@ -318,4 +230,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 26 - 118
dirsrvtests/tests/tickets/ticket47619_test.py

@@ -11,16 +11,15 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
 from lib389.properties import *
+from lib389.topologies import topology_m1c1
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -34,148 +33,58 @@ MAX_OTHERS = 100
 ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber']
 
 
-class TopologyMasterConsumer(object):
-    def __init__(self, master, consumer):
-        master.open()
-        self.master = master
-
-        consumer.open()
-        self.consumer = consumer
-
-    def __repr__(self):
-            return "Master[%s] -> Consumer[%s" % (self.master, self.consumer)
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER -> Consumer.
-    '''
-    master   = DirSrv(verbose=False)
-    consumer = DirSrv(verbose=False)
-
-    # Args for the master instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master.allocate(args_master)
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_CONSUMER_1
-    args_instance[SER_PORT] = PORT_CONSUMER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
-    args_consumer = args_instance.copy()
-    consumer.allocate(args_consumer)
-
-    # Get the status of the instance
-    instance_master   = master.exists()
-    instance_consumer = consumer.exists()
-
-    # Remove all the instances
-    if instance_master:
-        master.delete()
-    if instance_consumer:
-        consumer.delete()
-
-    # Create the instances
-    master.create()
-    master.open()
-    consumer.create()
-    consumer.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
-    # Initialize the supplier->consumer
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-    master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
-    master.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master.testReplication(DEFAULT_SUFFIX, consumer):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master.delete()
-        consumer.delete()
-    request.addfinalizer(fin)
-
-    # Here we have two instances master and consumer
-    # with replication working.
-    return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47619_init(topology):
+def test_ticket47619_init(topology_m1c1):
     """
         Initialize the test environment
     """
-    topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
-    #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF)
-    #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
-    topology.master.stop(timeout=10)
-    topology.master.start(timeout=10)
+    topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+    # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
+    # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+    topology_m1c1.ms["master1"].stop(timeout=10)
+    topology_m1c1.ms["master1"].start(timeout=10)
 
-    topology.master.log.info("test_ticket47619_init topology %r" % (topology))
+    topology_m1c1.ms["master1"].log.info("test_ticket47619_init topology_m1c1 %r" % (topology_m1c1))
     # the test case will check if a warning message is logged in the
     # error log of the supplier
-    topology.master.errorlog_file = open(topology.master.errlog, "r")
+    topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
 
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
-    topology.master.log.info("test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1))
+    topology_m1c1.ms["master1"].log.info(
+        "test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
 
     # Check the number of entries in the retro changelog
     time.sleep(2)
-    ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+    ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
     assert len(ents) == MAX_OTHERS
 
 
-def test_ticket47619_create_index(topology):
+def test_ticket47619_create_index(topology_m1c1):
     args = {INDEX_TYPE: 'eq'}
     for attr in ATTRIBUTES:
-        topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args)
-    topology.master.restart(timeout=10)
+        topology_m1c1.ms["master1"].index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args)
+    topology_m1c1.ms["master1"].restart(timeout=10)
 
 
-def test_ticket47619_reindex(topology):
+def test_ticket47619_reindex(topology_m1c1):
     '''
     Reindex all the attributes in ATTRIBUTES
     '''
     args = {TASK_WAIT: True}
     for attr in ATTRIBUTES:
-        rc = topology.master.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args)
+        rc = topology_m1c1.ms["master1"].tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args)
         assert rc == 0
 
 
-def test_ticket47619_check_indexed_search(topology):
+def test_ticket47619_check_indexed_search(topology_m1c1):
     for attr in ATTRIBUTES:
-        ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr)
+        ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr)
         assert len(ents) == 0
 
 
@@ -184,4 +93,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 15 - 60
dirsrvtests/tests/tickets/ticket47640_test.py

@@ -6,59 +6,16 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
 
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47640(topology):
+def test_ticket47640(topology_st):
     '''
     Linked Attrs Plugins - verify that if the plugin fails to update the link entry
     that the entire operation is aborted
@@ -66,25 +23,25 @@ def test_ticket47640(topology):
 
     # Enable Dynamic plugins, and the linked Attrs plugin
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
     except ldap.LDAPError as e:
         ldap.fatal('Failed to enable dynamic plugin!' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
+        topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)
     except ValueError as e:
         ldap.fatal('Failed to enable linked attributes plugin!' + e.message['desc'])
         assert False
 
     # Add the plugin config entry
     try:
-        topology.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'Manager Link',
-                          'linkType': 'seeAlso',
-                          'managedType': 'seeAlso'
-                          })))
+        topology_st.standalone.add_s(Entry(('cn=manager link,cn=Linked Attributes,cn=plugins,cn=config', {
+            'objectclass': 'top extensibleObject'.split(),
+            'cn': 'Manager Link',
+            'linkType': 'seeAlso',
+            'managedType': 'seeAlso'
+        })))
     except ldap.LDAPError as e:
         log.fatal('Failed to add linked attr config entry: error ' + e.message['desc'])
         assert False
@@ -92,11 +49,11 @@ def test_ticket47640(topology):
     # Add an entry who has a link to an entry that does not exist
     OP_REJECTED = False
     try:
-        topology.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'manager',
-                          'seeAlso': 'uid=user,dc=example,dc=com'
-                          })))
+        topology_st.standalone.add_s(Entry(('uid=manager,' + DEFAULT_SUFFIX, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'manager',
+            'seeAlso': 'uid=user,dc=example,dc=com'
+        })))
     except ldap.UNWILLING_TO_PERFORM:
         # Success
         log.info('Add operation correctly rejected.')
@@ -117,5 +74,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-
-

+ 103 - 217
dirsrvtests/tests/tickets/ticket47653MMR_test.py

@@ -11,47 +11,39 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
 TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
 OC_NAME = 'OCticket47653'
 MUST = "(postalAddress $ postalCode)"
-MAY  = "(member $ street)"
+MAY = "(member $ street)"
 
 OTHER_NAME = 'other_entry'
 MAX_OTHERS = 10
 
-BIND_NAME  = 'bind_entry'
-BIND_DN    = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW    = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
 
 ENTRY_NAME = 'test_entry'
-ENTRY_DN   = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC   = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
 
 
 def _oc_definition(oid_ext, name, must=None, may=None):
-    oid  = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
     desc = 'To test ticket 47490'
-    sup  = 'person'
+    sup = 'person'
     if not must:
         must = MUST
     if not may:
@@ -61,114 +53,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
     return new_oc
 
 
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    # Here we have two instances master and consumer
-    # with replication working.
-    return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47653_init(topology):
+def test_ticket47653_init(topology_m2):
     """
         It adds
            - Objectclass with MAY 'member'
@@ -177,38 +62,38 @@ def test_ticket47653_init(topology):
 
     """
 
-    topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
+    topology_m2.ms["master1"].log.info("Add %s that allows 'member' attribute" % OC_NAME)
     new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY)
-    topology.master1.schema.add_schema('objectClasses', new_oc)
+    topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc)
 
     # entry used to bind with
-    topology.master1.log.info("Add %s" % BIND_DN)
-    topology.master1.add_s(Entry((BIND_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           BIND_NAME,
-                                            'cn':           BIND_NAME,
-                                            'userpassword': BIND_PW})))
+    topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+    topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+        'objectclass': "top person".split(),
+        'sn': BIND_NAME,
+        'cn': BIND_NAME,
+        'userpassword': BIND_PW})))
 
     # enable acl error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))]  # ACL + REPL
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     # remove all aci's and start with a clean slate
     mod = [(ldap.MOD_DELETE, 'aci', None)]
-    topology.master1.modify_s(SUFFIX, mod)
-    topology.master2.modify_s(SUFFIX, mod)
+    topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+    topology_m2.ms["master2"].modify_s(SUFFIX, mod)
 
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
 
-def test_ticket47653_add(topology):
+def test_ticket47653_add(topology_m2):
     '''
         This test ADD an entry on MASTER1 where 47653 is fixed. Then it checks that entry is replicated
         on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 and check the update on MASTER1
@@ -218,11 +103,11 @@ def test_ticket47653_add(topology):
             - with the proper ACI we can not ADD with 'member' attribute
             - with the proper ACI and 'member' it succeeds to ADD
     '''
-    topology.master1.log.info("\n\n######################### ADD ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### ADD ######################\n")
 
     # bind as bind_entry
-    topology.master1.log.info("Bind as %s" % BIND_DN)
-    topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+    topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+    topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
 
     # Prepare the entry with multivalued members
     entry_with_members = Entry(ENTRY_DN)
@@ -251,68 +136,68 @@ def test_ticket47653_add(topology):
 
     # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.master1.log.info("Try to add Add  %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
+        topology_m2.ms["master1"].log.info("Try to add Add  %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
 
-        topology.master1.add_s(entry_with_member)
+        topology_m2.ms["master1"].add_s(entry_with_member)
     except Exception as e:
-        topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
     # Ok Now add the proper ACI
-    topology.master1.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
 
-    ACI_TARGET       = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
     ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
-    ACI_ALLOW        = "(version 3.0; acl \"SelfDN add\"; allow (add)"
-    ACI_SUBJECT      = " userattr = \"member#selfDN\";)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+    ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
+    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
-    topology.master1.modify_s(SUFFIX, mod)
+    topology_m2.ms["master1"].modify_s(SUFFIX, mod)
     time.sleep(1)
 
     # bind as bind_entry
-    topology.master1.log.info("Bind as %s" % BIND_DN)
-    topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+    topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+    topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
 
     # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.master1.log.info("Try to add Add  %s (member is missing)" % ENTRY_DN)
-        topology.master1.add_s(Entry((ENTRY_DN, {
-                                            'objectclass':      ENTRY_OC.split(),
-                                            'sn':               ENTRY_NAME,
-                                            'cn':               ENTRY_NAME,
-                                            'postalAddress':    'here',
-                                            'postalCode':       '1234'})))
+        topology_m2.ms["master1"].log.info("Try to add Add  %s (member is missing)" % ENTRY_DN)
+        topology_m2.ms["master1"].add_s(Entry((ENTRY_DN, {
+            'objectclass': ENTRY_OC.split(),
+            'sn': ENTRY_NAME,
+            'cn': ENTRY_NAME,
+            'postalAddress': 'here',
+            'postalCode': '1234'})))
     except Exception as e:
-        topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
     # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
     # member should contain only one value
     try:
-        topology.master1.log.info("Try to add Add  %s (with several member values)" % ENTRY_DN)
-        topology.master1.add_s(entry_with_members)
+        topology_m2.ms["master1"].log.info("Try to add Add  %s (with several member values)" % ENTRY_DN)
+        topology_m2.ms["master1"].add_s(entry_with_members)
     except Exception as e:
-        topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
-    topology.master1.log.info("Try to add Add  %s should be successful" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Try to add Add  %s should be successful" % ENTRY_DN)
     try:
-        topology.master1.add_s(entry_with_member)
+        topology_m2.ms["master1"].add_s(entry_with_member)
     except ldap.LDAPError as e:
-        topology.master1.log.info("Failed to add entry,  error: " + e.message['desc'])
+        topology_m2.ms["master1"].log.info("Failed to add entry,  error: " + e.message['desc'])
         assert False
 
     #
     # Now check the entry as been replicated
     #
-    topology.master2.simple_bind_s(DN_DM, PASSWORD)
-    topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
+    topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("Try to retrieve %s from Master2" % ENTRY_DN)
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
             break
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
@@ -320,15 +205,15 @@ def test_ticket47653_add(topology):
     assert loop <= 10
 
     # Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2)
-    topology.master1.log.info("Update  %s on M2" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Update  %s on M2" % ENTRY_DN)
     mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
-    topology.master2.modify_s(ENTRY_DN, mod)
+    topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
 
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -338,7 +223,7 @@ def test_ticket47653_add(topology):
     assert ent.getValue('description') == 'test_add'
 
 
-def test_ticket47653_modify(topology):
+def test_ticket47653_modify(topology_m2):
     '''
         This test MOD an entry on MASTER1 where 47653 is fixed. Then it checks that update is replicated
         on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 (bound as BIND_DN).
@@ -349,59 +234,59 @@ def test_ticket47653_modify(topology):
             - adding the ACI, we can modify the entry
     '''
     # bind as bind_entry
-    topology.master1.log.info("Bind as %s" % BIND_DN)
-    topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+    topology_m2.ms["master1"].log.info("Bind as %s" % BIND_DN)
+    topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
 
-    topology.master1.log.info("\n\n######################### MODIFY ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### MODIFY ######################\n")
 
     # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.master1.log.info("Try to modify  %s (aci is missing)" % ENTRY_DN)
+        topology_m2.ms["master1"].log.info("Try to modify  %s (aci is missing)" % ENTRY_DN)
         mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')]
-        topology.master1.modify_s(ENTRY_DN, mod)
+        topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
     except Exception as e:
-        topology.master1.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_m2.ms["master1"].log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
     # Ok Now add the proper ACI
-    topology.master1.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
 
-    ACI_TARGET       = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
-    ACI_TARGETATTR   = "(targetattr = *)"
+    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+    ACI_TARGETATTR = "(targetattr = *)"
     ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
-    ACI_ALLOW        = "(version 3.0; acl \"SelfDN write\"; allow (write)"
-    ACI_SUBJECT      = " userattr = \"member#selfDN\";)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+    ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
+    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
-    topology.master1.modify_s(SUFFIX, mod)
+    topology_m2.ms["master1"].modify_s(SUFFIX, mod)
     time.sleep(1)
 
     # bind as bind_entry
-    topology.master1.log.info("M1: Bind as %s" % BIND_DN)
-    topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+    topology_m2.ms["master1"].log.info("M1: Bind as %s" % BIND_DN)
+    topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
 
     # modify the entry and checks the value
-    topology.master1.log.info("M1: Try to modify  %s. It should succeeds" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("M1: Try to modify  %s. It should succeeds" % ENTRY_DN)
     mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')]
-    topology.master1.modify_s(ENTRY_DN, mod)
+    topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
 
-    topology.master1.log.info("M1: Bind as %s" % DN_DM)
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
 
-    topology.master1.log.info("M1: Check the update of %s" % ENTRY_DN)
-    ents = topology.master1.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+    topology_m2.ms["master1"].log.info("M1: Check the update of %s" % ENTRY_DN)
+    ents = topology_m2.ms["master1"].search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
     assert len(ents) == 1
     assert ents[0].postalCode == '1928'
 
     # Now check the update has been replicated on M2
-    topology.master1.log.info("M2: Bind as %s" % DN_DM)
-    topology.master2.simple_bind_s(DN_DM, PASSWORD)
-    topology.master1.log.info("M2: Try to retrieve %s" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("M2: Bind as %s" % DN_DM)
+    topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("M2: Try to retrieve %s" % ENTRY_DN)
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1928'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -411,29 +296,30 @@ def test_ticket47653_modify(topology):
     assert ent.getValue('postalCode') == '1928'
 
     # Now update the entry on Master2 bound as BIND_DN (update may fail if  47653 is  not fixed on M2)
-    topology.master1.log.info("M2: Update  %s (bound as %s)" % (ENTRY_DN, BIND_DN))
-    topology.master2.simple_bind_s(BIND_DN, PASSWORD)
+    topology_m2.ms["master1"].log.info("M2: Update  %s (bound as %s)" % (ENTRY_DN, BIND_DN))
+    topology_m2.ms["master2"].simple_bind_s(BIND_DN, PASSWORD)
     fail = False
     try:
         mod = [(ldap.MOD_REPLACE, 'postalCode', '1929')]
-        topology.master2.modify_s(ENTRY_DN, mod)
+        topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
         fail = False
     except ldap.INSUFFICIENT_ACCESS:
-        topology.master1.log.info("M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2")
+        topology_m2.ms["master1"].log.info(
+            "M2: Exception (INSUFFICIENT_ACCESS): that is fine the bug is possibly not fixed on M2")
         fail = True
     except Exception as e:
-        topology.master1.log.info("M2: Exception (not expected): %s" % type(e).__name__)
+        topology_m2.ms["master1"].log.info("M2: Exception (not expected): %s" % type(e).__name__)
         assert 0
 
     if not fail:
         # Check the update has been replicaed on M1
-        topology.master1.log.info("M1: Bind as %s" % DN_DM)
-        topology.master1.simple_bind_s(DN_DM, PASSWORD)
-        topology.master1.log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN))
+        topology_m2.ms["master1"].log.info("M1: Bind as %s" % DN_DM)
+        topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
+        topology_m2.ms["master1"].log.info("M1: Check %s.postalCode=1929)" % (ENTRY_DN))
         loop = 0
         while loop <= 10:
             try:
-                ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+                ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                 if ent.hasAttr('postalCode') and (ent.getValue('postalCode') == '1929'):
                     break
             except ldap.NO_SUCH_OBJECT:

+ 112 - 154
dirsrvtests/tests/tickets/ticket47653_test.py

@@ -6,39 +6,36 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
+import ldap
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 OC_NAME = 'OCticket47653'
 MUST = "(postalAddress $ postalCode)"
-MAY  = "(member $ street)"
+MAY = "(member $ street)"
 
 OTHER_NAME = 'other_entry'
 MAX_OTHERS = 10
 
-BIND_NAME  = 'bind_entry'
-BIND_DN    = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW    = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
 
 ENTRY_NAME = 'test_entry'
-ENTRY_DN   = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC   = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
 
 
 def _oc_definition(oid_ext, name, must=None, may=None):
-    oid  = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
     desc = 'To test ticket 47490'
-    sup  = 'person'
+    sup = 'person'
     if not must:
         must = MUST
     if not may:
@@ -48,45 +45,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
     return new_oc
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47653_init(topology):
+def test_ticket47653_init(topology_st):
     """
         It adds
            - Objectclass with MAY 'member'
@@ -95,47 +54,47 @@ def test_ticket47653_init(topology):
 
     """
 
-    topology.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME)
+    topology_st.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME)
     new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY)
-    topology.standalone.schema.add_schema('objectClasses', new_oc)
+    topology_st.standalone.schema.add_schema('objectClasses', new_oc)
 
     # entry used to bind with
-    topology.standalone.log.info("Add %s" % BIND_DN)
-    topology.standalone.add_s(Entry((BIND_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           BIND_NAME,
-                                            'cn':           BIND_NAME,
-                                            'userpassword': BIND_PW})))
+    topology_st.standalone.log.info("Add %s" % BIND_DN)
+    topology_st.standalone.add_s(Entry((BIND_DN, {
+        'objectclass': "top person".split(),
+        'sn': BIND_NAME,
+        'cn': BIND_NAME,
+        'userpassword': BIND_PW})))
 
     # enable acl error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')]
-    topology.standalone.modify_s(DN_CONFIG, mod)
+    topology_st.standalone.modify_s(DN_CONFIG, mod)
 
     # Remove aci's to start with a clean slate
     mod = [(ldap.MOD_DELETE, 'aci', None)]
-    topology.standalone.modify_s(SUFFIX, mod)
+    topology_st.standalone.modify_s(SUFFIX, mod)
 
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
 
-def test_ticket47653_add(topology):
+def test_ticket47653_add(topology_st):
     '''
         It checks that, bound as bind_entry,
             - we can not ADD an entry without the proper SELFDN aci.
             - with the proper ACI we can not ADD with 'member' attribute
             - with the proper ACI and 'member' it succeeds to ADD
     '''
-    topology.standalone.log.info("\n\n######################### ADD ######################\n")
+    topology_st.standalone.log.info("\n\n######################### ADD ######################\n")
 
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # Prepare the entry with multivalued members
     entry_with_members = Entry(ENTRY_DN)
@@ -164,182 +123,181 @@ def test_ticket47653_add(topology):
 
     # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.standalone.log.info("Try to add Add  %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
+        topology_st.standalone.log.info("Try to add Add  %s (aci is missing): %r" % (ENTRY_DN, entry_with_member))
 
-        topology.standalone.add_s(entry_with_member)
+        topology_st.standalone.add_s(entry_with_member)
     except Exception as e:
-        topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
     # Ok Now add the proper ACI
-    topology.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    ACI_TARGET       = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
     ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
-    ACI_ALLOW        = "(version 3.0; acl \"SelfDN add\"; allow (add)"
-    ACI_SUBJECT      = " userattr = \"member#selfDN\";)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+    ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
+    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
-    topology.standalone.modify_s(SUFFIX, mod)
+    topology_st.standalone.modify_s(SUFFIX, mod)
 
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.standalone.log.info("Try to add Add  %s (member is missing)" % ENTRY_DN)
-        topology.standalone.add_s(Entry((ENTRY_DN, {
-                                            'objectclass':      ENTRY_OC.split(),
-                                            'sn':               ENTRY_NAME,
-                                            'cn':               ENTRY_NAME,
-                                            'postalAddress':    'here',
-                                            'postalCode':       '1234'})))
+        topology_st.standalone.log.info("Try to add Add  %s (member is missing)" % ENTRY_DN)
+        topology_st.standalone.add_s(Entry((ENTRY_DN, {
+            'objectclass': ENTRY_OC.split(),
+            'sn': ENTRY_NAME,
+            'cn': ENTRY_NAME,
+            'postalAddress': 'here',
+            'postalCode': '1234'})))
     except Exception as e:
-        topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
     # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS
     # member should contain only one value
     try:
-        topology.standalone.log.info("Try to add Add  %s (with several member values)" % ENTRY_DN)
-        topology.standalone.add_s(entry_with_members)
+        topology_st.standalone.log.info("Try to add Add  %s (with several member values)" % ENTRY_DN)
+        topology_st.standalone.add_s(entry_with_members)
     except Exception as e:
-        topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
-    topology.standalone.log.info("Try to add Add  %s should be successful" % ENTRY_DN)
-    topology.standalone.add_s(entry_with_member)
+    topology_st.standalone.log.info("Try to add Add  %s should be successful" % ENTRY_DN)
+    topology_st.standalone.add_s(entry_with_member)
 
 
-def test_ticket47653_search(topology):
+def test_ticket47653_search(topology_st):
     '''
         It checks that, bound as bind_entry,
             - we can not search an entry without the proper SELFDN aci.
             - adding the ACI, we can search the entry
     '''
-    topology.standalone.log.info("\n\n######################### SEARCH ######################\n")
+    topology_st.standalone.log.info("\n\n######################### SEARCH ######################\n")
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned
-    topology.standalone.log.info("Try to search  %s (aci is missing)" % ENTRY_DN)
-    ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+    topology_st.standalone.log.info("Try to search  %s (aci is missing)" % ENTRY_DN)
+    ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
     assert len(ents) == 0
 
     # Ok Now add the proper ACI
-    topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    ACI_TARGET       = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
-    ACI_TARGETATTR   = "(targetattr = *)"
+    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+    ACI_TARGETATTR = "(targetattr = *)"
     ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
-    ACI_ALLOW        = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)"
-    ACI_SUBJECT      = " userattr = \"member#selfDN\";)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+    ACI_ALLOW = "(version 3.0; acl \"SelfDN search-read\"; allow (read, search, compare)"
+    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
-    topology.standalone.modify_s(SUFFIX, mod)
+    topology_st.standalone.modify_s(SUFFIX, mod)
 
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # entry to search with the proper aci
-    topology.standalone.log.info("Try to search  %s should be successful" % ENTRY_DN)
-    ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+    topology_st.standalone.log.info("Try to search  %s should be successful" % ENTRY_DN)
+    ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
     assert len(ents) == 1
 
 
-def test_ticket47653_modify(topology):
+def test_ticket47653_modify(topology_st):
     '''
         It checks that, bound as bind_entry,
             - we can not modify an entry without the proper SELFDN aci.
             - adding the ACI, we can modify the entry
     '''
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
-    topology.standalone.log.info("\n\n######################### MODIFY ######################\n")
+    topology_st.standalone.log.info("\n\n######################### MODIFY ######################\n")
 
     # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.standalone.log.info("Try to modify  %s (aci is missing)" % ENTRY_DN)
+        topology_st.standalone.log.info("Try to modify  %s (aci is missing)" % ENTRY_DN)
         mod = [(ldap.MOD_REPLACE, 'postalCode', '9876')]
-        topology.standalone.modify_s(ENTRY_DN, mod)
+        topology_st.standalone.modify_s(ENTRY_DN, mod)
     except Exception as e:
-        topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
-
     # Ok Now add the proper ACI
-    topology.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    ACI_TARGET       = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
-    ACI_TARGETATTR   = "(targetattr = *)"
+    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+    ACI_TARGETATTR = "(targetattr = *)"
     ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
-    ACI_ALLOW        = "(version 3.0; acl \"SelfDN write\"; allow (write)"
-    ACI_SUBJECT      = " userattr = \"member#selfDN\";)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+    ACI_ALLOW = "(version 3.0; acl \"SelfDN write\"; allow (write)"
+    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
-    topology.standalone.modify_s(SUFFIX, mod)
+    topology_st.standalone.modify_s(SUFFIX, mod)
 
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # modify the entry and checks the value
-    topology.standalone.log.info("Try to modify  %s. It should succeeds" % ENTRY_DN)
+    topology_st.standalone.log.info("Try to modify  %s. It should succeeds" % ENTRY_DN)
     mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')]
-    topology.standalone.modify_s(ENTRY_DN, mod)
+    topology_st.standalone.modify_s(ENTRY_DN, mod)
 
-    ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
+    ents = topology_st.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*')
     assert len(ents) == 1
     assert ents[0].postalCode == '1928'
 
 
-def test_ticket47653_delete(topology):
+def test_ticket47653_delete(topology_st):
     '''
         It checks that, bound as bind_entry,
             - we can not delete an entry without the proper SELFDN aci.
             - adding the ACI, we can delete the entry
     '''
-    topology.standalone.log.info("\n\n######################### DELETE ######################\n")
+    topology_st.standalone.log.info("\n\n######################### DELETE ######################\n")
 
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS
     try:
-        topology.standalone.log.info("Try to delete  %s (aci is missing)" % ENTRY_DN)
-        topology.standalone.delete_s(ENTRY_DN)
+        topology_st.standalone.log.info("Try to delete  %s (aci is missing)" % ENTRY_DN)
+        topology_st.standalone.delete_s(ENTRY_DN)
     except Exception as e:
-        topology.standalone.log.info("Exception (expected): %s" % type(e).__name__)
+        topology_st.standalone.log.info("Exception (expected): %s" % type(e).__name__)
         assert isinstance(e, ldap.INSUFFICIENT_ACCESS)
 
     # Ok Now add the proper ACI
-    topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    ACI_TARGET       = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
+    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
     ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
-    ACI_ALLOW        = "(version 3.0; acl \"SelfDN delete\"; allow (delete)"
-    ACI_SUBJECT      = " userattr = \"member#selfDN\";)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
+    ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)"
+    ACI_SUBJECT = " userattr = \"member#selfDN\";)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
-    topology.standalone.modify_s(SUFFIX, mod)
+    topology_st.standalone.modify_s(SUFFIX, mod)
 
     # bind as bind_entry
-    topology.standalone.log.info("Bind as %s" % BIND_DN)
-    topology.standalone.simple_bind_s(BIND_DN, BIND_PW)
+    topology_st.standalone.log.info("Bind as %s" % BIND_DN)
+    topology_st.standalone.simple_bind_s(BIND_DN, BIND_PW)
 
     # entry to search with the proper aci
-    topology.standalone.log.info("Try to delete  %s should be successful" % ENTRY_DN)
-    topology.standalone.delete_s(ENTRY_DN)
+    topology_st.standalone.log.info("Try to delete  %s should be successful" % ENTRY_DN)
+    topology_st.standalone.delete_s(ENTRY_DN)
 
 
 if __name__ == '__main__':

+ 63 - 112
dirsrvtests/tests/tickets/ticket47669_test.py

@@ -6,19 +6,11 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
-from ldap.controls import SimplePagedResultsControl
-from ldap.controls.simple import GetEffectiveRightsControl
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -32,48 +24,7 @@ COMPACTDBINTERVAL = 'nsslapd-changelogcompactdb-interval'
 FILTER = '(cn=*)'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47669_init(topology):
+def test_ticket47669_init(topology_st):
     """
     Add cn=changelog5,cn=config
     Enable cn=Retro Changelog Plugin,cn=plugins,cn=config
@@ -81,36 +32,36 @@ def test_ticket47669_init(topology):
     log.info('Testing Ticket 47669 - Test duration syntax in the changelogs')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     try:
-        changelogdir = os.path.join(os.path.dirname(topology.standalone.dbdir), 'changelog')
-        topology.standalone.add_s(Entry((CHANGELOG,
-                                     {'objectclass': 'top extensibleObject'.split(),
-                                      'nsslapd-changelogdir': changelogdir})))
+        changelogdir = os.path.join(os.path.dirname(topology_st.standalone.dbdir), 'changelog')
+        topology_st.standalone.add_s(Entry((CHANGELOG,
+                                            {'objectclass': 'top extensibleObject'.split(),
+                                             'nsslapd-changelogdir': changelogdir})))
     except ldap.LDAPError as e:
         log.error('Failed to add ' + CHANGELOG + ': error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
+        topology_st.standalone.modify_s(RETROCHANGELOG, [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')])
     except ldap.LDAPError as e:
         log.error('Failed to enable ' + RETROCHANGELOG + ': error ' + e.message['desc'])
         assert False
 
     # restart the server
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
 
-def add_and_check(topology, plugin, attr, val, isvalid):
+def add_and_check(topology_st, plugin, attr, val, isvalid):
     """
     Helper function to add/replace attr: val and check the added value
     """
     if isvalid:
         log.info('Test %s: %s -- valid' % (attr, val))
         try:
-            topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
+            topology_st.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
         except ldap.LDAPError as e:
             log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc'])
             assert False
@@ -118,18 +69,18 @@ def add_and_check(topology, plugin, attr, val, isvalid):
         log.info('Test %s: %s -- invalid' % (attr, val))
         if plugin == CHANGELOG:
             try:
-                topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
+                topology_st.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
             except ldap.LDAPError as e:
                 log.error('Expectedly failed to add ' + attr + ': ' + val +
                           ' to ' + plugin + ': error ' + e.message['desc'])
         else:
             try:
-                topology.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
+                topology_st.standalone.modify_s(plugin, [(ldap.MOD_REPLACE, attr, val)])
             except ldap.LDAPError as e:
                 log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error ' + e.message['desc'])
 
     try:
-        entries = topology.standalone.search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr])
+        entries = topology_st.standalone.search_s(plugin, ldap.SCOPE_BASE, FILTER, [attr])
         if isvalid:
             if not entries[0].hasValue(attr, val):
                 log.fatal('%s does not have expected (%s: %s)' % (plugin, attr, val))
@@ -148,86 +99,86 @@ def add_and_check(topology, plugin, attr, val, isvalid):
         assert False
 
 
-def test_ticket47669_changelog_maxage(topology):
+def test_ticket47669_changelog_maxage(topology_st):
     """
     Test nsslapd-changelogmaxage in cn=changelog5,cn=config
     """
     log.info('1. Test nsslapd-changelogmaxage in cn=changelog5,cn=config')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    add_and_check(topology, CHANGELOG, MAXAGE, '12345', True)
-    add_and_check(topology, CHANGELOG, MAXAGE, '10s', True)
-    add_and_check(topology, CHANGELOG, MAXAGE, '30M', True)
-    add_and_check(topology, CHANGELOG, MAXAGE, '12h', True)
-    add_and_check(topology, CHANGELOG, MAXAGE, '2D', True)
-    add_and_check(topology, CHANGELOG, MAXAGE, '4w', True)
-    add_and_check(topology, CHANGELOG, MAXAGE, '-123', False)
-    add_and_check(topology, CHANGELOG, MAXAGE, 'xyz', False)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '12345', True)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '10s', True)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '30M', True)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '12h', True)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '2D', True)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '4w', True)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, '-123', False)
+    add_and_check(topology_st, CHANGELOG, MAXAGE, 'xyz', False)
 
 
-def test_ticket47669_changelog_triminterval(topology):
+def test_ticket47669_changelog_triminterval(topology_st):
     """
     Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config
     """
     log.info('2. Test nsslapd-changelogtrim-interval in cn=changelog5,cn=config')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12345', True)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '10s', True)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '30M', True)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '12h', True)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '2D', True)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '4w', True)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, '-123', False)
-    add_and_check(topology, CHANGELOG, TRIMINTERVAL, 'xyz', False)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '12345', True)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '10s', True)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '30M', True)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '12h', True)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '2D', True)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '4w', True)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, '-123', False)
+    add_and_check(topology_st, CHANGELOG, TRIMINTERVAL, 'xyz', False)
 
 
-def test_ticket47669_changelog_compactdbinterval(topology):
+def test_ticket47669_changelog_compactdbinterval(topology_st):
     """
     Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config
     """
     log.info('3. Test nsslapd-changelogcompactdb-interval in cn=changelog5,cn=config')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12345', True)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '10s', True)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '30M', True)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '12h', True)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '2D', True)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '4w', True)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, '-123', False)
-    add_and_check(topology, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '12345', True)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '10s', True)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '30M', True)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '12h', True)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '2D', True)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '4w', True)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, '-123', False)
+    add_and_check(topology_st, CHANGELOG, COMPACTDBINTERVAL, 'xyz', False)
 
 
-def test_ticket47669_retrochangelog_maxage(topology):
+def test_ticket47669_retrochangelog_maxage(topology_st):
     """
     Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config
     """
     log.info('4. Test nsslapd-changelogmaxage in cn=Retro Changelog Plugin,cn=plugins,cn=config')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '12345', True)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '10s', True)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '30M', True)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '12h', True)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '2D', True)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '4w', True)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, '-123', False)
-    add_and_check(topology, RETROCHANGELOG, MAXAGE, 'xyz', False)
-
-    topology.standalone.log.info("ticket47669 was successfully verified.")
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '12345', True)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '10s', True)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '30M', True)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '12h', True)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '2D', True)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '4w', True)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, '-123', False)
+    add_and_check(topology_st, RETROCHANGELOG, MAXAGE, 'xyz', False)
+
+    topology_st.standalone.log.info("ticket47669 was successfully verified.")
 
 
 if __name__ == '__main__':

+ 74 - 190
dirsrvtests/tests/tickets/ticket47676_test.py

@@ -11,37 +11,29 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
-SCHEMA_DN    = "cn=schema"
+SCHEMA_DN = "cn=schema"
 TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-OC_NAME      = 'OCticket47676'
-OC_OID_EXT   = 2
+OC_NAME = 'OCticket47676'
+OC_OID_EXT = 2
 MUST = "(postalAddress $ postalCode)"
-MAY  = "(member $ street)"
+MAY = "(member $ street)"
 
-OC2_NAME    = 'OC2ticket47676'
+OC2_NAME = 'OC2ticket47676'
 OC2_OID_EXT = 3
 MUST_2 = "(postalAddress $ postalCode)"
-MAY_2  = "(member $ street)"
+MAY_2 = "(member $ street)"
 
 REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
 REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
@@ -49,21 +41,21 @@ REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
 OTHER_NAME = 'other_entry'
 MAX_OTHERS = 10
 
-BIND_NAME  = 'bind_entry'
-BIND_DN    = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW    = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
 
 ENTRY_NAME = 'test_entry'
-ENTRY_DN   = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC   = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
 
 BASE_OID = "1.2.3.4.5.6.7.8.9.10"
 
 
 def _oc_definition(oid_ext, name, must=None, may=None):
-    oid  = "%s.%d" % (BASE_OID, oid_ext)
+    oid = "%s.%d" % (BASE_OID, oid_ext)
     desc = 'To test ticket 47490'
-    sup  = 'person'
+    sup = 'person'
     if not must:
         must = MUST
     if not may:
@@ -73,114 +65,7 @@ def _oc_definition(oid_ext, name, must=None, may=None):
     return new_oc
 
 
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    # Here we have two instances master and consumer
-    # with replication working.
-    return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47676_init(topology):
+def test_ticket47676_init(topology_m2):
     """
         It adds
            - Objectclass with MAY 'member'
@@ -189,43 +74,43 @@ def test_ticket47676_init(topology):
 
     """
 
-    topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME)
-    new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may  = MAY)
-    topology.master1.schema.add_schema('objectClasses', new_oc)
+    topology_m2.ms["master1"].log.info("Add %s that allows 'member' attribute" % OC_NAME)
+    new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY)
+    topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc)
 
     # entry used to bind with
-    topology.master1.log.info("Add %s" % BIND_DN)
-    topology.master1.add_s(Entry((BIND_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           BIND_NAME,
-                                            'cn':           BIND_NAME,
-                                            'userpassword': BIND_PW})))
+    topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+    topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+        'objectclass': "top person".split(),
+        'sn': BIND_NAME,
+        'cn': BIND_NAME,
+        'userpassword': BIND_PW})))
 
     # enable acl error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))]  # ACL + REPL
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
 
-def test_ticket47676_skip_oc_at(topology):
+def test_ticket47676_skip_oc_at(topology_m2):
     '''
         This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated
         on MASTER2 (even if on MASTER2 47676 is NOT fixed). Then update on MASTER2.
         If the schema has successfully been pushed, updating Master2 should succeed
     '''
-    topology.master1.log.info("\n\n######################### ADD ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### ADD ######################\n")
 
     # bind as 'cn=Directory manager'
-    topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("Bind as %s and add the add the entry with specific oc" % DN_DM)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
 
     # Prepare the entry with multivalued members
     entry = Entry(ENTRY_DN)
@@ -241,18 +126,18 @@ def test_ticket47676_skip_oc_at(topology):
     members.append(BIND_DN)
     entry.setValues('member', members)
 
-    topology.master1.log.info("Try to add Add  %s should be successful" % ENTRY_DN)
-    topology.master1.add_s(entry)
+    topology_m2.ms["master1"].log.info("Try to add Add  %s should be successful" % ENTRY_DN)
+    topology_m2.ms["master1"].add_s(entry)
 
     #
     # Now check the entry has been replicated
     #
-    topology.master2.simple_bind_s(DN_DM, PASSWORD)
-    topology.master1.log.info("Try to retrieve %s from Master2" % ENTRY_DN)
+    topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("Try to retrieve %s from Master2" % ENTRY_DN)
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
             break
         except ldap.NO_SUCH_OBJECT:
             time.sleep(2)
@@ -260,14 +145,14 @@ def test_ticket47676_skip_oc_at(topology):
     assert loop <= 10
 
     # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2)
-    topology.master1.log.info("Update  %s on M2" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Update  %s on M2" % ENTRY_DN)
     mod = [(ldap.MOD_REPLACE, 'description', 'test_add')]
-    topology.master2.modify_s(ENTRY_DN, mod)
+    topology_m2.ms["master2"].modify_s(ENTRY_DN, mod)
 
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
     loop = 0
     while loop <= 10:
-        ent = topology.master1.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+        ent = topology_m2.ms["master1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)")
         if ent.hasAttr('description') and (ent.getValue('description') == 'test_add'):
             break
         time.sleep(1)
@@ -276,29 +161,28 @@ def test_ticket47676_skip_oc_at(topology):
     assert ent.getValue('description') == 'test_add'
 
 
-def test_ticket47676_reject_action(topology):
-
-    topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n")
+def test_ticket47676_reject_action(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### REJECT ACTION ######################\n")
 
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
-    topology.master2.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
 
     # make master1 to refuse to push the schema if OC_NAME is present in consumer schema
     mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]  # ACL + REPL
-    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
+    topology_m2.ms["master1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
 
     # Restart is required to take into account that policy
-    topology.master1.stop(timeout=10)
-    topology.master1.start(timeout=10)
+    topology_m2.ms["master1"].stop(timeout=10)
+    topology_m2.ms["master1"].start(timeout=10)
 
     # Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema
-    topology.master1.log.info("Add %s on M1" % OC2_NAME)
+    topology_m2.ms["master1"].log.info("Add %s on M1" % OC2_NAME)
     new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY)
-    topology.master1.schema.add_schema('objectClasses', new_oc)
+    topology_m2.ms["master1"].schema.add_schema('objectClasses', new_oc)
 
     # Safety checking that the schema has been updated on M1
-    topology.master1.log.info("Check %s is in M1" % OC2_NAME)
-    ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
+    topology_m2.ms["master1"].log.info("Check %s is in M1" % OC2_NAME)
+    ent = topology_m2.ms["master1"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
     assert ent.hasAttr('objectclasses')
     found = False
     for objectclass in ent.getValues('objectclasses'):
@@ -308,15 +192,15 @@ def test_ticket47676_reject_action(topology):
     assert found
 
     # Do an update of M1 so that M1 will try to push the schema
-    topology.master1.log.info("Update  %s on M1" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Update  %s on M1" % ENTRY_DN)
     mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')]
-    topology.master1.modify_s(ENTRY_DN, mod)
+    topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
 
     # Check the replication occurred and so also M1 attempted to push the schema
-    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Check updated %s on M2" % ENTRY_DN)
     loop = 0
     while loop <= 10:
-        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+        ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
         if ent.hasAttr('description') and ent.getValue('description') == 'test_reject':
             # update was replicated
             break
@@ -325,8 +209,8 @@ def test_ticket47676_reject_action(topology):
     assert loop <= 10
 
     # Check that the schema has not been pushed
-    topology.master1.log.info("Check %s is not in M2" % OC2_NAME)
-    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
+    topology_m2.ms["master1"].log.info("Check %s is not in M2" % OC2_NAME)
+    ent = topology_m2.ms["master2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
     assert ent.hasAttr('objectclasses')
     found = False
     for objectclass in ent.getValues('objectclasses'):
@@ -335,26 +219,26 @@ def test_ticket47676_reject_action(topology):
             break
     assert not found
 
-    topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### NO MORE REJECT ACTION ######################\n")
 
     # make master1 to do no specific action on OC_NAME
     mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))]  # ACL + REPL
-    topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
+    topology_m2.ms["master1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
 
     # Restart is required to take into account that policy
-    topology.master1.stop(timeout=10)
-    topology.master1.start(timeout=10)
+    topology_m2.ms["master1"].stop(timeout=10)
+    topology_m2.ms["master1"].start(timeout=10)
 
     # Do an update of M1 so that M1 will try to push the schema
-    topology.master1.log.info("Update  %s on M1" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Update  %s on M1" % ENTRY_DN)
     mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')]
-    topology.master1.modify_s(ENTRY_DN, mod)
+    topology_m2.ms["master1"].modify_s(ENTRY_DN, mod)
 
     # Check the replication occurred and so also M1 attempted to push the schema
-    topology.master1.log.info("Check updated %s on M2" % ENTRY_DN)
+    topology_m2.ms["master1"].log.info("Check updated %s on M2" % ENTRY_DN)
     loop = 0
     while loop <= 10:
-        ent = topology.master2.getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+        ent = topology_m2.ms["master2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
         if ent.hasAttr('description') and ent.getValue('description') == 'test_no_more_reject':
             # update was replicated
             break
@@ -363,15 +247,15 @@ def test_ticket47676_reject_action(topology):
     assert loop <= 10
 
     # Check that the schema has been pushed
-    topology.master1.log.info("Check %s is in M2" % OC2_NAME)
-    ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
+    topology_m2.ms["master1"].log.info("Check %s is in M2" % OC2_NAME)
+    ent = topology_m2.ms["master2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"])
     assert ent.hasAttr('objectclasses')
     found = False
     for objectclass in ent.getValues('objectclasses'):
         if str(objectclass).find(OC2_NAME) >= 0:
             found = True
             break
-    assert  found
+    assert found
 
 
 if __name__ == '__main__':

+ 65 - 103
dirsrvtests/tests/tickets/ticket47714_test.py

@@ -6,17 +6,14 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -31,78 +28,40 @@ TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX)
 TEST_USER_PW = '%s' % TEST_USER
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
 
 
-def test_ticket47714_init(topology):
+def test_ticket47714_init(topology_st):
     """
     1. Add account policy entry to the DB
     2. Add a test user to the DB
     """
-    _header(topology, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.')
+    _header(topology_st,
+            'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN)
-    topology.standalone.add_s(Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
-                                                      'accountInactivityLimit': INACTIVITY_LIMIT})))
+    topology_st.standalone.add_s(
+        Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
+                                'accountInactivityLimit': INACTIVITY_LIMIT})))
 
     log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN)
-    topology.standalone.add_s(Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                    'cn': TEST_USER,
-                                                    'sn': TEST_USER,
-                                                    'givenname': TEST_USER,
-                                                    'userPassword': TEST_USER_PW,
-                                                    'acctPolicySubentry': ACCT_POLICY_DN})))
+    topology_st.standalone.add_s(
+        Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                              'cn': TEST_USER,
+                              'sn': TEST_USER,
+                              'givenname': TEST_USER,
+                              'userPassword': TEST_USER_PW,
+                              'acctPolicySubentry': ACCT_POLICY_DN})))
 
 
-def test_ticket47714_run_0(topology):
+def test_ticket47714_run_0(topology_st):
     """
     Check this change has no impact on the existing functionality.
     1. Set account policy config without the new attr alwaysRecordLoginAttr
@@ -111,67 +70,68 @@ def test_ticket47714_run_0(topology):
    4. Wait longer than the accountInactivityLimit time and bind as the test user,
       which should fail with CONSTRAINT_VIOLATION.
     """
-    _header(topology, 'Account Policy - No new attr alwaysRecordLoginAttr in config')
+    _header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr in config')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     # Modify Account Policy config entry
-    topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
-                                                         (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
-                                                         (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
-                                                         (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
-                                                         (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+    topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+                                                            (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
+                                                            (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
+                                                            (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+                                                            (ldap.MOD_REPLACE, 'limitattrname',
+                                                             'accountInactivityLimit')])
 
     # Enable the plugins
-    topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+    topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
 
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.restart(timeout=120)
 
     log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN)
     try:
-        topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
     except ldap.CONSTRAINT_VIOLATION as e:
         log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
 
     time.sleep(2)
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
 
     lastLoginTime0 = entry[0].lastLoginTime
 
     log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN)
     try:
-        topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
     except ldap.CONSTRAINT_VIOLATION as e:
         log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
 
     time.sleep(2)
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
 
     lastLoginTime1 = entry[0].lastLoginTime
 
     log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1))
     assert lastLoginTime0 < lastLoginTime1
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    entry = topology_st.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER)
     log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN)
     log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit)
     log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN)
 
     log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN)
     try:
-        topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
     except ldap.CONSTRAINT_VIOLATION as e:
         log.info('CONSTRAINT VIOLATION ' + e.message['desc'])
         log.info("%s was successfully inactivated." % TEST_USER_DN)
         pass
 
 
-def test_ticket47714_run_1(topology):
+def test_ticket47714_run_1(topology_st):
     """
     Verify a new config attr alwaysRecordLoginAttr
     1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime
@@ -180,52 +140,54 @@ def test_ticket47714_run_1(topology):
     2. Bind as a test user
     3. Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated
     """
-    _header(topology, 'Account Policy - With new attr alwaysRecordLoginAttr in config')
+    _header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr in config')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)])
 
     # Modify Account Policy config entry
-    topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
-                                                         (ldap.MOD_REPLACE, 'stateattrname', 'bogus'),
-                                                         (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'),
-                                                         (ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'),
-                                                         (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
-                                                         (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+    topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+                                                            (ldap.MOD_REPLACE, 'stateattrname', 'bogus'),
+                                                            (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'),
+                                                            (
+                                                            ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'),
+                                                            (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+                                                            (ldap.MOD_REPLACE, 'limitattrname',
+                                                             'accountInactivityLimit')])
 
     # Enable the plugins
-    topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+    topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
 
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.restart(timeout=120)
 
     log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN)
     try:
-        topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
     except ldap.CONSTRAINT_VIOLATION as e:
         log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
 
     time.sleep(1)
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
     lastLoginTime0 = entry[0].lastLoginTime
 
     log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN)
     try:
-        topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
     except ldap.CONSTRAINT_VIOLATION as e:
         log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
 
     time.sleep(1)
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
     lastLoginTime1 = entry[0].lastLoginTime
 
     log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1))
     assert lastLoginTime0 < lastLoginTime1
 
-    topology.standalone.log.info("ticket47714 was successfully verified.")
+    topology_st.standalone.log.info("ticket47714 was successfully verified.")
 
 
 if __name__ == '__main__':

+ 87 - 202
dirsrvtests/tests/tickets/ticket47721_test.py

@@ -11,38 +11,29 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
-from lib389._constants import REPLICAROLE_MASTER
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
-SCHEMA_DN    = "cn=schema"
+SCHEMA_DN = "cn=schema"
 TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-OC_NAME      = 'OCticket47721'
-OC_OID_EXT   = 2
+OC_NAME = 'OCticket47721'
+OC_OID_EXT = 2
 MUST = "(postalAddress $ postalCode)"
-MAY  = "(member $ street)"
+MAY = "(member $ street)"
 
-OC2_NAME    = 'OC2ticket47721'
+OC2_NAME = 'OC2ticket47721'
 OC2_OID_EXT = 3
 MUST_2 = "(postalAddress $ postalCode)"
-MAY_2  = "(member $ street)"
+MAY_2 = "(member $ street)"
 
 REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
 REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
@@ -50,20 +41,22 @@ REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
 OTHER_NAME = 'other_entry'
 MAX_OTHERS = 10
 
-BIND_NAME  = 'bind_entry'
-BIND_DN    = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW    = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
 
 ENTRY_NAME = 'test_entry'
-ENTRY_DN   = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC   = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
 
 BASE_OID = "1.2.3.4.5.6.7.8.9.10"
 
 SLEEP_INTERVAL = 60
 
+
 def _add_custom_at_definition(name='ATticket47721'):
-    new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (name, name)
+    new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (
+    name, name)
     return new_at
 
 
@@ -73,7 +66,8 @@ def _chg_std_at_defintion():
 
 
 def _add_custom_oc_defintion(name='OCticket47721'):
-    new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (name, name)
+    new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (
+    name, name)
     return new_oc
 
 
@@ -82,116 +76,7 @@ def _chg_std_oc_defintion():
     return new_oc
 
 
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-    #
-    # Here we have two instances master and consumer
-    # with replication working. Either coming from a backup recovery
-    # or from a fresh (re)init
-    # Time to return the topology
-    return TopologyMaster1Master2(master1, master2)
-
-
-def test_ticket47721_init(topology):
+def test_ticket47721_init(topology_m2):
     """
         It adds
            - Objectclass with MAY 'member'
@@ -201,34 +86,34 @@ def test_ticket47721_init(topology):
     """
 
     # entry used to bind with
-    topology.master1.log.info("Add %s" % BIND_DN)
-    topology.master1.add_s(Entry((BIND_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           BIND_NAME,
-                                            'cn':           BIND_NAME,
-                                            'userpassword': BIND_PW})))
+    topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+    topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+        'objectclass': "top person".split(),
+        'sn': BIND_NAME,
+        'cn': BIND_NAME,
+        'userpassword': BIND_PW})))
 
     # enable repl error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))]  # REPL logging
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
 
-def test_ticket47721_0(topology):
+def test_ticket47721_0(topology_m2):
     dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
     loop = 0
     ent = None
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             break
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
@@ -237,35 +122,35 @@ def test_ticket47721_0(topology):
         assert False
 
 
-def test_ticket47721_1(topology):
+def test_ticket47721_1(topology_m2):
     log.info('Running test 1...')
-    #topology.master1.log.info("Attach debugger\n\n")
-    #time.sleep(30)
+    # topology_m2.ms["master1"].log.info("Attach debugger\n\n")
+    # time.sleep(30)
 
     new = _add_custom_at_definition()
-    topology.master1.log.info("Add (M2) %s " % new)
-    topology.master2.schema.add_schema('attributetypes', new)
+    topology_m2.ms["master1"].log.info("Add (M2) %s " % new)
+    topology_m2.ms["master2"].schema.add_schema('attributetypes', new)
 
     new = _chg_std_at_defintion()
-    topology.master1.log.info("Chg (M2) %s " % new)
-    topology.master2.schema.add_schema('attributetypes', new)
+    topology_m2.ms["master1"].log.info("Chg (M2) %s " % new)
+    topology_m2.ms["master2"].schema.add_schema('attributetypes', new)
 
     new = _add_custom_oc_defintion()
-    topology.master1.log.info("Add (M2) %s " % new)
-    topology.master2.schema.add_schema('objectClasses', new)
+    topology_m2.ms["master1"].log.info("Add (M2) %s " % new)
+    topology_m2.ms["master2"].schema.add_schema('objectClasses', new)
 
     new = _chg_std_oc_defintion()
-    topology.master1.log.info("Chg (M2) %s " % new)
-    topology.master2.schema.add_schema('objectClasses', new)
+    topology_m2.ms["master1"].log.info("Chg (M2) %s " % new)
+    topology_m2.ms["master2"].schema.add_schema('objectClasses', new)
 
     mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 1')]
     dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
-    topology.master2.modify_s(dn, mod)
+    topology_m2.ms["master2"].modify_s(dn, mod)
 
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master1"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 1'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -274,23 +159,23 @@ def test_ticket47721_1(topology):
     assert loop <= 10
 
     time.sleep(2)
-    schema_csn_master1 = topology.master1.schema.get_schema_csn()
-    schema_csn_master2 = topology.master2.schema.get_schema_csn()
+    schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+    schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
     log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
     log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
 
 
-def test_ticket47721_2(topology):
+def test_ticket47721_2(topology_m2):
     log.info('Running test 2...')
 
     mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 2')]
     dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
-    topology.master1.modify_s(dn, mod)
+    topology_m2.ms["master1"].modify_s(dn, mod)
 
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 2'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -299,23 +184,23 @@ def test_ticket47721_2(topology):
     assert loop <= 10
 
     time.sleep(2)
-    schema_csn_master1 = topology.master1.schema.get_schema_csn()
-    schema_csn_master2 = topology.master2.schema.get_schema_csn()
+    schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+    schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
     log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
     log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
     if schema_csn_master1 != schema_csn_master2:
         # We need to give the server a little more time, then check it again
         log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...'
-            % (schema_csn_master1, schema_csn_master2))
+                 % (schema_csn_master1, schema_csn_master2))
         time.sleep(SLEEP_INTERVAL)
-        schema_csn_master1 = topology.master1.schema.get_schema_csn()
-        schema_csn_master2 = topology.master2.schema.get_schema_csn()
+        schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+        schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
 
     assert schema_csn_master1 is not None
     assert schema_csn_master1 == schema_csn_master2
 
 
-def test_ticket47721_3(topology):
+def test_ticket47721_3(topology_m2):
     '''
     Check that the supplier can update its schema from consumer schema
     Update M2 schema, then trigger a replication M1->M2
@@ -323,26 +208,26 @@ def test_ticket47721_3(topology):
     log.info('Running test 3...')
 
     # stop RA M2->M1, so that M1 can only learn being a supplier
-    ents = topology.master2.agreement.list(suffix=SUFFIX)
+    ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master2.agreement.pause(ents[0].dn)
+    topology_m2.ms["master2"].agreement.pause(ents[0].dn)
 
     new = _add_custom_at_definition('ATtest3')
-    topology.master1.log.info("Update schema (M2) %s " % new)
-    topology.master2.schema.add_schema('attributetypes', new)
+    topology_m2.ms["master1"].log.info("Update schema (M2) %s " % new)
+    topology_m2.ms["master2"].schema.add_schema('attributetypes', new)
 
     new = _add_custom_oc_defintion('OCtest3')
-    topology.master1.log.info("Update schema (M2) %s " % new)
-    topology.master2.schema.add_schema('objectClasses', new)
+    topology_m2.ms["master1"].log.info("Update schema (M2) %s " % new)
+    topology_m2.ms["master2"].schema.add_schema('objectClasses', new)
 
     mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 3')]
     dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
-    topology.master1.modify_s(dn, mod)
+    topology_m2.ms["master1"].modify_s(dn, mod)
 
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 3'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -351,24 +236,24 @@ def test_ticket47721_3(topology):
     assert loop <= 10
 
     time.sleep(2)
-    schema_csn_master1 = topology.master1.schema.get_schema_csn()
-    schema_csn_master2 = topology.master2.schema.get_schema_csn()
+    schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+    schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
     log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
     log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
     if schema_csn_master1 == schema_csn_master2:
         # We need to give the server a little more time, then check it again
         log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...'
-            % (schema_csn_master1, schema_csn_master2))
+                 % (schema_csn_master1, schema_csn_master2))
         time.sleep(SLEEP_INTERVAL)
-        schema_csn_master1 = topology.master1.schema.get_schema_csn()
-        schema_csn_master2 = topology.master2.schema.get_schema_csn()
+        schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+        schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
 
     assert schema_csn_master1 is not None
     # schema csn on M2 is larger that on M1. M1 only took the new definitions
     assert schema_csn_master1 != schema_csn_master2
 
 
-def test_ticket47721_4(topology):
+def test_ticket47721_4(topology_m2):
     '''
     Here M2->M1 agreement is disabled.
     with test_ticket47721_3, M1 schema and M2 should be identical BUT
@@ -378,22 +263,22 @@ def test_ticket47721_4(topology):
     log.info('Running test 4...')
 
     new = _add_custom_at_definition('ATtest4')
-    topology.master1.log.info("Update schema (M1) %s " % new)
-    topology.master1.schema.add_schema('attributetypes', new)
+    topology_m2.ms["master1"].log.info("Update schema (M1) %s " % new)
+    topology_m2.ms["master1"].schema.add_schema('attributetypes', new)
 
     new = _add_custom_oc_defintion('OCtest4')
-    topology.master1.log.info("Update schema (M1) %s " % new)
-    topology.master1.schema.add_schema('objectClasses', new)
+    topology_m2.ms["master1"].log.info("Update schema (M1) %s " % new)
+    topology_m2.ms["master1"].schema.add_schema('objectClasses', new)
 
-    topology.master1.log.info("trigger replication M1->M2: to update the schema")
+    topology_m2.ms["master1"].log.info("trigger replication M1->M2: to update the schema")
     mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 4')]
     dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
-    topology.master1.modify_s(dn, mod)
+    topology_m2.ms["master1"].modify_s(dn, mod)
 
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 4'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -401,15 +286,15 @@ def test_ticket47721_4(topology):
         time.sleep(1)
     assert loop <= 10
 
-    topology.master1.log.info("trigger replication M1->M2: to push the schema")
+    topology_m2.ms["master1"].log.info("trigger replication M1->M2: to push the schema")
     mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 5')]
     dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
-    topology.master1.modify_s(dn, mod)
+    topology_m2.ms["master1"].modify_s(dn, mod)
 
     loop = 0
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
             if ent.hasAttr('description') and (ent.getValue('description') == 'Hello world 5'):
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -418,16 +303,16 @@ def test_ticket47721_4(topology):
     assert loop <= 10
 
     time.sleep(2)
-    schema_csn_master1 = topology.master1.schema.get_schema_csn()
-    schema_csn_master2 = topology.master2.schema.get_schema_csn()
+    schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+    schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
     log.debug('Master 1 schemaCSN: %s' % schema_csn_master1)
     log.debug('Master 2 schemaCSN: %s' % schema_csn_master2)
     if schema_csn_master1 != schema_csn_master2:
         # We need to give the server a little more time, then check it again
         log.info('Schema CSNs are incorrectly in sync, wait a little...')
         time.sleep(SLEEP_INTERVAL)
-        schema_csn_master1 = topology.master1.schema.get_schema_csn()
-        schema_csn_master2 = topology.master2.schema.get_schema_csn()
+        schema_csn_master1 = topology_m2.ms["master1"].schema.get_schema_csn()
+        schema_csn_master2 = topology_m2.ms["master2"].schema.get_schema_csn()
 
     assert schema_csn_master1 is not None
     assert schema_csn_master1 == schema_csn_master2

+ 28 - 75
dirsrvtests/tests/tickets/ticket47781_test.py

@@ -6,63 +6,16 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47781(topology):
+def test_ticket47781(topology_st):
     """
         Testing for a deadlock after doing an online import of an LDIF with
         replication data.  The replication agreement should be invalid.
@@ -74,38 +27,38 @@ def test_ticket47781(topology):
     # Setup Replication
     #
     log.info('Setting up replication...')
-    topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
-                                                  replicaId=REPLICAID_MASTER_1)
+    topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+                                                     replicaId=REPLICAID_MASTER_1)
 
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
+    properties = {RA_NAME: r'meTo_$host:$port',
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                   RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
     # The agreement should point to a server that does NOT exist (invalid port)
-    repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
-                                                          host=topology.standalone.host,
-                                                          port=5555,
-                                                          properties=properties)
+    repl_agreement = topology_st.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
+                                                             host=topology_st.standalone.host,
+                                                             port=5555,
+                                                             properties=properties)
 
     #
     # add two entries
     #
     log.info('Adding two entries...')
     try:
-        topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'entry1'})))
+        topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'entry1'})))
     except ldap.LDAPError as e:
         log.error('Failed to add entry 1: ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'entry2'})))
+        topology_st.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', {
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'entry2'})))
     except ldap.LDAPError as e:
         log.error('Failed to add entry 2: ' + e.message['desc'])
         assert False
@@ -115,7 +68,7 @@ def test_ticket47781(topology):
     #
     log.info('Exporting replication ldif...')
     args = {EXPORT_REPL_INFO: True}
-    exportTask = Tasks(topology.standalone)
+    exportTask = Tasks(topology_st.standalone)
     try:
         exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
     except ValueError:
@@ -125,14 +78,14 @@ def test_ticket47781(topology):
     # Restart the server
     #
     log.info('Restarting server...')
-    topology.standalone.stop(timeout=5)
-    topology.standalone.start(timeout=5)
+    topology_st.standalone.stop(timeout=5)
+    topology_st.standalone.start(timeout=5)
 
     #
     # Import the ldif
     #
     log.info('Import replication LDIF file...')
-    importTask = Tasks(topology.standalone)
+    importTask = Tasks(topology_st.standalone)
     args = {TASK_WAIT: True}
     try:
         importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
@@ -145,10 +98,10 @@ def test_ticket47781(topology):
     # Search for tombstones - we should not hang/timeout
     #
     log.info('Search for tombstone entries(should find one and not hang)...')
-    topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
-    topology.standalone.set_option(ldap.OPT_TIMEOUT, 5)
+    topology_st.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
+    topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5)
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone')
         if not entries:
             log.fatal('Search failed to find any entries.')
             assert PR_False

+ 110 - 223
dirsrvtests/tests/tickets/ticket47787_test.py

@@ -11,163 +11,47 @@ Created on April 14, 2014
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
+import re
 import time
+
 import ldap
-import logging
 import pytest
-import re
-from lib389 import DirSrv, Entry, tools, NoSuchEntryError
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
-from lib389._constants import REPLICAROLE_MASTER
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
 # set this flag to False so that it will assert on failure _status_entry_both_server
 DEBUG_FLAG = False
 
 TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
 
-STAGING_CN     = "staged user"
-PRODUCTION_CN  = "accounts"
-EXCEPT_CN      = "excepts"
+STAGING_CN = "staged user"
+PRODUCTION_CN = "accounts"
+EXCEPT_CN = "excepts"
 
-STAGING_DN    = "cn=%s,%s" % (STAGING_CN, SUFFIX)
+STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
 PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
 PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)
 
-STAGING_PATTERN    = "cn=%s*,%s" % (STAGING_CN[:2],    SUFFIX)
+STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
 PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
-BAD_STAGING_PATTERN    = "cn=bad*,%s" % (SUFFIX)
+BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
 BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)
 
-BIND_CN        = "bind_entry"
-BIND_DN        = "cn=%s,%s" % (BIND_CN, SUFFIX)
-BIND_PW        = "password"
+BIND_CN = "bind_entry"
+BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
+BIND_PW = "password"
 
-NEW_ACCOUNT    = "new_account"
-MAX_ACCOUNTS   = 20
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
 
 CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
 
 
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    # Here we have two instances master and consumer
-    # with replication working.
-    return TopologyMaster1Master2(master1, master2)
-
-
 def _bind_manager(server):
     server.log.info("Bind as %s " % DN_DM)
     server.simple_bind_s(DN_DM, PASSWORD)
@@ -178,23 +62,23 @@ def _bind_normal(server):
     server.simple_bind_s(BIND_DN, BIND_PW)
 
 
-def _header(topology, label):
-    topology.master1.log.info("\n\n###############################################")
-    topology.master1.log.info("#######")
-    topology.master1.log.info("####### %s" % label)
-    topology.master1.log.info("#######")
-    topology.master1.log.info("###############################################")
+def _header(topology_m2, label):
+    topology_m2.ms["master1"].log.info("\n\n###############################################")
+    topology_m2.ms["master1"].log.info("#######")
+    topology_m2.ms["master1"].log.info("####### %s" % label)
+    topology_m2.ms["master1"].log.info("#######")
+    topology_m2.ms["master1"].log.info("###############################################")
 
 
-def _status_entry_both_server(topology, name=None, desc=None, debug=True):
+def _status_entry_both_server(topology_m2, name=None, desc=None, debug=True):
     if not name:
         return
-    topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### Tombstone on M1 ######################\n")
     attr = 'description'
     found = False
     attempt = 0
     while not found and attempt < 10:
-        ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
+        ent_m1 = _find_tombstone(topology_m2.ms["master1"], SUFFIX, 'sn', name)
         if attr in ent_m1.getAttrs():
             found = True
         else:
@@ -202,40 +86,40 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True):
             attempt = attempt + 1
     assert ent_m1
 
-    topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
-    ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
+    topology_m2.ms["master1"].log.info("\n\n######################### Tombstone on M2 ######################\n")
+    ent_m2 = _find_tombstone(topology_m2.ms["master2"], SUFFIX, 'sn', name)
     assert ent_m2
 
-    topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)
-    topology.master1.log.info("M1 only\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### Description ######################\n%s\n" % desc)
+    topology_m2.ms["master1"].log.info("M1 only\n")
     for attr in ent_m1.getAttrs():
 
         if not debug:
             assert attr in ent_m2.getAttrs()
 
         if not attr in ent_m2.getAttrs():
-            topology.master1.log.info("    %s" % attr)
+            topology_m2.ms["master1"].log.info("    %s" % attr)
             for val in ent_m1.getValues(attr):
-                topology.master1.log.info("        %s" % val)
+                topology_m2.ms["master1"].log.info("        %s" % val)
 
-    topology.master1.log.info("M2 only\n")
+    topology_m2.ms["master1"].log.info("M2 only\n")
     for attr in ent_m2.getAttrs():
 
         if not debug:
             assert attr in ent_m1.getAttrs()
 
         if not attr in ent_m1.getAttrs():
-            topology.master1.log.info("    %s" % attr)
+            topology_m2.ms["master1"].log.info("    %s" % attr)
             for val in ent_m2.getValues(attr):
-                topology.master1.log.info("        %s" % val)
+                topology_m2.ms["master1"].log.info("        %s" % val)
 
-    topology.master1.log.info("M1 differs M2\n")
+    topology_m2.ms["master1"].log.info("M1 differs M2\n")
 
     if not debug:
         assert ent_m1.dn == ent_m2.dn
 
     if ent_m1.dn != ent_m2.dn:
-        topology.master1.log.info("    M1[dn] = %s\n    M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
+        topology_m2.ms["master1"].log.info("    M1[dn] = %s\n    M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
 
     for attr1 in ent_m1.getAttrs():
         if attr1 in ent_m2.getAttrs():
@@ -250,7 +134,7 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True):
                     assert found
 
                 if not found:
-                    topology.master1.log.info("    M1[%s] = %s" % (attr1, val1))
+                    topology_m2.ms["master1"].log.info("    M1[%s] = %s" % (attr1, val1))
 
     for attr2 in ent_m2.getAttrs():
         if attr2 in ent_m1.getAttrs():
@@ -265,29 +149,29 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True):
                     assert found
 
                 if not found:
-                    topology.master1.log.info("    M2[%s] = %s" % (attr2, val2))
+                    topology_m2.ms["master1"].log.info("    M2[%s] = %s" % (attr2, val2))
 
 
-def _pause_RAs(topology):
-    topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _pause_RAs(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master1.agreement.pause(ents[0].dn)
+    topology_m2.ms["master1"].agreement.pause(ents[0].dn)
 
-    ents = topology.master2.agreement.list(suffix=SUFFIX)
+    ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master2.agreement.pause(ents[0].dn)
+    topology_m2.ms["master2"].agreement.pause(ents[0].dn)
 
 
-def _resume_RAs(topology):
-    topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n")
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _resume_RAs(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### resume RA M1<->M2 ######################\n")
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master1.agreement.resume(ents[0].dn)
+    topology_m2.ms["master1"].agreement.resume(ents[0].dn)
 
-    ents = topology.master2.agreement.list(suffix=SUFFIX)
+    ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master2.agreement.resume(ents[0].dn)
+    topology_m2.ms["master2"].agreement.resume(ents[0].dn)
 
 
 def _find_tombstone(instance, base, attr, value):
@@ -299,7 +183,7 @@ def _find_tombstone(instance, base, attr, value):
     #
     filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
     ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
-    #found = False
+    # found = False
     for ent in ents:
         if ent.hasAttr(attr):
             for val in ent.getValues(attr):
@@ -357,43 +241,44 @@ def _check_entry_exists(instance, entry_dn):
 
 
 def _check_mod_received(instance, base, filt, attr, value):
-    instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
+    instance.log.info(
+        "\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
     loop = 0
     while loop <= 10:
         ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
         if ent.hasAttr(attr) and ent.getValue(attr) == value:
-                break
+            break
         time.sleep(1)
         loop += 1
     assert loop <= 10
 
 
-def _check_replication(topology, entry_dn):
+def _check_replication(topology_m2, entry_dn):
     # prepare the filter to retrieve the entry
     filt = entry_dn.split(',')[0]
 
-    topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n")
+    topology_m2.ms["master1"].log.info("\n######################### Check replication M1->M2 ######################\n")
     loop = 0
     while loop <= 10:
         attr = 'description'
         value = 'test_value_%d' % loop
         mod = [(ldap.MOD_REPLACE, attr, value)]
-        topology.master1.modify_s(entry_dn, mod)
-        _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
+        topology_m2.ms["master1"].modify_s(entry_dn, mod)
+        _check_mod_received(topology_m2.ms["master2"], SUFFIX, filt, attr, value)
         loop += 1
 
-    topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n")
+    topology_m2.ms["master1"].log.info("\n######################### Check replication M2->M1 ######################\n")
     loop = 0
     while loop <= 10:
         attr = 'description'
         value = 'test_value_%d' % loop
         mod = [(ldap.MOD_REPLACE, attr, value)]
-        topology.master2.modify_s(entry_dn, mod)
-        _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
+        topology_m2.ms["master2"].modify_s(entry_dn, mod)
+        _check_mod_received(topology_m2.ms["master1"], SUFFIX, filt, attr, value)
         loop += 1
 
 
-def test_ticket47787_init(topology):
+def test_ticket47787_init(topology_m2):
     """
         Creates
             - a staging DIT
@@ -402,45 +287,45 @@ def test_ticket47787_init(topology):
 
     """
 
-    topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### INITIALIZATION ######################\n")
 
     # entry used to bind with
-    topology.master1.log.info("Add %s" % BIND_DN)
-    topology.master1.add_s(Entry((BIND_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           BIND_CN,
-                                            'cn':           BIND_CN,
-                                            'userpassword': BIND_PW})))
+    topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+    topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+        'objectclass': "top person".split(),
+        'sn': BIND_CN,
+        'cn': BIND_CN,
+        'userpassword': BIND_PW})))
 
     # DIT for staging
-    topology.master1.log.info("Add %s" % STAGING_DN)
-    topology.master1.add_s(Entry((STAGING_DN, {
-                                            'objectclass': "top organizationalRole".split(),
-                                            'cn':           STAGING_CN,
-                                            'description': "staging DIT"})))
+    topology_m2.ms["master1"].log.info("Add %s" % STAGING_DN)
+    topology_m2.ms["master1"].add_s(Entry((STAGING_DN, {
+        'objectclass': "top organizationalRole".split(),
+        'cn': STAGING_CN,
+        'description': "staging DIT"})))
 
     # DIT for production
-    topology.master1.log.info("Add %s" % PRODUCTION_DN)
-    topology.master1.add_s(Entry((PRODUCTION_DN, {
-                                            'objectclass': "top organizationalRole".split(),
-                                            'cn':           PRODUCTION_CN,
-                                            'description': "production DIT"})))
+    topology_m2.ms["master1"].log.info("Add %s" % PRODUCTION_DN)
+    topology_m2.ms["master1"].add_s(Entry((PRODUCTION_DN, {
+        'objectclass': "top organizationalRole".split(),
+        'cn': PRODUCTION_CN,
+        'description': "production DIT"})))
 
     # enable replication error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     # add dummy entries in the staging DIT
     for cpt in range(MAX_ACCOUNTS):
         name = "%s%d" % (NEW_ACCOUNT, cpt)
-        topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
 
-def test_ticket47787_2(topology):
+def test_ticket47787_2(topology_m2):
     '''
     Disable replication so that updates are not replicated
     Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior).
@@ -450,11 +335,11 @@ def test_ticket47787_2(topology):
     checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn)
     '''
 
-    _header(topology, "test_ticket47787_2")
-    _bind_manager(topology.master1)
-    _bind_manager(topology.master2)
+    _header(topology_m2, "test_ticket47787_2")
+    _bind_manager(topology_m2.ms["master1"])
+    _bind_manager(topology_m2.ms["master2"])
 
-    #entry to test the replication is still working
+    # entry to test the replication is still working
     name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1)
     test_rdn = "cn=%s" % (name)
     testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)
@@ -473,33 +358,34 @@ def test_ticket47787_2(topology):
     entry_dn = "%s,%s" % (rdn, STAGING_DN)
 
     # created on M1, wait the entry exists on M2
-    _check_entry_exists(topology.master2, entry_dn)
-    _check_entry_exists(topology.master2, testentry_dn)
+    _check_entry_exists(topology_m2.ms["master2"], entry_dn)
+    _check_entry_exists(topology_m2.ms["master2"], testentry_dn)
 
-    _pause_RAs(topology)
+    _pause_RAs(topology_m2)
 
     # Delete 'entry_dn' on M1.
     # dummy update is only have a first CSN before the DEL
     # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
-    _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
-    _delete_entry(topology.master1, entry_dn, name)
-    _mod_entry(topology.master1, testentry2_dn, attr, value)
+    _mod_entry(topology_m2.ms["master1"], testentry2_dn, attr, 'dummy')
+    _delete_entry(topology_m2.ms["master1"], entry_dn, name)
+    _mod_entry(topology_m2.ms["master1"], testentry2_dn, attr, value)
 
     time.sleep(1)  # important to have MOD.csn != DEL.csn
 
     # MOD 'entry_dn' on M1.
     # dummy update is only have a first CSN before the MOD entry_dn
     # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
-    _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
-    _mod_entry(topology.master2, entry_dn, attr, value)
-    _mod_entry(topology.master2, testentry_dn, attr, value)
+    _mod_entry(topology_m2.ms["master2"], testentry_dn, attr, 'dummy')
+    _mod_entry(topology_m2.ms["master2"], entry_dn, attr, value)
+    _mod_entry(topology_m2.ms["master2"], testentry_dn, attr, value)
 
-    _resume_RAs(topology)
+    _resume_RAs(topology_m2)
 
-    topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
+    topology_m2.ms["master1"].log.info(
+        "\n\n######################### Check DEL replicated on M2 ######################\n")
     loop = 0
     while loop <= 10:
-        ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
+        ent = _find_tombstone(topology_m2.ms["master2"], SUFFIX, 'sn', name)
         if ent:
             break
         time.sleep(1)
@@ -509,17 +395,18 @@ def test_ticket47787_2(topology):
 
     # the following checks are not necessary
     # as this bug is only for failing replicated MOD (entry_dn) on M1
-    #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
-    #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
+    # _check_mod_received(topology_m2.ms["master1"], SUFFIX, "(%s)" % (test_rdn), attr, value)
+    # _check_mod_received(topology_m2.ms["master2"], SUFFIX, "(%s)" % (test2_rdn), attr, value)
     #
-    #_check_replication(topology, testentry_dn)
+    # _check_replication(topology_m2, testentry_dn)
 
-    _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)
+    _status_entry_both_server(topology_m2, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)
 
-    topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
+    topology_m2.ms["master1"].log.info(
+        "\n\n######################### Check MOD replicated on M1 ######################\n")
     loop = 0
     while loop <= 10:
-        ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
+        ent = _find_tombstone(topology_m2.ms["master1"], SUFFIX, 'sn', name)
         if ent:
             break
         time.sleep(1)

+ 31 - 74
dirsrvtests/tests/tickets/ticket47808_test.py

@@ -6,16 +6,13 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
+import ldap
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -23,48 +20,7 @@ ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config'
 ENTRY_NAME = 'test_entry'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47808_run(topology):
+def test_ticket47808_run(topology_st):
     """
         It enables attribute uniqueness plugin with sn as a unique attribute
         Add an entry 1 with sn = ENTRY_NAME
@@ -74,16 +30,17 @@ def test_ticket47808_run(topology):
     """
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    topology.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n")
+    topology_st.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n")
 
     # enable attribute uniqueness plugin
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)]
-    topology.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod)
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'),
+           (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)]
+    topology_st.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod)
 
-    topology.standalone.log.info("\n\n######################### ADD USER 1 ######################\n")
+    topology_st.standalone.log.info("\n\n######################### ADD USER 1 ######################\n")
 
     # Prepare entry 1
     entry_name = '%s 1' % (ENTRY_NAME)
@@ -92,14 +49,14 @@ def test_ticket47808_run(topology):
     entry_1.setValues('objectclass', 'top', 'person')
     entry_1.setValues('sn', ENTRY_NAME)
     entry_1.setValues('cn', entry_name)
-    topology.standalone.log.info("Try to add Add %s: %r" % (entry_1, entry_1))
-    topology.standalone.add_s(entry_1)
+    topology_st.standalone.log.info("Try to add %s: %r" % (entry_1, entry_1))
+    topology_st.standalone.add_s(entry_1)
 
-    topology.standalone.log.info("\n\n######################### Restart Server ######################\n")
-    topology.standalone.stop(timeout=10)
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.log.info("\n\n######################### Restart Server ######################\n")
+    topology_st.standalone.stop(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
-    topology.standalone.log.info("\n\n######################### ADD USER 2 ######################\n")
+    topology_st.standalone.log.info("\n\n######################### ADD USER 2 ######################\n")
 
     # Prepare entry 2 having the same sn, which crashes the server
     entry_name = '%s 2' % (ENTRY_NAME)
@@ -108,29 +65,29 @@ def test_ticket47808_run(topology):
     entry_2.setValues('objectclass', 'top', 'person')
     entry_2.setValues('sn', ENTRY_NAME)
     entry_2.setValues('cn', entry_name)
-    topology.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2))
+    topology_st.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2))
     try:
-        topology.standalone.add_s(entry_2)
+        topology_st.standalone.add_s(entry_2)
     except:
-        topology.standalone.log.warn("Adding %s failed" % entry_dn_2)
+        topology_st.standalone.log.warn("Adding %s failed" % entry_dn_2)
         pass
 
-    topology.standalone.log.info("\n\n######################### IS SERVER UP? ######################\n")
-    ents = topology.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)')
+    topology_st.standalone.log.info("\n\n######################### IS SERVER UP? ######################\n")
+    ents = topology_st.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)')
     assert len(ents) == 1
-    topology.standalone.log.info("Yes, it's up.")
+    topology_st.standalone.log.info("Yes, it's up.")
 
-    topology.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n")
-    topology.standalone.log.info("Try to search %s" % entry_dn_2)
+    topology_st.standalone.log.info("\n\n######################### CHECK USER 2 NOT ADDED ######################\n")
+    topology_st.standalone.log.info("Try to search %s" % entry_dn_2)
     try:
-        ents = topology.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)')
+        ents = topology_st.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)')
     except ldap.NO_SUCH_OBJECT:
-        topology.standalone.log.info("Found none")
+        topology_st.standalone.log.info("Found none")
 
-    topology.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n")
+    topology_st.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n")
 
-    topology.standalone.log.info("Try to delete  %s " % entry_dn_1)
-    topology.standalone.delete_s(entry_dn_1)
+    topology_st.standalone.log.info("Try to delete  %s " % entry_dn_1)
+    topology_st.standalone.delete_s(entry_dn_1)
     log.info('Testcase PASSED')
 
 

+ 31 - 73
dirsrvtests/tests/tickets/ticket47815_test.py

@@ -6,62 +6,19 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47815(topology):
+def test_ticket47815(topology_st):
     """
         Test betxn plugins reject an invalid option, and make sure that the rejected entry
         is not in the entry cache.
@@ -75,41 +32,42 @@ def test_ticket47815(topology):
     result = 0
     result2 = 0
 
-    log.info('Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache')
+    log.info(
+        'Testing Ticket 47815 - Add entries that should be rejected by the betxn plugins, and are not left in the entry cache')
 
     # Enabled the plugins
-    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
-    topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
-    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
 
     # configure automember config entry
     log.info('Adding automember config')
     try:
-        topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
-                                     'objectclass': 'top autoMemberDefinition'.split(),
-                                     'autoMemberScope': 'dc=example,dc=com',
-                                     'autoMemberFilter': 'cn=user',
-                                     'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com',
-                                     'autoMemberGroupingAttr': 'member:dn',
-                                     'cn': 'group cfg'})))
+        topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
+            'objectclass': 'top autoMemberDefinition'.split(),
+            'autoMemberScope': 'dc=example,dc=com',
+            'autoMemberFilter': 'cn=user',
+            'autoMemberDefaultGroup': 'cn=group,dc=example,dc=com',
+            'autoMemberGroupingAttr': 'member:dn',
+            'cn': 'group cfg'})))
     except:
         log.error('Failed to add automember config')
         exit(1)
 
-    topology.standalone.stop(timeout=120)
+    topology_st.standalone.stop(timeout=120)
     time.sleep(1)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
     time.sleep(3)
 
     # need to reopen a connection toward the instance
-    topology.standalone.open()
+    topology_st.standalone.open()
 
     # add automember group
     log.info('Adding automember group')
     try:
-        topology.standalone.add_s(Entry(('cn=group,dc=example,dc=com', {
-                                  'objectclass': 'top groupOfNames'.split(),
-                                  'cn': 'group'})))
+        topology_st.standalone.add_s(Entry(('cn=group,dc=example,dc=com', {
+            'objectclass': 'top groupOfNames'.split(),
+            'cn': 'group'})))
     except:
         log.error('Failed to add automember group')
         exit(1)
@@ -118,10 +76,10 @@ def test_ticket47815(topology):
     log.info('Adding invalid entry')
 
     try:
-        topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'user'})))
+        topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'user'})))
     except ldap.UNWILLING_TO_PERFORM:
         log.debug('Adding invalid entry failed as expected')
         result = 53
@@ -134,10 +92,10 @@ def test_ticket47815(topology):
 
     # Attempt to add user again, should result in error 53 again
     try:
-        topology.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'user'})))
+        topology_st.standalone.add_s(Entry(('cn=user,dc=example,dc=com', {
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'user'})))
     except ldap.UNWILLING_TO_PERFORM:
         log.debug('2nd add of invalid entry failed as expected')
         result2 = 53

+ 29 - 77
dirsrvtests/tests/tickets/ticket47819_test.py

@@ -6,63 +6,16 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47819(topology):
+def test_ticket47819(topology_st):
     """
         Testing precise tombstone purging:
             [1]  Make sure "nsTombstoneCSN" is added to new tombstones
@@ -78,8 +31,8 @@ def test_ticket47819(topology):
     # Setup Replication
     #
     log.info('Setting up replication...')
-    topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
-                                                  replicaId=REPLICAID_MASTER_1)
+    topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+                                                     replicaId=REPLICAID_MASTER_1)
 
     #
     # Part 1 create a tombstone entry and make sure nsTombstoneCSN is added
@@ -87,24 +40,24 @@ def test_ticket47819(topology):
     log.info('Part 1:  Add and then delete an entry to create a tombstone...')
 
     try:
-        topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'entry1'})))
+        topology_st.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'entry1'})))
     except ldap.LDAPError as e:
         log.error('Failed to add entry: ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.delete_s('cn=entry1,dc=example,dc=com')
+        topology_st.standalone.delete_s('cn=entry1,dc=example,dc=com')
     except ldap.LDAPError as e:
         log.error('Failed to delete entry: ' + e.message['desc'])
         assert False
 
     log.info('Search for tombstone entries...')
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                               '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                  '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
         if not entries:
             log.fatal('Search failed to the new tombstone(nsTombstoneCSN is probably missing).')
             assert False
@@ -127,7 +80,7 @@ def test_ticket47819(topology):
 
     args = {EXPORT_REPL_INFO: True,
             TASK_WAIT: True}
-    exportTask = Tasks(topology.standalone)
+    exportTask = Tasks(topology_st.standalone)
     try:
         exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
     except ValueError:
@@ -149,7 +102,7 @@ def test_ticket47819(topology):
 
     # import the new ldif file
     log.info('Import replication LDIF file...')
-    importTask = Tasks(topology.standalone)
+    importTask = Tasks(topology_st.standalone)
     args = {TASK_WAIT: True}
     try:
         importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
@@ -162,8 +115,8 @@ def test_ticket47819(topology):
     # Search for the tombstone again
     log.info('Search for tombstone entries...')
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                               '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                  '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
         if not entries:
             log.fatal('Search failed to fine the new tombstone(nsTombstoneCSN is probably missing).')
             assert False
@@ -182,7 +135,7 @@ def test_ticket47819(topology):
     # so we can test if the fixup task works.
     args = {TASK_WAIT: True,
             TASK_TOMB_STRIP: True}
-    fixupTombTask = Tasks(topology.standalone)
+    fixupTombTask = Tasks(topology_st.standalone)
     try:
         fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
     except:
@@ -192,8 +145,8 @@ def test_ticket47819(topology):
     # Search for tombstones with nsTombstoneCSN - better not find any
     log.info('Search for tombstone entries...')
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                               '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                  '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
         if entries:
             log.fatal('Search found tombstones with nsTombstoneCSN')
             assert False
@@ -201,10 +154,9 @@ def test_ticket47819(topology):
         log.fatal('Search failed: ' + e.message['desc'])
         assert False
 
-
     # Now run the fixup task
     args = {TASK_WAIT: True}
-    fixupTombTask = Tasks(topology.standalone)
+    fixupTombTask = Tasks(topology_st.standalone)
     try:
         fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
     except:
@@ -214,8 +166,8 @@ def test_ticket47819(topology):
     # Search for tombstones with nsTombstoneCSN - better find some
     log.info('Search for tombstone entries...')
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                               '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                  '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
         if not entries:
             log.fatal('Search did not find any fixed-up tombstones')
             assert False
@@ -234,7 +186,7 @@ def test_ticket47819(topology):
             REPLICA_PURGE_DELAY: '5',
             REPLICA_PURGE_INTERVAL: '5'}
     try:
-        topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+        topology_st.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
     except:
         log.fatal('Failed to configure replica')
         assert False
@@ -246,10 +198,10 @@ def test_ticket47819(topology):
     # Add an entry to trigger replication
     log.info('Perform an update to help trigger tombstone purging...')
     try:
-        topology.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
-                                  'objectclass': 'top person'.split(),
-                                  'sn': 'user',
-                                  'cn': 'entry1'})))
+        topology_st.standalone.add_s(Entry(('cn=test_entry,dc=example,dc=com', {
+            'objectclass': 'top person'.split(),
+            'sn': 'user',
+            'cn': 'entry1'})))
     except ldap.LDAPError as e:
         log.error('Failed to add entry: ' + e.message['desc'])
         assert False
@@ -261,8 +213,8 @@ def test_ticket47819(topology):
     # search for tombstones, there should be none
     log.info('Search for tombstone entries...')
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                               '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                  '(&(nsTombstoneCSN=*)(objectclass=nsTombstone))')
         if entries:
             log.fatal('Search unexpectedly found tombstones')
             assert False

+ 364 - 379
dirsrvtests/tests/tickets/ticket47823_test.py

@@ -6,20 +6,17 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
 import re
 import shutil
 import subprocess
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+import time
 
+import ldap
+import pytest
+from lib389 import Entry
+from lib389._constants import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -27,11 +24,11 @@ PROVISIONING_CN = "provisioning"
 PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX)
 
 ACTIVE_CN = "accounts"
-STAGE_CN  = "staged users"
+STAGE_CN = "staged users"
 DELETE_CN = "deleted users"
 ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX)
-STAGE_DN  = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN  = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
 
 STAGE_USER_CN = "stage guy"
 STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -53,83 +50,45 @@ ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-plugina
                     'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees']
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
 
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
 
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("###############################################")
-
-
-def _uniqueness_config_entry(topology, name=None):
+def _uniqueness_config_entry(topology_st, name=None):
     if not name:
         return None
 
-    ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
-                                    "(objectclass=nsSlapdPlugin)",
-                                    ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
-                                     'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type',
-                                     'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
-                                     'nsslapd-pluginDescription'])
+    ent = topology_st.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
+                                          "(objectclass=nsSlapdPlugin)",
+                                          ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
+                                           'nsslapd-pluginType', 'nsslapd-pluginEnabled',
+                                           'nsslapd-plugin-depends-on-type',
+                                           'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
+                                           'nsslapd-pluginDescription'])
     ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN)
     return ent
 
 
-def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False):
-    assert topology
+def _build_config(topology_st, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old',
+                  across_subtrees=False):
+    assert topology_st
     assert attr_name
     assert subtree_1
 
     if type_config == 'old':
         # enable the 'cn' uniqueness on Active
-        config = _uniqueness_config_entry(topology, attr_name)
+        config = _uniqueness_config_entry(topology_st, attr_name)
         config.setValue('nsslapd-pluginarg0', attr_name)
         config.setValue('nsslapd-pluginarg1', subtree_1)
         if subtree_2:
             config.setValue('nsslapd-pluginarg2', subtree_2)
     else:
         # prepare the config entry
-        config = _uniqueness_config_entry(topology, attr_name)
+        config = _uniqueness_config_entry(topology_st, attr_name)
         config.setValue('uniqueness-attribute-name', attr_name)
         config.setValue('uniqueness-subtrees', subtree_1)
         if subtree_2:
@@ -139,160 +98,163 @@ def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type
     return config
 
 
-def _active_container_invalid_cfg_add(topology):
+def _active_container_invalid_cfg_add(topology_st):
     '''
     Check uniqueness is not enforced with ADD (invalid config)
     '''
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           ACTIVE_USER_1_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': ACTIVE_USER_1_CN})))
 
-    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
-                                        'objectclass': "top person".split(),
-                                        'sn':           ACTIVE_USER_2_CN,
-                                        'cn':           [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_2_CN,
+        'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
 
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
-    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
 
 
-def _active_container_add(topology, type_config='old'):
+def _active_container_add(topology_st, type_config='old'):
     '''
     Check uniqueness in a single container (Active)
     Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value
 
     '''
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config,
+                           across_subtrees=False)
 
     # remove the 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.restart(timeout=120)
-
-    topology.standalone.log.info('Uniqueness not enforced: create the entries')
+    topology_st.standalone.restart(timeout=120)
 
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           ACTIVE_USER_1_CN})))
+    topology_st.standalone.log.info('Uniqueness not enforced: create the entries')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
-                                        'objectclass': "top person".split(),
-                                        'sn':           ACTIVE_USER_2_CN,
-                                        'cn':           [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': ACTIVE_USER_1_CN})))
 
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
-    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_2_CN,
+        'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
 
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
 
-    topology.standalone.log.info('Uniqueness enforced: checks second entry is rejected')
+    topology_st.standalone.log.info('Uniqueness enforced: checks second entry is rejected')
 
     # enable the 'cn' uniqueness on Active
-    topology.standalone.add_s(config)
-    topology.standalone.restart(timeout=120)
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           ACTIVE_USER_1_CN})))
+    topology_st.standalone.add_s(config)
+    topology_st.standalone.restart(timeout=120)
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': ACTIVE_USER_1_CN})))
 
     try:
-        topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
-                                        'objectclass': "top person".split(),
-                                        'sn':           ACTIVE_USER_2_CN,
-                                        'cn':           [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+        topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+            'objectclass': "top person".split(),
+            'sn': ACTIVE_USER_2_CN,
+            'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
     except ldap.CONSTRAINT_VIOLATION:
         # yes it is expected
         pass
 
     # cleanup the stuff now
-    topology.standalone.delete_s(config.dn)
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(config.dn)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
 
 
-def _active_container_mod(topology, type_config='old'):
+def _active_container_mod(topology_st, type_config='old'):
     '''
     Check uniqueness in a single container (active)
     Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value
 
     '''
 
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config,
+                           across_subtrees=False)
 
     # enable the 'cn' uniqueness on Active
-    topology.standalone.add_s(config)
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.add_s(config)
+    topology_st.standalone.restart(timeout=120)
 
-    topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected')
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           ACTIVE_USER_1_CN})))
+    topology_st.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected')
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': ACTIVE_USER_1_CN})))
 
-    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           ACTIVE_USER_2_CN,
-                                    'cn':           ACTIVE_USER_2_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_2_CN,
+        'cn': ACTIVE_USER_2_CN})))
 
     try:
-        topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)])
+        topology_st.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)])
     except ldap.CONSTRAINT_VIOLATION:
         # yes it is expected
         pass
 
-    topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected')
+    topology_st.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected')
     try:
-        topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])])
+        topology_st.standalone.modify_s(ACTIVE_USER_2_DN,
+                                        [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])])
     except ldap.CONSTRAINT_VIOLATION:
         # yes it is expected
         pass
 
     # cleanup the stuff now
-    topology.standalone.delete_s(config.dn)
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
-    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+    topology_st.standalone.delete_s(config.dn)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
 
 
-def _active_container_modrdn(topology, type_config='old'):
+def _active_container_modrdn(topology_st, type_config='old'):
     '''
     Check uniqueness in a single container
     Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
 
     '''
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config,
+                           across_subtrees=False)
 
     # enable the 'cn' uniqueness on Active
-    topology.standalone.add_s(config)
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.add_s(config)
+    topology_st.standalone.restart(timeout=120)
 
-    topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected')
+    topology_st.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           [ACTIVE_USER_1_CN, 'dummy']})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': [ACTIVE_USER_1_CN, 'dummy']})))
 
-    topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           ACTIVE_USER_2_CN,
-                                    'cn':           ACTIVE_USER_2_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_2_CN,
+        'cn': ACTIVE_USER_2_CN})))
 
     try:
-        topology.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0)
+        topology_st.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0)
     except ldap.CONSTRAINT_VIOLATION:
         # yes it is expected
         pass
 
     # cleanup the stuff now
-    topology.standalone.delete_s(config.dn)
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
-    topology.standalone.delete_s(ACTIVE_USER_2_DN)
+    topology_st.standalone.delete_s(config.dn)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(ACTIVE_USER_2_DN)
 
 
-def _active_stage_containers_add(topology, type_config='old', across_subtrees=False):
+def _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False):
     '''
     Check uniqueness in several containers
     Add an entry on a container with a given 'cn'
@@ -300,104 +262,109 @@ def _active_stage_containers_add(topology, type_config='old', across_subtrees=Fa
     with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container
 
     '''
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
+                           type_config=type_config, across_subtrees=False)
 
-    topology.standalone.add_s(config)
-    topology.standalone.restart(timeout=120)
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           ACTIVE_USER_1_CN})))
+    topology_st.standalone.add_s(config)
+    topology_st.standalone.restart(timeout=120)
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': ACTIVE_USER_1_CN})))
     try:
 
         # adding an entry on a separated contains with the same 'cn'
-        topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           STAGE_USER_1_CN,
-                                    'cn':           ACTIVE_USER_1_CN})))
+        topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+            'objectclass': "top person".split(),
+            'sn': STAGE_USER_1_CN,
+            'cn': ACTIVE_USER_1_CN})))
     except ldap.CONSTRAINT_VIOLATION:
-            assert across_subtrees
+        assert across_subtrees
 
     # cleanup the stuff now
-    topology.standalone.delete_s(config.dn)
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
-    topology.standalone.delete_s(STAGE_USER_1_DN)
+    topology_st.standalone.delete_s(config.dn)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(STAGE_USER_1_DN)
 
 
-def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False):
+def _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False):
     '''
     Check uniqueness in a several containers
     Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container
 
     '''
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
+                           type_config=type_config, across_subtrees=False)
 
-    topology.standalone.add_s(config)
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.add_s(config)
+    topology_st.standalone.restart(timeout=120)
     # adding an entry on active with a different 'cn'
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           ACTIVE_USER_2_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': ACTIVE_USER_2_CN})))
 
     # adding an entry on a stage with a different 'cn'
-    topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           STAGE_USER_1_CN,
-                                    'cn':           STAGE_USER_1_CN})))
+    topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': STAGE_USER_1_CN,
+        'cn': STAGE_USER_1_CN})))
 
     try:
 
         # modify add same value
-        topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])])
+        topology_st.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])])
     except ldap.CONSTRAINT_VIOLATION:
         assert across_subtrees
 
-    topology.standalone.delete_s(STAGE_USER_1_DN)
-    topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           STAGE_USER_1_CN,
-                                    'cn':           STAGE_USER_2_CN})))
+    topology_st.standalone.delete_s(STAGE_USER_1_DN)
+    topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': STAGE_USER_1_CN,
+        'cn': STAGE_USER_2_CN})))
     try:
         # modify replace same value
-        topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])])
+        topology_st.standalone.modify_s(STAGE_USER_1_DN,
+                                        [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])])
     except ldap.CONSTRAINT_VIOLATION:
         assert across_subtrees
 
     # cleanup the stuff now
-    topology.standalone.delete_s(config.dn)
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
-    topology.standalone.delete_s(STAGE_USER_1_DN)
+    topology_st.standalone.delete_s(config.dn)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(STAGE_USER_1_DN)
 
 
-def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False):
+def _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False):
     '''
     Check uniqueness in a several containers
     Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container
 
     '''
 
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN,
+                           type_config=type_config, across_subtrees=False)
 
     # enable the 'cn' uniqueness on Active and Stage
-    topology.standalone.add_s(config)
-    topology.standalone.restart(timeout=120)
-    topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           ACTIVE_USER_1_CN,
-                                            'cn':           [ACTIVE_USER_1_CN, 'dummy']})))
-
-    topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           STAGE_USER_1_CN,
-                                    'cn':           STAGE_USER_1_CN})))
+    topology_st.standalone.add_s(config)
+    topology_st.standalone.restart(timeout=120)
+    topology_st.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': ACTIVE_USER_1_CN,
+        'cn': [ACTIVE_USER_1_CN, 'dummy']})))
+
+    topology_st.standalone.add_s(Entry((STAGE_USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': STAGE_USER_1_CN,
+        'cn': STAGE_USER_1_CN})))
 
     try:
 
-        topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0)
+        topology_st.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0)
 
         # check stage entry has 'cn=dummy'
-        stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn'])
+        stage_ent = topology_st.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*",
+                                                    ['cn'])
         assert stage_ent.hasAttr('cn')
         found = False
         for value in stage_ent.getValues('cn'):
@@ -406,7 +373,7 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees
         assert found
 
         # check active entry has 'cn=dummy'
-        active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn'])
+        active_ent = topology_st.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn'])
         assert active_ent.hasAttr('cn')
         found = False
         for value in stage_ent.getValues('cn'):
@@ -414,19 +381,19 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees
                 found = True
         assert found
 
-        topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN))
+        topology_st.standalone.delete_s("cn=dummy,%s" % (STAGE_DN))
     except ldap.CONSTRAINT_VIOLATION:
         assert across_subtrees
-        topology.standalone.delete_s(STAGE_USER_1_DN)
+        topology_st.standalone.delete_s(STAGE_USER_1_DN)
 
     # cleanup the stuff now
-    topology.standalone.delete_s(config.dn)
-    topology.standalone.delete_s(ACTIVE_USER_1_DN)
+    topology_st.standalone.delete_s(config.dn)
+    topology_st.standalone.delete_s(ACTIVE_USER_1_DN)
 
 
-def _config_file(topology, action='save'):
-    dse_ldif = topology.standalone.confdir + '/dse.ldif'
-    sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823'
+def _config_file(topology_st, action='save'):
+    dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
+    sav_file = topology_st.standalone.confdir + '/dse.ldif.ticket47823'
     if action == 'save':
         shutil.copy(dse_ldif, sav_file)
     else:
@@ -458,513 +425,531 @@ def _pattern_errorlog(file, log_pattern):
     return found
 
 
-def test_ticket47823_init(topology):
+def test_ticket47823_init(topology_st):
     """
 
     """
 
     # Enabled the plugins
-    topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
-    topology.standalone.restart(timeout=120)
-
-    topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(),
-                                                       'cn': PROVISIONING_CN})))
-    topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(),
-                                                 'cn': ACTIVE_CN})))
-    topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(),
-                                                'cn': STAGE_CN})))
-    topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(),
-                                                 'cn': DELETE_CN})))
-    topology.standalone.errorlog_file = open(topology.standalone.errlog, "r")
-
-    topology.standalone.stop(timeout=120)
+    topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+    topology_st.standalone.restart(timeout=120)
+
+    topology_st.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(),
+                                                          'cn': PROVISIONING_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(),
+                                                    'cn': ACTIVE_CN})))
+    topology_st.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(),
+                                                   'cn': STAGE_CN})))
+    topology_st.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(),
+                                                    'cn': DELETE_CN})))
+    topology_st.standalone.errorlog_file = open(topology_st.standalone.errlog, "r")
+
+    topology_st.standalone.stop(timeout=120)
     time.sleep(1)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
     time.sleep(3)
 
 
-def test_ticket47823_one_container_add(topology):
+def test_ticket47823_one_container_add(topology_st):
     '''
     Check uniqueness in a single container
     Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
 
     '''
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
+    _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
 
-    _active_container_add(topology, type_config='old')
+    _active_container_add(topology_st, type_config='old')
 
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
+    _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
 
-    _active_container_add(topology, type_config='new')
+    _active_container_add(topology_st, type_config='new')
 
 
-def test_ticket47823_one_container_mod(topology):
+def test_ticket47823_one_container_mod(topology_st):
     '''
     Check uniqueness in a single container
     Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value
 
     '''
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD)")
+    _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD)")
 
-    _active_container_mod(topology, type_config='old')
+    _active_container_mod(topology_st, type_config='old')
 
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD)")
+    _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD)")
 
-    _active_container_mod(topology, type_config='new')
+    _active_container_mod(topology_st, type_config='new')
 
 
-def test_ticket47823_one_container_modrdn(topology):
+def test_ticket47823_one_container_modrdn(topology_st):
     '''
     Check uniqueness in a single container
     Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
 
     '''
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
+    _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
 
-    _active_container_modrdn(topology, type_config='old')
+    _active_container_modrdn(topology_st, type_config='old')
 
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
+    _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
 
-    _active_container_modrdn(topology, type_config='new')
+    _active_container_modrdn(topology_st, type_config='new')
 
 
-def test_ticket47823_multi_containers_add(topology):
+def test_ticket47823_multi_containers_add(topology_st):
     '''
     Check uniqueness in a several containers
     Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
 
     '''
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
+    _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
 
-    _active_stage_containers_add(topology, type_config='old', across_subtrees=False)
+    _active_stage_containers_add(topology_st, type_config='old', across_subtrees=False)
 
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
+    _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
 
-    _active_stage_containers_add(topology, type_config='new', across_subtrees=False)
+    _active_stage_containers_add(topology_st, type_config='new', across_subtrees=False)
 
 
-def test_ticket47823_multi_containers_mod(topology):
+def test_ticket47823_multi_containers_mod(topology_st):
     '''
     Check uniqueness in a several containers
     Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container
 
     '''
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
+    _header(topology_st, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
 
-    topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
-    _active_stage_containers_mod(topology, type_config='old', across_subtrees=False)
+    topology_st.standalone.log.info(
+        'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
+    _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=False)
 
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
+    _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
 
-    topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
-    _active_stage_containers_mod(topology, type_config='new', across_subtrees=False)
+    topology_st.standalone.log.info(
+        'Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
+    _active_stage_containers_mod(topology_st, type_config='new', across_subtrees=False)
 
 
-def test_ticket47823_multi_containers_modrdn(topology):
+def test_ticket47823_multi_containers_modrdn(topology_st):
     '''
     Check uniqueness in a several containers
     Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container
 
     '''
-    _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")
+    _header(topology_st,
+            "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")
 
-    topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
-    _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False)
+    topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
+    _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=False)
 
-    topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
-    _active_stage_containers_modrdn(topology, type_config='old')
+    topology_st.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
+    _active_stage_containers_modrdn(topology_st, type_config='new')
 
 
-def test_ticket47823_across_multi_containers_add(topology):
+def test_ticket47823_across_multi_containers_add(topology_st):
     '''
     Check uniqueness across several containers, uniquely with the new configuration
     Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
 
     '''
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers")
+    _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers")
 
-    _active_stage_containers_add(topology, type_config='old', across_subtrees=True)
+    _active_stage_containers_add(topology_st, type_config='old', across_subtrees=True)
 
 
-def test_ticket47823_across_multi_containers_mod(topology):
+def test_ticket47823_across_multi_containers_mod(topology_st):
     '''
     Check uniqueness across several containers, uniquely with the new configuration
     Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value
 
     '''
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers")
+    _header(topology_st, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers")
 
-    _active_stage_containers_mod(topology, type_config='old', across_subtrees=True)
+    _active_stage_containers_mod(topology_st, type_config='old', across_subtrees=True)
 
 
-def test_ticket47823_across_multi_containers_modrdn(topology):
+def test_ticket47823_across_multi_containers_modrdn(topology_st):
     '''
     Check uniqueness across several containers, uniquely with the new configuration
     Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
 
     '''
-    _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers")
+    _header(topology_st,
+            "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers")
 
-    _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=True)
+    _active_stage_containers_modrdn(topology_st, type_config='old', across_subtrees=True)
 
 
-def test_ticket47823_invalid_config_1(topology):
+def test_ticket47823_invalid_config_1(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using old config: arg0 is missing
     '''
-    _header(topology, "Invalid config (old): arg0 is missing")
+    _header(topology_st, "Invalid config (old): arg0 is missing")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create an invalid config without arg0
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+                           across_subtrees=False)
 
     del config.data['nsslapd-pluginarg0']
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
-    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+    topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("Unable to parse old style")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 
 
-def test_ticket47823_invalid_config_2(topology):
+def test_ticket47823_invalid_config_2(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using old config: arg1 is missing
     '''
-    _header(topology, "Invalid config (old): arg1 is missing")
+    _header(topology_st, "Invalid config (old): arg1 is missing")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create an invalid config without arg0
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+                           across_subtrees=False)
 
     del config.data['nsslapd-pluginarg1']
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("No valid subtree is defined")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 
 
-def test_ticket47823_invalid_config_3(topology):
+def test_ticket47823_invalid_config_3(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using old config: arg0 is missing
     '''
-    _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists")
+    _header(topology_st, "Invalid config (old): arg0 is missing but new config attrname exists")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create an invalid config without arg0
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+                           across_subtrees=False)
 
     del config.data['nsslapd-pluginarg0']
     config.data['uniqueness-attribute-name'] = 'cn'
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
-    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+    topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("Unable to parse old style")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 
 
-def test_ticket47823_invalid_config_4(topology):
+def test_ticket47823_invalid_config_4(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using old config: arg1 is missing
     '''
-    _header(topology, "Invalid config (old): arg1 is missing but new config exist")
+    _header(topology_st, "Invalid config (old): arg1 is missing but new config exist")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create an invalid config without arg0
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old',
+                           across_subtrees=False)
 
     del config.data['nsslapd-pluginarg1']
     config.data['uniqueness-subtrees'] = ACTIVE_DN
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("No valid subtree is defined")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 
 
-def test_ticket47823_invalid_config_5(topology):
+def test_ticket47823_invalid_config_5(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using new config: uniqueness-attribute-name is missing
     '''
-    _header(topology, "Invalid config (new): uniqueness-attribute-name is missing")
+    _header(topology_st, "Invalid config (new): uniqueness-attribute-name is missing")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create an invalid config without uniqueness-attribute-name
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new',
+                           across_subtrees=False)
 
     del config.data['uniqueness-attribute-name']
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("Attribute name not defined")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 
 
-def test_ticket47823_invalid_config_6(topology):
+def test_ticket47823_invalid_config_6(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using new config: uniqueness-subtrees is missing
     '''
-    _header(topology, "Invalid config (new): uniqueness-subtrees is missing")
+    _header(topology_st, "Invalid config (new): uniqueness-subtrees is missing")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create an invalid config without uniqueness-subtrees
-    config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new',
+                           across_subtrees=False)
 
     del config.data['uniqueness-subtrees']
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("Objectclass for subtree entries is not defined")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 
 
-def test_ticket47823_invalid_config_7(topology):
+def test_ticket47823_invalid_config_7(topology_st):
     '''
     Check that an invalid config is detected. No uniqueness enforced
     Using new config: uniqueness-subtrees are invalid
     '''
-    _header(topology, "Invalid config (new): uniqueness-subtrees are invalid")
+    _header(topology_st, "Invalid config (new): uniqueness-subtrees are invalid")
 
-    _config_file(topology, action='save')
+    _config_file(topology_st, action='save')
 
     # create a config with invalid subtree DNs
-    config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False)
+    config = _build_config(topology_st, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN",
+                           type_config='new', across_subtrees=False)
 
-    topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
+    topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '65536')])
     # replace 'cn' uniqueness entry
     try:
-        topology.standalone.delete_s(config.dn)
+        topology_st.standalone.delete_s(config.dn)
 
     except ldap.NO_SUCH_OBJECT:
         pass
-    topology.standalone.add_s(config)
+    topology_st.standalone.add_s(config)
 
-    topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+    topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
 
     # Check the server did not restart
     try:
-        topology.standalone.restart(timeout=5)
-        ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.restart(timeout=5)
+        ent = topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)",
+                                              ALL_CONFIG_ATTRS)
         if ent:
             # be sure to restore a valid config before assert
-            _config_file(topology, action='restore')
+            _config_file(topology_st, action='restore')
         assert not ent
     except subprocess.CalledProcessError:
-            pass
+        pass
 
     # Check the expected error message
     regex = re.compile("No valid subtree is defined")
-    res = _pattern_errorlog(topology.standalone.errorlog_file, regex)
+    res = _pattern_errorlog(topology_st.standalone.errorlog_file, regex)
     if not res:
         # be sure to restore a valid config before assert
-        _config_file(topology, action='restore')
+        _config_file(topology_st, action='restore')
     assert res
 
     # Check we can restart the server
-    _config_file(topology, action='restore')
-    topology.standalone.start(timeout=5)
+    _config_file(topology_st, action='restore')
+    topology_st.standalone.start(timeout=5)
     try:
-        topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+        topology_st.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
     except ldap.NO_SUCH_OBJECT:
         pass
 

+ 330 - 331
dirsrvtests/tests/tickets/ticket47828_test.py

@@ -6,18 +6,13 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
-import socket
+
+import ldap
 import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -32,73 +27,33 @@ ACTIVE_USER1_CN = 'active user1'
 ACTIVE_USER1_DN = 'cn=%s,%s' % (ACTIVE_USER1_CN, SUFFIX)
 STAGED_USER1_CN = 'staged user1'
 STAGED_USER1_DN = 'cn=%s,%s' % (STAGED_USER1_CN, PROVISIONING)
-DUMMY_USER1_CN  = 'dummy user1'
-DUMMY_USER1_DN  = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER)
+DUMMY_USER1_CN = 'dummy user1'
+DUMMY_USER1_DN = 'cn=%s,%s' % (DUMMY_USER1_CN, DUMMY_CONTAINER)
 
 ALLOCATED_ATTR = 'employeeNumber'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-        At the beginning, It may exists a standalone instance.
-        It may also exists a backup for the standalone instance.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
 
-    return TopologyStandalone(standalone)
 
-
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("###############################################")
-
-
-def test_ticket47828_init(topology):
+def test_ticket47828_init(topology_st):
     """
     Enable DNA
     """
-    topology.standalone.plugins.enable(name=PLUGIN_DNA)
+    topology_st.standalone.plugins.enable(name=PLUGIN_DNA)
 
-    topology.standalone.add_s(Entry((PROVISIONING,{'objectclass': "top nscontainer".split(),
-                                                  'cn': 'provisioning'})))
-    topology.standalone.add_s(Entry((DUMMY_CONTAINER,{'objectclass': "top nscontainer".split(),
-                                                  'cn': 'dummy container'})))
+    topology_st.standalone.add_s(Entry((PROVISIONING, {'objectclass': "top nscontainer".split(),
+                                                       'cn': 'provisioning'})))
+    topology_st.standalone.add_s(Entry((DUMMY_CONTAINER, {'objectclass': "top nscontainer".split(),
+                                                          'cn': 'dummy container'})))
 
     dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
-    topology.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(),
+    topology_st.standalone.add_s(Entry((dn_config, {'objectclass': "top extensibleObject".split(),
                                                     'cn': 'excluded scope',
                                                     'dnaType': ALLOCATED_ATTR,
                                                     'dnaNextValue': str(1000),
@@ -106,538 +61,583 @@ def test_ticket47828_init(topology):
                                                     'dnaMagicRegen': str(-1),
                                                     'dnaFilter': '(&(objectClass=person)(objectClass=organizationalPerson)(objectClass=inetOrgPerson))',
                                                     'dnaScope': SUFFIX})))
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
 
-def test_ticket47828_run_0(topology):
+def test_ticket47828_run_0(topology_st):
     """
     NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_1(topology):
+def test_ticket47828_run_1(topology_st):
     """
     NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st, 'NO exclude scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_2(topology):
+def test_ticket47828_run_2(topology_st):
     """
     NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is  set
     """
-    _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is  set')
+    _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is  set')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_3(topology):
+def test_ticket47828_run_3(topology_st):
     """
     NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st, 'NO exclude scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_4(topology):
+def test_ticket47828_run_4(topology_st):
     '''
     Exclude the provisioning container
     '''
-    _header(topology, 'Exclude the provisioning container')
+    _header(topology_st, 'Exclude the provisioning container')
 
     dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
     mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)]
-    topology.standalone.modify_s(dn_config, mod)
+    topology_st.standalone.modify_s(dn_config, mod)
 
 
-def test_ticket47828_run_5(topology):
+def test_ticket47828_run_5(topology_st):
     """
     Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_6(topology):
+def test_ticket47828_run_6(topology_st):
     """
     Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_7(topology):
+def test_ticket47828_run_7(topology_st):
     """
     Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set
     """
-    _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set')
+    _header(topology_st, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_8(topology):
+def test_ticket47828_run_8(topology_st):
     """
     Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_9(topology):
+def test_ticket47828_run_9(topology_st):
     """
     Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_10(topology):
+def test_ticket47828_run_10(topology_st):
     """
     Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_11(topology):
+def test_ticket47828_run_11(topology_st):
     '''
     Exclude (in addition) the dummy container
     '''
-    _header(topology, 'Exclude (in addition) the dummy container')
+    _header(topology_st, 'Exclude (in addition) the dummy container')
 
     dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
     mod = [(ldap.MOD_ADD, 'dnaExcludeScope', DUMMY_CONTAINER)]
-    topology.standalone.modify_s(dn_config, mod)
+    topology_st.standalone.modify_s(dn_config, mod)
 
 
-def test_ticket47828_run_12(topology):
+def test_ticket47828_run_12(topology_st):
     """
     Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_13(topology):
+def test_ticket47828_run_13(topology_st):
     """
     Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning/Dummy excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_14(topology):
+def test_ticket47828_run_14(topology_st):
     """
     Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set
     """
-    _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
+    _header(topology_st,
+            'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is not set')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_15(topology):
+def test_ticket47828_run_15(topology_st):
     """
     Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning/Dummy excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_16(topology):
+def test_ticket47828_run_16(topology_st):
     """
     Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is not set
     """
-    _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set')
+    _header(topology_st,
+            'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR not is set')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_17(topology):
+def test_ticket47828_run_17(topology_st):
     """
     Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning/Dummy excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_18(topology):
+def test_ticket47828_run_18(topology_st):
     '''
     Exclude PROVISIONING and a wrong container
     '''
-    _header(topology, 'Exclude PROVISIONING and a wrong container')
+    _header(topology_st, 'Exclude PROVISIONING and a wrong container')
 
     dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
     mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', PROVISIONING)]
-    topology.standalone.modify_s(dn_config, mod)
+    topology_st.standalone.modify_s(dn_config, mod)
     try:
         mod = [(ldap.MOD_ADD, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)]
-        topology.standalone.modify_s(dn_config, mod)
+        topology_st.standalone.modify_s(dn_config, mod)
         raise ValueError("invalid dnaExcludeScope value (not a DN)")
     except ldap.INVALID_SYNTAX:
         pass
 
 
-def test_ticket47828_run_19(topology):
+def test_ticket47828_run_19(topology_st):
     """
     Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st,
+            'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_20(topology):
+def test_ticket47828_run_20(topology_st):
     """
     Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning+wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_21(topology):
+def test_ticket47828_run_21(topology_st):
     """
     Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set
     """
-    _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set')
+    _header(topology_st,
+            'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_22(topology):
+def test_ticket47828_run_22(topology_st):
     """
     Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning+wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_23(topology):
+def test_ticket47828_run_23(topology_st):
     """
     Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st,
+            'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_24(topology):
+def test_ticket47828_run_24(topology_st):
     """
     Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Provisioning+wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_25(topology):
+def test_ticket47828_run_25(topology_st):
     '''
     Exclude  a wrong container
     '''
-    _header(topology, 'Exclude a wrong container')
+    _header(topology_st, 'Exclude a wrong container')
 
     dn_config = "cn=excluded scope, cn=%s, %s" % (PLUGIN_DNA, DN_PLUGIN)
 
     try:
         mod = [(ldap.MOD_REPLACE, 'dnaExcludeScope', "invalidDN,%s" % SUFFIX)]
-        topology.standalone.modify_s(dn_config, mod)
+        topology_st.standalone.modify_s(dn_config, mod)
         raise ValueError("invalid dnaExcludeScope value (not a DN)")
     except ldap.INVALID_SYNTAX:
         pass
 
 
-def test_ticket47828_run_26(topology):
+def test_ticket47828_run_26(topology_st):
     """
     Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_27(topology):
+def test_ticket47828_run_27(topology_st):
     """
     Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Wrong container excluded scope: Add an active entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': ACTIVE_USER1_CN,
-                                                'sn': ACTIVE_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((ACTIVE_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': ACTIVE_USER1_CN,
+                                 'sn': ACTIVE_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(ACTIVE_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(ACTIVE_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (ACTIVE_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(ACTIVE_USER1_DN)
 
 
-def test_ticket47828_run_28(topology):
+def test_ticket47828_run_28(topology_st):
     """
     Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set
     """
-    _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set')
+    _header(topology_st, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is  not set')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_29(topology):
+def test_ticket47828_run_29(topology_st):
     """
     Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Wrong container excluded scope: Add a staged entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': STAGED_USER1_CN,
-                                                'sn': STAGED_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((STAGED_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                 'cn': STAGED_USER1_CN,
+                                 'sn': STAGED_USER1_CN,
+                                 ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(STAGED_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(STAGED_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (STAGED_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(STAGED_USER1_DN)
 
 
-def test_ticket47828_run_30(topology):
+def test_ticket47828_run_30(topology_st):
     """
     Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set
     """
-    _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
+    _header(topology_st, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is set')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(-1)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(-1)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) != str(-1)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
-def test_ticket47828_run_31(topology):
+def test_ticket47828_run_31(topology_st):
     """
     Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)
     """
-    _header(topology, 'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
+    _header(topology_st,
+            'Wrong container excluded scope: Add an dummy entry and check its ALLOCATED_ATTR is unchanged (!= magic)')
 
-    topology.standalone.add_s(Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                                'cn': DUMMY_USER1_CN,
-                                                'sn': DUMMY_USER1_CN,
-                                                ALLOCATED_ATTR: str(20)})))
-    ent = topology.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+    topology_st.standalone.add_s(
+        Entry((DUMMY_USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                                'cn': DUMMY_USER1_CN,
+                                'sn': DUMMY_USER1_CN,
+                                ALLOCATED_ATTR: str(20)})))
+    ent = topology_st.standalone.getEntry(DUMMY_USER1_DN, ldap.SCOPE_BASE, "(objectclass=*)")
     assert ent.hasAttr(ALLOCATED_ATTR)
     assert ent.getValue(ALLOCATED_ATTR) == str(20)
-    topology.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
-    topology.standalone.delete_s(DUMMY_USER1_DN)
+    topology_st.standalone.log.debug('%s.%s=%s' % (DUMMY_USER1_CN, ALLOCATED_ATTR, ent.getValue(ALLOCATED_ATTR)))
+    topology_st.standalone.delete_s(DUMMY_USER1_DN)
 
 
 if __name__ == '__main__':
@@ -645,4 +645,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 334 - 317
dirsrvtests/tests/tickets/ticket47829_test.py

@@ -6,32 +6,29 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
-
+from lib389.topologies import topology_st
 
-SCOPE_IN_CN  = 'in'
+SCOPE_IN_CN = 'in'
 SCOPE_OUT_CN = 'out'
-SCOPE_IN_DN  = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
+SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
 SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
 
 PROVISIONING_CN = "provisioning"
 PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
 
 ACTIVE_CN = "accounts"
-STAGE_CN  = "staged users"
+STAGE_CN = "staged users"
 DELETE_CN = "deleted users"
 ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
-STAGE_DN  = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN  = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
 
 STAGE_USER_CN = "stage guy"
 STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -57,126 +54,85 @@ INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN)
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
 
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
 
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("###############################################")
-
-
-def _add_user(topology, type='active'):
+def _add_user(topology_st, type='active'):
     if type == 'active':
-        topology.standalone.add_s(Entry((ACTIVE_USER_DN, {
-                                                'objectclass': "top person inetuser".split(),
-                                                'sn': ACTIVE_USER_CN,
-                                                'cn': ACTIVE_USER_CN})))
+        topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': ACTIVE_USER_CN,
+            'cn': ACTIVE_USER_CN})))
     elif type == 'stage':
-        topology.standalone.add_s(Entry((STAGE_USER_DN, {
-                                                'objectclass': "top person inetuser".split(),
-                                                'sn': STAGE_USER_CN,
-                                                'cn': STAGE_USER_CN})))
+        topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': STAGE_USER_CN,
+            'cn': STAGE_USER_CN})))
     else:
-        topology.standalone.add_s(Entry((OUT_USER_DN, {
-                                        'objectclass': "top person inetuser".split(),
-                                        'sn': OUT_USER_CN,
-                                        'cn': OUT_USER_CN})))
+        topology_st.standalone.add_s(Entry((OUT_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': OUT_USER_CN,
+            'cn': OUT_USER_CN})))
 
 
-def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True):
-    assert(topology)
-    assert(user_dn)
-    assert(group_dn)
-    ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
+def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):
+    assert (topology_st)
+    assert (user_dn)
+    assert (group_dn)
+    ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
     found = False
     if ent.hasAttr('memberof'):
 
         for val in ent.getValues('memberof'):
-            topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
+            topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
             if val == group_dn:
                 found = True
                 break
 
     if find_result:
-        assert(found)
+        assert (found)
     else:
-        assert(not found)
+        assert (not found)
 
 
-def _find_member(topology, user_dn=None, group_dn=None, find_result=True):
-    assert(topology)
-    assert(user_dn)
-    assert(group_dn)
-    ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
+def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True):
+    assert (topology_st)
+    assert (user_dn)
+    assert (group_dn)
+    ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
     found = False
     if ent.hasAttr('member'):
 
         for val in ent.getValues('member'):
-            topology.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
+            topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
             if val == user_dn:
                 found = True
                 break
 
     if find_result:
-        assert(found)
+        assert (found)
     else:
-        assert(not found)
+        assert (not found)
 
 
-def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
-    assert topology is not None
+def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
+    assert topology_st is not None
     assert entry_dn is not None
     assert new_rdn is not None
 
-    topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
+    topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
     try:
         if new_superior:
-            topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
+            topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
         else:
-            topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
+            topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
     except ldap.NO_SUCH_ATTRIBUTE:
-        topology.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds")
+        topology_st.standalone.log.info("accepted failure due to 47833: modrdn reports error.. but succeeds")
         attempt = 0
         if new_superior:
             dn = "%s,%s" % (new_rdn, new_superior)
@@ -188,289 +144,327 @@ def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_sup
 
         while attempt < 10:
             try:
-                ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
+                ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
                 break
             except ldap.NO_SUCH_OBJECT:
-                topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry")
+                topology_st.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry")
                 attempt += 1
                 time.sleep(1)
         if attempt == 10:
-            ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter)
-            ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
+            ent = topology_st.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter)
+            ent = topology_st.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter)
 
 
-def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None):
-    assert(topology)
-    assert(user_dn)
-    assert(group_dn)
+def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None):
+    assert (topology_st)
+    assert (user_dn)
+    assert (group_dn)
     if action == ldap.MOD_ADD:
         txt = 'add'
     elif action == ldap.MOD_DELETE:
         txt = 'delete'
     else:
         txt = 'replace'
-    topology.standalone.log.info('\n%s entry %s' % (txt, user_dn))
-    topology.standalone.log.info('to group %s' % group_dn)
+    topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn))
+    topology_st.standalone.log.info('to group %s' % group_dn)
 
-    topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
+    topology_st.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
     time.sleep(1)
-    _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
-
-
-def test_ticket47829_init(topology):
-    topology.standalone.add_s(Entry((SCOPE_IN_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': SCOPE_IN_DN})))
-    topology.standalone.add_s(Entry((SCOPE_OUT_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': SCOPE_OUT_DN})))
-    topology.standalone.add_s(Entry((PROVISIONING_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': PROVISIONING_CN})))
-    topology.standalone.add_s(Entry((ACTIVE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': ACTIVE_CN})))
-    topology.standalone.add_s(Entry((STAGE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': STAGE_DN})))
-    topology.standalone.add_s(Entry((DELETE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': DELETE_CN})))
+    _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
+
+
+def test_ticket47829_init(topology_st):
+    topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': SCOPE_IN_DN})))
+    topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': SCOPE_OUT_DN})))
+    topology_st.standalone.add_s(Entry((PROVISIONING_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': PROVISIONING_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': ACTIVE_CN})))
+    topology_st.standalone.add_s(Entry((STAGE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': STAGE_DN})))
+    topology_st.standalone.add_s(Entry((DELETE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': DELETE_CN})))
 
     # add groups
-    topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
-                                                'objectclass': "top groupOfNames inetuser".split(),
-                                                'cn': ACTIVE_GROUP_CN})))
-    topology.standalone.add_s(Entry((STAGE_GROUP_DN, {
-                                                'objectclass': "top groupOfNames inetuser".split(),
-                                                'cn': STAGE_GROUP_CN})))
-    topology.standalone.add_s(Entry((OUT_GROUP_DN, {
-                                                'objectclass': "top groupOfNames inetuser".split(),
-                                                'cn': OUT_GROUP_CN})))
-    topology.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, {
-                                                'objectclass': "top groupOfNames".split(),
-                                                'cn': INDIRECT_ACTIVE_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
+        'objectclass': "top groupOfNames inetuser".split(),
+        'cn': ACTIVE_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, {
+        'objectclass': "top groupOfNames inetuser".split(),
+        'cn': STAGE_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((OUT_GROUP_DN, {
+        'objectclass': "top groupOfNames inetuser".split(),
+        'cn': OUT_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, {
+        'objectclass': "top groupOfNames".split(),
+        'cn': INDIRECT_ACTIVE_GROUP_CN})))
 
     # add users
-    _add_user(topology, 'active')
-    _add_user(topology, 'stage')
-    _add_user(topology, 'out')
+    _add_user(topology_st, 'active')
+    _add_user(topology_st, 'stage')
+    _add_user(topology_st, 'out')
 
     # enable memberof of with scope IN except provisioning
-    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
     dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN)
-    topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)])
-    topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)])
+    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)])
+    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)])
 
     # enable RI with scope IN except provisioning
-    topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+    topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
     dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN)
-    topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)])
-    topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)])
-    topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)])
+    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)])
+    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)])
+    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)])
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
 
-def test_ticket47829_mod_active_user_1(topology):
-    _header(topology, 'MOD: add an active user to an active group')
+def test_ticket47829_mod_active_user_1(topology_st):
+    _header(topology_st, 'MOD: add an active user to an active group')
 
     # add active user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # remove active user to active group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_active_user_2(topology):
-    _header(topology, 'MOD: add an Active user to a Stage group')
+def test_ticket47829_mod_active_user_2(topology_st):
+    _header(topology_st, 'MOD: add an Active user to a Stage group')
 
     # add active user to stage group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
 
     # remove active user to stage group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_active_user_3(topology):
-    _header(topology, 'MOD: add an Active user to a out of scope group')
+def test_ticket47829_mod_active_user_3(topology_st):
+    _header(topology_st, 'MOD: add an Active user to a out of scope group')
 
     # add active user to out of scope group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
 
     # remove active user to out of scope  group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_stage_user_1(topology):
-    _header(topology, 'MOD: add an Stage user to a Active group')
+def test_ticket47829_mod_stage_user_1(topology_st):
+    _header(topology_st, 'MOD: add an Stage user to a Active group')
 
     # add stage user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # remove stage user to active group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_stage_user_2(topology):
-    _header(topology, 'MOD: add an Stage user to a Stage group')
+def test_ticket47829_mod_stage_user_2(topology_st):
+    _header(topology_st, 'MOD: add an Stage user to a Stage group')
 
     # add stage user to stage group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
 
     # remove stage user to stage group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_stage_user_3(topology):
-    _header(topology, 'MOD: add an Stage user to a out of scope group')
+def test_ticket47829_mod_stage_user_3(topology_st):
+    _header(topology_st, 'MOD: add an Stage user to a out of scope group')
 
     # add stage user to an out of scope group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
 
     # remove stage user to out of scope group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_out_user_1(topology):
-    _header(topology, 'MOD: add an out of scope user to an active group')
+def test_ticket47829_mod_out_user_1(topology_st):
+    _header(topology_st, 'MOD: add an out of scope user to an active group')
 
     # add out of scope user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # remove out of scope user to active group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_out_user_2(topology):
-    _header(topology, 'MOD: add an out of scope user to a Stage group')
+def test_ticket47829_mod_out_user_2(topology_st):
+    _header(topology_st, 'MOD: add an out of scope user to a Stage group')
 
     # add out of scope user to stage group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True)
 
     # remove out of scope user to stage group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN,
+                    find_result=False)
+
 
-def test_ticket47829_mod_out_user_3(topology):
-    _header(topology, 'MOD: add an out of scope user to an out of scope group')
+def test_ticket47829_mod_out_user_3(topology_st):
+    _header(topology_st, 'MOD: add an out of scope user to an out of scope group')
 
     # add out of scope user to stage group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True)
 
     # remove out of scope user to stage group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False)
 
 
-def test_ticket47829_mod_active_user_modrdn_active_user_1(topology):
-    _header(topology, 'add an Active user to a Active group. Then move Active user to Active')
+def test_ticket47829_mod_active_user_modrdn_active_user_1(topology_st):
+    _header(topology_st, 'add an Active user to a Active group. Then move Active user to Active')
 
     # add Active user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Active entry to active, expect 'member' and 'memberof'
-    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
-    _find_memberof(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
+    _find_memberof(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=True)
+    _find_member(topology_st, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=True)
 
     # move the Active entry to active, expect  'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _modrdn_entry(topology_st, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+                  new_superior=ACTIVE_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=True)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=True)
 
     # remove active user to active group
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology):
-    _header(topology, 'add an Active user to a Active group. Then move Active user to Stage')
+def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology_st):
+    _header(topology_st, 'add an Active user to a Active group. Then move Active user to Stage')
 
     # add Active user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Active entry to stage, expect no 'member' and 'memberof'
-    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
 
     # move the Active entry to Stage, expect  'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+                  new_superior=ACTIVE_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
 
 
-def test_ticket47829_mod_active_user_modrdn_out_user_1(topology):
-    _header(topology, 'add an Active user to a Active group. Then move Active user to out of scope')
+def test_ticket47829_mod_active_user_modrdn_out_user_1(topology_st):
+    _header(topology_st, 'add an Active user to a Active group. Then move Active user to out of scope')
 
     # add Active user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_member(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Active entry to out of scope, expect no 'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
 
     # move the Active entry to out of scope, expect  no 'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+                  new_superior=ACTIVE_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
 
 
-def test_ticket47829_mod_modrdn_1(topology):
-    _header(topology, 'add an Stage user to a Active group. Then move Stage user to Active')
+def test_ticket47829_mod_modrdn_1(topology_st):
+    _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Active')
 
     # add Stage user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Stage entry to active, expect 'member' and 'memberof'
-    _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=True)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=True)
 
     # move the Active entry to Stage, expect no 'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN,
+                  new_superior=STAGE_DN)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
 
 
-def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology):
-    _header(topology, 'add an Stage user to a Active group. Then move Stage user to Active')
+def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology_st):
+    _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Active')
 
-    stage_user_dn  = STAGE_USER_DN
+    stage_user_dn = STAGE_USER_DN
     stage_user_rdn = "cn=%s" % STAGE_USER_CN
     active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN)
 
     # add Stage user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Stage entry to Active, expect 'member' and 'memberof'
-    _modrdn_entry(topology, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN)
-    _find_memberof(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _modrdn_entry(topology_st, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN)
+    _find_memberof(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology_st, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Active entry to Stage, expect  no 'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN)
-    _find_memberof(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN)
+    _find_memberof(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
 
 
-def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
-    _header(topology, 'add an Stage user to a Active group. Then move Stage user to Stage')
+def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st):
+    _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage')
 
-    _header(topology, 'Return because it requires a fix for 47833')
+    _header(topology_st, 'Return because it requires a fix for 47833')
     return
 
     old_stage_user_dn = STAGE_USER_DN
@@ -479,126 +473,149 @@ def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
     new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN)
 
     # add Stage user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Stage entry to Stage, expect  no 'member' and 'memberof'
-    _modrdn_entry(topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
-    _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
+    _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
 
     # move the Stage entry to Stage, expect  no 'member' and no 'memberof'
-    _modrdn_entry(topology, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN)
-    _find_memberof(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN)
+    _find_memberof(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
 
 
-def test_ticket47829_indirect_active_group_1(topology):
-    _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1')
+def test_ticket47829_indirect_active_group_1(topology_st):
+    _header(topology_st, 'add an Active group (G1) to an active group (G0). Then add active user to G1')
 
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
 
     # add an active user to G1. Checks that user is memberof G1
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
 
     # remove G1 from G0
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # remove active user from G1
-    _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
 
 
-def test_ticket47829_indirect_active_group_2(topology):
-    _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage')
+def test_ticket47829_indirect_active_group_2(topology_st):
+    _header(topology_st,
+            'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to stage')
 
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
 
     # add an active user to G1. Checks that user is memberof G1
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
 
     # remove G1 from G0
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move active user to stage
-    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
+    _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN)
 
     # stage user is no longer a member of the active group and indirect active group
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+                   find_result=False)
 
     # active group and indirect active group no longer have the stage user as a member
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+                 find_result=False)
 
     # move the entry back to active; it remains a non-member
-    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+                  new_superior=ACTIVE_DN)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+                 find_result=False)
 
 
-def test_ticket47829_indirect_active_group_3(topology):
-    _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope')
+def test_ticket47829_indirect_active_group_3(topology_st):
+    _header(topology_st,
+            'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope')
 
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
 
     # add an active user to G1. Checks that user is memberof G1
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=True)
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
 
     # remove G1 from G0
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)])
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move active user to out of the scope
-    _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN)
+    _modrdn_entry(topology_st, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN)
 
     # stage user is no longer a member of the active group and indirect active group
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN,
+                   find_result=False)
+    _find_memberof(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+                   find_result=False)
 
     # active group and indirect active group no longer have the stage user as a member
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+                 find_result=False)
 
     # move the entry back to active; it remains a non-member
-    _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN,
+                  new_superior=ACTIVE_DN)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN,
+                 find_result=False)
+    _find_member(topology_st, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN,
+                 find_result=False)
 
 
-def test_ticket47829_indirect_active_group_4(topology):
-    _header(topology, 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back')
+def test_ticket47829_indirect_active_group_4(topology_st):
+    _header(topology_st,
+            'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. Then move it back')
 
-    topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
+    topology_st.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)])
 
     # add stage user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
 
     # move stage user to active
-    _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
+    _modrdn_entry(topology_st, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN)
     renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN)
-    _find_member(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
-    _find_member(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
-    _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _find_member(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True)
+    _find_memberof(topology_st, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move back active to stage
-    _modrdn_entry(topology, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
-    _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False)
+    _find_memberof(topology_st, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False)
 
 
 if __name__ == '__main__':

+ 104 - 153
dirsrvtests/tests/tickets/ticket47833_test.py

@@ -6,33 +6,25 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
-SCOPE_IN_CN  = 'in'
+SCOPE_IN_CN = 'in'
 SCOPE_OUT_CN = 'out'
-SCOPE_IN_DN  = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
+SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
 SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
 
 PROVISIONING_CN = "provisioning"
 PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
 
 ACTIVE_CN = "accounts"
-STAGE_CN  = "staged users"
+STAGE_CN = "staged users"
 DELETE_CN = "deleted users"
 ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
-STAGE_DN  = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN  = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
 
 STAGE_USER_CN = "stage guy"
 STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -52,208 +44,167 @@ ACTIVE_GROUP_DN = "cn=%s,%s" % (ACTIVE_GROUP_CN, ACTIVE_DN)
 OUT_GROUP_CN = "out group"
 OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN)
 
-
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
 
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
 
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
 
-    return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("###############################################")
-
-def _add_user(topology, type='active'):
+def _add_user(topology_st, type='active'):
     if type == 'active':
-        topology.standalone.add_s(Entry((ACTIVE_USER_DN, {
-                                                'objectclass': "top person inetuser".split(),
-                                                'sn': ACTIVE_USER_CN,
-                                                'cn': ACTIVE_USER_CN})))
+        topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': ACTIVE_USER_CN,
+            'cn': ACTIVE_USER_CN})))
     elif type == 'stage':
-        topology.standalone.add_s(Entry((STAGE_USER_DN, {
-                                                'objectclass': "top person inetuser".split(),
-                                                'sn': STAGE_USER_CN,
-                                                'cn': STAGE_USER_CN})))
+        topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': STAGE_USER_CN,
+            'cn': STAGE_USER_CN})))
     else:
-        topology.standalone.add_s(Entry((OUT_USER_DN, {
-                                        'objectclass': "top person inetuser".split(),
-                                        'sn': OUT_USER_CN,
-                                        'cn': OUT_USER_CN})))
-
-def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True):
-    assert(topology)
-    assert(user_dn)
-    assert(group_dn)
-    ent = topology.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
+        topology_st.standalone.add_s(Entry((OUT_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': OUT_USER_CN,
+            'cn': OUT_USER_CN})))
+
+
+def _find_memberof(topology_st, user_dn=None, group_dn=None, find_result=True):
+    assert (topology_st)
+    assert (user_dn)
+    assert (group_dn)
+    ent = topology_st.standalone.getEntry(user_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['memberof'])
     found = False
     if ent.hasAttr('memberof'):
 
         for val in ent.getValues('memberof'):
-            topology.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
+            topology_st.standalone.log.info("!!!!!!! %s: memberof->%s" % (user_dn, val))
             if val == group_dn:
                 found = True
                 break
 
     if find_result:
-        assert(found)
+        assert (found)
     else:
-        assert(not found)
+        assert (not found)
 
-def _find_member(topology, user_dn=None, group_dn=None, find_result=True):
-    assert(topology)
-    assert(user_dn)
-    assert(group_dn)
-    ent = topology.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
+
+def _find_member(topology_st, user_dn=None, group_dn=None, find_result=True):
+    assert (topology_st)
+    assert (user_dn)
+    assert (group_dn)
+    ent = topology_st.standalone.getEntry(group_dn, ldap.SCOPE_BASE, "(objectclass=*)", ['member'])
     found = False
     if ent.hasAttr('member'):
 
         for val in ent.getValues('member'):
-            topology.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
+            topology_st.standalone.log.info("!!!!!!! %s: member ->%s" % (group_dn, val))
             if val == user_dn:
                 found = True
                 break
 
     if find_result:
-        assert(found)
+        assert (found)
     else:
-        assert(not found)
+        assert (not found)
+
 
-def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
-    assert topology != None
+def _modrdn_entry(topology_st=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
+    assert topology_st != None
     assert entry_dn != None
     assert new_rdn != None
 
-
-    topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
+    topology_st.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn)
     if new_superior:
-        topology.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
+        topology_st.standalone.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
     else:
-        topology.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
+        topology_st.standalone.rename_s(entry_dn, new_rdn, delold=del_old)
 
-def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None):
-    assert(topology)
-    assert(user_dn)
-    assert(group_dn)
+
+def _check_memberof(topology_st=None, action=None, user_dn=None, group_dn=None, find_result=None):
+    assert (topology_st)
+    assert (user_dn)
+    assert (group_dn)
     if action == ldap.MOD_ADD:
         txt = 'add'
     elif action == ldap.MOD_DELETE:
         txt = 'delete'
     else:
         txt = 'replace'
-    topology.standalone.log.info('\n%s entry %s' % (txt, user_dn))
-    topology.standalone.log.info('to group %s' % group_dn)
+    topology_st.standalone.log.info('\n%s entry %s' % (txt, user_dn))
+    topology_st.standalone.log.info('to group %s' % group_dn)
 
-    topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
+    topology_st.standalone.modify_s(group_dn, [(action, 'member', user_dn)])
     time.sleep(1)
-    _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
-
-
-
-
-def test_ticket47829_init(topology):
-    topology.standalone.add_s(Entry((SCOPE_IN_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': SCOPE_IN_DN})))
-    topology.standalone.add_s(Entry((SCOPE_OUT_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': SCOPE_OUT_DN})))
-    topology.standalone.add_s(Entry((PROVISIONING_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': PROVISIONING_CN})))
-    topology.standalone.add_s(Entry((ACTIVE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': ACTIVE_CN})))
-    topology.standalone.add_s(Entry((STAGE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': STAGE_DN})))
-    topology.standalone.add_s(Entry((DELETE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': DELETE_CN})))
+    _find_memberof(topology_st, user_dn=user_dn, group_dn=group_dn, find_result=find_result)
+
+
+def test_ticket47829_init(topology_st):
+    topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': SCOPE_IN_DN})))
+    topology_st.standalone.add_s(Entry((SCOPE_OUT_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': SCOPE_OUT_DN})))
+    topology_st.standalone.add_s(Entry((PROVISIONING_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': PROVISIONING_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': ACTIVE_CN})))
+    topology_st.standalone.add_s(Entry((STAGE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': STAGE_DN})))
+    topology_st.standalone.add_s(Entry((DELETE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': DELETE_CN})))
 
     # add groups
-    topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
-                                                'objectclass': "top groupOfNames".split(),
-                                                'cn': ACTIVE_GROUP_CN})))
-    topology.standalone.add_s(Entry((STAGE_GROUP_DN, {
-                                                'objectclass': "top groupOfNames".split(),
-                                                'cn': STAGE_GROUP_CN})))
-    topology.standalone.add_s(Entry((OUT_GROUP_DN, {
-                                                'objectclass': "top groupOfNames".split(),
-                                                'cn': OUT_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_GROUP_DN, {
+        'objectclass': "top groupOfNames".split(),
+        'cn': ACTIVE_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((STAGE_GROUP_DN, {
+        'objectclass': "top groupOfNames".split(),
+        'cn': STAGE_GROUP_CN})))
+    topology_st.standalone.add_s(Entry((OUT_GROUP_DN, {
+        'objectclass': "top groupOfNames".split(),
+        'cn': OUT_GROUP_CN})))
 
     # add users
-    _add_user(topology, 'active')
-    _add_user(topology, 'stage')
-    _add_user(topology, 'out')
-
-
+    _add_user(topology_st, 'active')
+    _add_user(topology_st, 'stage')
+    _add_user(topology_st, 'out')
 
     # enable memberof of with scope account
-    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
     dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN)
-    topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ACTIVE_DN)])
-
-
-
-    topology.standalone.restart(timeout=10)
-
+    topology_st.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', ACTIVE_DN)])
 
+    topology_st.standalone.restart(timeout=10)
 
 
-def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology):
-    _header(topology, 'add an Stage user to a Active group. Then move Stage user to Stage')
+def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology_st):
+    _header(topology_st, 'add an Stage user to a Active group. Then move Stage user to Stage')
 
-    old_stage_user_dn  = STAGE_USER_DN
+    old_stage_user_dn = STAGE_USER_DN
     old_stage_user_rdn = "cn=%s" % STAGE_USER_CN
     new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN
     new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN)
 
     # add Stage user to active group
-    _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member  (topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
+    _check_memberof(topology_st, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN,
+                    find_result=False)
+    _find_member(topology_st, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True)
 
     # move the Stage entry to Stage, expect no 'member' and 'memberof'
-    _modrdn_entry (topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
-    _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
-    _find_member  (topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _modrdn_entry(topology_st, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN)
+    _find_memberof(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
+    _find_member(topology_st, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False)
 
 
 if __name__ == '__main__':

File diff suppressed because it is too large
+ 277 - 296
dirsrvtests/tests/tickets/ticket47838_test.py


+ 73 - 187
dirsrvtests/tests/tickets/ticket47869MMR_test.py

@@ -6,142 +6,28 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
 TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
 ENTRY_NAME = 'test_entry'
 MAX_ENTRIES = 10
 
-BIND_NAME  = 'bind_entry'
-BIND_DN    = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW    = 'password'
-
-
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    # Here we have two instances master and consumer
-    return TopologyMaster1Master2(master1, master2)
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
 
 
-def test_ticket47869_init(topology):
+def test_ticket47869_init(topology_m2):
     """
         It adds an entry ('bind_entry') and 10 test entries
         It sets the anonymous aci
@@ -149,21 +35,21 @@ def test_ticket47869_init(topology):
     """
     # enable acl error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))]  # REPL
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     # entry used to bind with
-    topology.master1.log.info("Add %s" % BIND_DN)
-    topology.master1.add_s(Entry((BIND_DN, {
-                                            'objectclass': "top person".split(),
-                                            'sn':           BIND_NAME,
-                                            'cn':           BIND_NAME,
-                                            'userpassword': BIND_PW})))
+    topology_m2.ms["master1"].log.info("Add %s" % BIND_DN)
+    topology_m2.ms["master1"].add_s(Entry((BIND_DN, {
+        'objectclass': "top person".split(),
+        'sn': BIND_NAME,
+        'cn': BIND_NAME,
+        'userpassword': BIND_PW})))
     loop = 0
     ent = None
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            ent = topology_m2.ms["master2"].getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)")
             break
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
@@ -174,22 +60,22 @@ def test_ticket47869_init(topology):
     # keep the anonymous ACI so the 'read-search' aci can be used in the SEARCH test
     ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)"
     mod = [(ldap.MOD_REPLACE, 'aci', ACI_ANONYMOUS)]
-    topology.master1.modify_s(SUFFIX, mod)
-    topology.master2.modify_s(SUFFIX, mod)
+    topology_m2.ms["master1"].modify_s(SUFFIX, mod)
+    topology_m2.ms["master2"].modify_s(SUFFIX, mod)
 
     # add entries
     for cpt in range(MAX_ENTRIES):
         name = "%s%d" % (ENTRY_NAME, cpt)
         mydn = "cn=%s,%s" % (name, SUFFIX)
-        topology.master1.add_s(Entry((mydn,
-                                      {'objectclass': "top person".split(),
-                                       'sn': name,
-                                       'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry((mydn,
+                                               {'objectclass': "top person".split(),
+                                                'sn': name,
+                                                'cn': name})))
         loop = 0
         ent = None
         while loop <= 10:
             try:
-                ent = topology.master2.getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)")
+                ent = topology_m2.ms["master2"].getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)")
                 break
             except ldap.NO_SUCH_OBJECT:
                 time.sleep(1)
@@ -198,7 +84,7 @@ def test_ticket47869_init(topology):
             assert False
 
 
-def test_ticket47869_check(topology):
+def test_ticket47869_check(topology_m2):
     '''
     On Master 1 and 2:
       Bind as Directory Manager.
@@ -213,107 +99,107 @@ def test_ticket47869_check(topology):
       Search all specifying nscpEntryWsi in the attribute list.
       Check nscpEntryWsi is not returned.
     '''
-    topology.master1.log.info("\n\n######################### CHECK nscpentrywsi ######################\n")
+    topology_m2.ms["master1"].log.info("\n\n######################### CHECK nscpentrywsi ######################\n")
 
-    topology.master1.log.info("##### Master1: Bind as %s #####" % DN_DM)
-    topology.master1.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master1"].log.info("##### Master1: Bind as %s #####" % DN_DM)
+    topology_m2.ms["master1"].simple_bind_s(DN_DM, PASSWORD)
 
-    topology.master1.log.info("Master1: Calling search_ext...")
-    msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    topology_m2.ms["master1"].log.info("Master1: Calling search_ext...")
+    msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
     nscpentrywsicnt = 0
-    rtype, rdata, rmsgid = topology.master1.result2(msgid)
-    topology.master1.log.info("%d results" % len(rdata))
+    rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
+    topology_m2.ms["master1"].log.info("%d results" % len(rdata))
 
-    topology.master1.log.info("Results:")
+    topology_m2.ms["master1"].log.info("Results:")
     for dn, attrs in rdata:
-        topology.master1.log.info("dn: %s" % dn)
+        topology_m2.ms["master1"].log.info("dn: %s" % dn)
         if 'nscpentrywsi' in attrs:
             nscpentrywsicnt += 1
 
-    topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
+    topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
 
-    topology.master2.log.info("##### Master2: Bind as %s #####" % DN_DM)
-    topology.master2.simple_bind_s(DN_DM, PASSWORD)
+    topology_m2.ms["master2"].log.info("##### Master2: Bind as %s #####" % DN_DM)
+    topology_m2.ms["master2"].simple_bind_s(DN_DM, PASSWORD)
 
-    topology.master2.log.info("Master2: Calling search_ext...")
-    msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    topology_m2.ms["master2"].log.info("Master2: Calling search_ext...")
+    msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
     nscpentrywsicnt = 0
-    rtype, rdata, rmsgid = topology.master2.result2(msgid)
-    topology.master2.log.info("%d results" % len(rdata))
+    rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid)
+    topology_m2.ms["master2"].log.info("%d results" % len(rdata))
 
-    topology.master2.log.info("Results:")
+    topology_m2.ms["master2"].log.info("Results:")
     for dn, attrs in rdata:
-        topology.master2.log.info("dn: %s" % dn)
+        topology_m2.ms["master2"].log.info("dn: %s" % dn)
         if 'nscpentrywsi' in attrs:
             nscpentrywsicnt += 1
 
-    topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
+    topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
 
     # bind as bind_entry
-    topology.master1.log.info("##### Master1: Bind as %s #####" % BIND_DN)
-    topology.master1.simple_bind_s(BIND_DN, BIND_PW)
+    topology_m2.ms["master1"].log.info("##### Master1: Bind as %s #####" % BIND_DN)
+    topology_m2.ms["master1"].simple_bind_s(BIND_DN, BIND_PW)
 
-    topology.master1.log.info("Master1: Calling search_ext...")
-    msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    topology_m2.ms["master1"].log.info("Master1: Calling search_ext...")
+    msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
     nscpentrywsicnt = 0
-    rtype, rdata, rmsgid = topology.master1.result2(msgid)
-    topology.master1.log.info("%d results" % len(rdata))
+    rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
+    topology_m2.ms["master1"].log.info("%d results" % len(rdata))
 
     for dn, attrs in rdata:
         if 'nscpentrywsi' in attrs:
             nscpentrywsicnt += 1
     assert nscpentrywsicnt == 0
-    topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
+    topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
 
     # bind as bind_entry
-    topology.master2.log.info("##### Master2: Bind as %s #####" % BIND_DN)
-    topology.master2.simple_bind_s(BIND_DN, BIND_PW)
+    topology_m2.ms["master2"].log.info("##### Master2: Bind as %s #####" % BIND_DN)
+    topology_m2.ms["master2"].simple_bind_s(BIND_DN, BIND_PW)
 
-    topology.master2.log.info("Master2: Calling search_ext...")
-    msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    topology_m2.ms["master2"].log.info("Master2: Calling search_ext...")
+    msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
     nscpentrywsicnt = 0
-    rtype, rdata, rmsgid = topology.master2.result2(msgid)
-    topology.master2.log.info("%d results" % len(rdata))
+    rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid)
+    topology_m2.ms["master2"].log.info("%d results" % len(rdata))
 
     for dn, attrs in rdata:
         if 'nscpentrywsi' in attrs:
             nscpentrywsicnt += 1
     assert nscpentrywsicnt == 0
-    topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
+    topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
 
     # bind as anonymous
-    topology.master1.log.info("##### Master1: Bind as anonymous #####")
-    topology.master1.simple_bind_s("", "")
+    topology_m2.ms["master1"].log.info("##### Master1: Bind as anonymous #####")
+    topology_m2.ms["master1"].simple_bind_s("", "")
 
-    topology.master1.log.info("Master1: Calling search_ext...")
-    msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    topology_m2.ms["master1"].log.info("Master1: Calling search_ext...")
+    msgid = topology_m2.ms["master1"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
     nscpentrywsicnt = 0
-    rtype, rdata, rmsgid = topology.master1.result2(msgid)
-    topology.master1.log.info("%d results" % len(rdata))
+    rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
+    topology_m2.ms["master1"].log.info("%d results" % len(rdata))
 
     for dn, attrs in rdata:
         if 'nscpentrywsi' in attrs:
             nscpentrywsicnt += 1
     assert nscpentrywsicnt == 0
-    topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
+    topology_m2.ms["master1"].log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt)
 
     # bind as bind_entry
-    topology.master2.log.info("##### Master2: Bind as anonymous #####")
-    topology.master2.simple_bind_s("", "")
+    topology_m2.ms["master2"].log.info("##### Master2: Bind as anonymous #####")
+    topology_m2.ms["master2"].simple_bind_s("", "")
 
-    topology.master2.log.info("Master2: Calling search_ext...")
-    msgid = topology.master2.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    topology_m2.ms["master2"].log.info("Master2: Calling search_ext...")
+    msgid = topology_m2.ms["master2"].search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
     nscpentrywsicnt = 0
-    rtype, rdata, rmsgid = topology.master2.result2(msgid)
-    topology.master2.log.info("%d results" % len(rdata))
+    rtype, rdata, rmsgid = topology_m2.ms["master2"].result2(msgid)
+    topology_m2.ms["master2"].log.info("%d results" % len(rdata))
 
     for dn, attrs in rdata:
         if 'nscpentrywsi' in attrs:
             nscpentrywsicnt += 1
     assert nscpentrywsicnt == 0
-    topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
+    topology_m2.ms["master2"].log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt)
 
-    topology.master1.log.info("##### ticket47869 was successfully verified. #####")
+    topology_m2.ms["master1"].log.info("##### ticket47869 was successfully verified. #####")
 
 
 if __name__ == '__main__':

+ 27 - 122
dirsrvtests/tests/tickets/ticket47871_test.py

@@ -11,16 +11,14 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_m1c1
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -34,143 +32,50 @@ MAX_OTHERS = 10
 ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber']
 
 
-class TopologyMasterConsumer(object):
-    def __init__(self, master, consumer):
-        master.open()
-        self.master = master
-
-        consumer.open()
-        self.consumer = consumer
-
-    def __repr__(self):
-            return "Master[%s] -> Consumer[%s" % (self.master, self.consumer)
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER -> Consumer.
-    '''
-    master   = DirSrv(verbose=False)
-    consumer = DirSrv(verbose=False)
-
-    # Args for the master instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master.allocate(args_master)
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_CONSUMER_1
-    args_instance[SER_PORT] = PORT_CONSUMER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1
-    args_consumer = args_instance.copy()
-    consumer.allocate(args_consumer)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master = master.exists()
-    instance_consumer = consumer.exists()
-
-    # Remove all the instances
-    if instance_master:
-        master.delete()
-    if instance_consumer:
-        consumer.delete()
-
-    # Create the instances
-    master.create()
-    master.open()
-    consumer.create()
-    consumer.open()
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-    master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1)
-    master.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master.testReplication(DEFAULT_SUFFIX, consumer):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master.delete()
-        consumer.delete()
-    request.addfinalizer(fin)
-    #
-    # Here we have two instances master and consumer
-    # with replication working. Either coming from a backup recovery
-    # or from a fresh (re)init
-    # Time to return the topology
-    return TopologyMasterConsumer(master, consumer)
-
-
-def test_ticket47871_init(topology):
+def test_ticket47871_init(topology_m1c1):
     """
         Initialize the test environment
     """
-    topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+    topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
     mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', "10s"),  # 10 second trimming
            (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', "5s")]
-    topology.master.modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod)
-    #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF)
-    #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
-    topology.master.stop(timeout=10)
-    topology.master.start(timeout=10)
+    topology_m1c1.ms["master1"].modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod)
+    # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_MEMBER_OF)
+    # topology_m1c1.ms["master1"].plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+    topology_m1c1.ms["master1"].stop(timeout=10)
+    topology_m1c1.ms["master1"].start(timeout=10)
 
-    topology.master.log.info("test_ticket47871_init topology %r" % (topology))
+    topology_m1c1.ms["master1"].log.info("test_ticket47871_init topology_m1c1 %r" % (topology_m1c1))
     # the test case will check if a warning message is logged in the
     # error log of the supplier
-    topology.master.errorlog_file = open(topology.master.errlog, "r")
+    topology_m1c1.ms["master1"].errorlog_file = open(topology_m1c1.ms["master1"].errlog, "r")
 
 
-def test_ticket47871_1(topology):
+def test_ticket47871_1(topology_m1c1):
     '''
     ADD entries and check they are all in the retrocl
     '''
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.master.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m1c1.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
-    topology.master.log.info("test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
+    topology_m1c1.ms["master1"].log.info(
+        "test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
 
     # Check the number of entries in the retro changelog
     time.sleep(1)
-    ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+    ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
     assert len(ents) == MAX_OTHERS
-    topology.master.log.info("Added entries are")
+    topology_m1c1.ms["master1"].log.info("Added entries are")
     for ent in ents:
-        topology.master.log.info("%s" % ent.dn)
+        topology_m1c1.ms["master1"].log.info("%s" % ent.dn)
 
 
-def test_ticket47871_2(topology):
+def test_ticket47871_2(topology_m1c1):
     '''
     Wait until there is just a last entries
     '''
@@ -178,11 +83,11 @@ def test_ticket47871_2(topology):
     TRY_NO = 1
     while TRY_NO <= MAX_TRIES:
         time.sleep(6)  # at least 1 trimming occurred
-        ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
+        ents = topology_m1c1.ms["master1"].search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
         assert len(ents) <= MAX_OTHERS
-        topology.master.log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents)))
+        topology_m1c1.ms["master1"].log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents)))
         for ent in ents:
-            topology.master.log.info("%s" % ent.dn)
+            topology_m1c1.ms["master1"].log.info("%s" % ent.dn)
         if len(ents) > 1:
             TRY_NO += 1
         else:

+ 81 - 125
dirsrvtests/tests/tickets/ticket47900_test.py

@@ -6,70 +6,26 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
+import ldap
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
-CONFIG_DN  = 'cn=config'
+CONFIG_DN = 'cn=config'
 ADMIN_NAME = 'passwd_admin'
-ADMIN_DN   = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX)
-ADMIN_PWD  = 'adminPassword_1'
+ADMIN_DN = 'cn=%s,%s' % (ADMIN_NAME, SUFFIX)
+ADMIN_PWD = 'adminPassword_1'
 ENTRY_NAME = 'Joe Schmo'
-ENTRY_DN   = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
+ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
 INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==')
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47900(topology):
+def test_ticket47900(topology_st):
     """
         Test that password administrators/root DN can
         bypass password syntax/policy.
@@ -90,54 +46,54 @@ def test_ticket47900(topology):
     entry.setValues('cn', ADMIN_NAME)
     entry.setValues('userpassword', ADMIN_PWD)
 
-    topology.standalone.log.info("Creating Password Administator entry %s..." % ADMIN_DN)
+    topology_st.standalone.log.info("Creating Password Administator entry %s..." % ADMIN_DN)
     try:
-        topology.standalone.add_s(entry)
+        topology_st.standalone.add_s(entry)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Unexpected result ' + e.message['desc'])
+        topology_st.standalone.log.error('Unexpected result ' + e.message['desc'])
         assert False
-        topology.standalone.log.error("Failed to add Password Administator %s, error: %s "
-                % (ADMIN_DN, e.message['desc']))
+        topology_st.standalone.log.error("Failed to add Password Administator %s, error: %s "
+                                         % (ADMIN_DN, e.message['desc']))
         assert False
 
-    topology.standalone.log.info("Configuring password policy...")
+    topology_st.standalone.log.info("Configuring password policy...")
     try:
-        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local' , 'on'),
-                                                 (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
-                                                 (ldap.MOD_REPLACE, 'passwordMinCategories' , '1'),
-                                                 (ldap.MOD_REPLACE, 'passwordMinTokenLength' , '1'),
-                                                 (ldap.MOD_REPLACE, 'passwordExp' , 'on'),
-                                                 (ldap.MOD_REPLACE, 'passwordMinDigits' , '1'),
-                                                 (ldap.MOD_REPLACE, 'passwordMinSpecials' , '1')])
+        topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on'),
+                                                    (ldap.MOD_REPLACE, 'passwordCheckSyntax', 'on'),
+                                                    (ldap.MOD_REPLACE, 'passwordMinCategories', '1'),
+                                                    (ldap.MOD_REPLACE, 'passwordMinTokenLength', '1'),
+                                                    (ldap.MOD_REPLACE, 'passwordExp', 'on'),
+                                                    (ldap.MOD_REPLACE, 'passwordMinDigits', '1'),
+                                                    (ldap.MOD_REPLACE, 'passwordMinSpecials', '1')])
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed configure password policy: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed configure password policy: ' + e.message['desc'])
         assert False
 
     #
     # Add an aci to allow everyone all access (just makes things easier)
     #
-    topology.standalone.log.info("Add aci to allow password admin to add/update entries...")
+    topology_st.standalone.log.info("Add aci to allow password admin to add/update entries...")
 
-    ACI_TARGET       = "(target = \"ldap:///%s\")" % SUFFIX
-    ACI_TARGETATTR   = "(targetattr = *)"
-    ACI_ALLOW        = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
-    ACI_SUBJECT      = "(userdn = \"ldap:///anyone\");)"
-    ACI_BODY         = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
+    ACI_TARGET = "(target = \"ldap:///%s\")" % SUFFIX
+    ACI_TARGETATTR = "(targetattr = *)"
+    ACI_ALLOW = "(version 3.0; acl \"Password Admin Access\"; allow (all) "
+    ACI_SUBJECT = "(userdn = \"ldap:///anyone\");)"
+    ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_ALLOW + ACI_SUBJECT
     mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)]
     try:
-        topology.standalone.modify_s(SUFFIX, mod)
+        topology_st.standalone.modify_s(SUFFIX, mod)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to add aci for password admin: ' + e.message['desc'])
         assert False
 
     #
     # Bind as the Password Admin
     #
-    topology.standalone.log.info("Bind as the Password Administator (before activating)...")
+    topology_st.standalone.log.info("Bind as the Password Administator (before activating)...")
     try:
-        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+        topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
         assert False
 
     #
@@ -151,49 +107,49 @@ def test_ticket47900(topology):
     #
     # Start by attempting to add an entry with an invalid password
     #
-    topology.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...")
+    topology_st.standalone.log.info("Attempt to add entries with invalid passwords, these adds should fail...")
     for passwd in INVALID_PWDS:
         failed_as_expected = False
         entry.setValues('userpassword', passwd)
-        topology.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
+        topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
         try:
-            topology.standalone.add_s(entry)
+            topology_st.standalone.add_s(entry)
         except ldap.LDAPError as e:
             # We failed as expected
             failed_as_expected = True
-            topology.standalone.log.info('Add failed as expected: password (%s) result (%s)'
-                    % (passwd, e.message['desc']))
+            topology_st.standalone.log.info('Add failed as expected: password (%s) result (%s)'
+                                            % (passwd, e.message['desc']))
 
         if not failed_as_expected:
-            topology.standalone.log.error("We were incorrectly able to add an entry " +
-                    "with an invalid password (%s)" % (passwd))
+            topology_st.standalone.log.error("We were incorrectly able to add an entry " +
+                                             "with an invalid password (%s)" % (passwd))
             assert False
 
     #
     # Now activate a password administator, bind as root dn to do the config
     # update, then rebind as the password admin
     #
-    topology.standalone.log.info("Activate the Password Administator...")
+    topology_st.standalone.log.info("Activate the Password Administator...")
 
     # Bind as Root DN
     try:
-        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+        topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
         assert False
 
     # Update config
     try:
-        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+        topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
         assert False
 
     # Bind as Password Admin
     try:
-        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+        topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
         assert False
 
     #
@@ -201,21 +157,21 @@ def test_ticket47900(topology):
     #
     for passwd in INVALID_PWDS:
         entry.setValues('userpassword', passwd)
-        topology.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
+        topology_st.standalone.log.info("Create a regular user entry %s with password (%s)..." % (ENTRY_DN, passwd))
         try:
-            topology.standalone.add_s(entry)
+            topology_st.standalone.add_s(entry)
         except ldap.LDAPError as e:
-            topology.standalone.log.error('Failed to add entry with password (%s) result (%s)'
-                    % (passwd, e.message['desc']))
+            topology_st.standalone.log.error('Failed to add entry with password (%s) result (%s)'
+                                             % (passwd, e.message['desc']))
             assert False
 
-        topology.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN)
+        topology_st.standalone.log.info('Succesfully added entry (%s)' % ENTRY_DN)
 
         # Delete entry for the next pass
         try:
-            topology.standalone.delete_s(ENTRY_DN)
+            topology_st.standalone.delete_s(ENTRY_DN)
         except ldap.LDAPError as e:
-            topology.standalone.log.error('Failed to delete entry: %s' % (e.message['desc']))
+            topology_st.standalone.log.error('Failed to delete entry: %s' % (e.message['desc']))
             assert False
 
     #
@@ -223,36 +179,36 @@ def test_ticket47900(topology):
     #
     entry.setValues('userpassword', ADMIN_PWD)
     try:
-        topology.standalone.add_s(entry)
+        topology_st.standalone.add_s(entry)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to add entry with valid password (%s) result (%s)'
-                % (passwd, e.message['desc']))
+        topology_st.standalone.log.error('Failed to add entry with valid password (%s) result (%s)'
+                                         % (passwd, e.message['desc']))
         assert False
 
     #
     # Deactivate the password admin and make sure invalid password updates fail
     #
-    topology.standalone.log.info("Deactivate Password Administator and try invalid password updates...")
+    topology_st.standalone.log.info("Deactivate Password Administator and try invalid password updates...")
 
     # Bind as root DN
     try:
-        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+        topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
         assert False
 
     # Update config
     try:
-        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
+        topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_DELETE, 'passwordAdminDN', None)])
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to remove password admin from config: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to remove password admin from config: ' + e.message['desc'])
         assert False
 
     # Bind as Password Admin
     try:
-        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+        topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
         assert False
 
     #
@@ -262,42 +218,42 @@ def test_ticket47900(topology):
         failed_as_expected = False
         entry.setValues('userpassword', passwd)
         try:
-            topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+            topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
         except ldap.LDAPError as e:
             # We failed as expected
             failed_as_expected = True
-            topology.standalone.log.info('Password update failed as expected: password (%s) result (%s)'
-                    % (passwd, e.message['desc']))
+            topology_st.standalone.log.info('Password update failed as expected: password (%s) result (%s)'
+                                            % (passwd, e.message['desc']))
 
         if not failed_as_expected:
-            topology.standalone.log.error("We were incorrectly able to add an invalid password (%s)"
-                    % (passwd))
+            topology_st.standalone.log.error("We were incorrectly able to add an invalid password (%s)"
+                                             % (passwd))
             assert False
 
     #
     # Now activate a password administator
     #
-    topology.standalone.log.info("Activate Password Administator and try updates again...")
+    topology_st.standalone.log.info("Activate Password Administator and try updates again...")
 
     # Bind as root DN
     try:
-        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+        topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
         assert False
 
     # Update config
     try:
-        topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
+        topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
         assert False
 
     # Bind as Password Admin
     try:
-        topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
+        topology_st.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
+        topology_st.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
         assert False
 
     #
@@ -306,12 +262,12 @@ def test_ticket47900(topology):
     for passwd in INVALID_PWDS:
         entry.setValues('userpassword', passwd)
         try:
-            topology.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+            topology_st.standalone.modify_s(ENTRY_DN, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
         except ldap.LDAPError as e:
-            topology.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)'
-                    % (passwd, e.message['desc']))
+            topology_st.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)'
+                                             % (passwd, e.message['desc']))
             assert False
-        topology.standalone.log.info('Password update succeeded (%s)' % passwd)
+        topology_st.standalone.log.info('Password update succeeded (%s)' % passwd)
 
 
 if __name__ == '__main__':

+ 16 - 63
dirsrvtests/tests/tickets/ticket47910_test.py

@@ -5,66 +5,20 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
-import re
 import subprocess
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
 from datetime import datetime, timedelta
 
+import pytest
+from lib389.tasks import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
 
 @pytest.fixture(scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
[email protected](scope="module")
-def log_dir(topology):
+def log_dir(topology_st):
     '''
     Do a search operation
     and disable access log buffering
@@ -72,15 +26,15 @@ def log_dir(topology):
     '''
 
     log.info("Diable access log buffering")
-    topology.standalone.setAccessLogBuffering(False)
+    topology_st.standalone.setAccessLogBuffering(False)
 
     log.info("Do a ldapsearch operation")
-    topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
+    topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=*)")
 
     log.info("sleep for sometime so that access log file get generated")
-    time.sleep( 1 )
+    time.sleep(1)
 
-    return topology.standalone.accesslog
+    return topology_st.standalone.accesslog
 
 
 def format_time(local_datetime):
@@ -106,7 +60,7 @@ def execute_logconv(inst, start_time_stamp, end_time_stamp, access_log):
     return proc.returncode
 
 
-def test_ticket47910_logconv_start_end_positive(topology, log_dir):
+def test_ticket47910_logconv_start_end_positive(topology_st, log_dir):
     '''
     Execute logconv.pl with -S and -E(endtime) with random time stamp
     This is execute successfully
@@ -125,11 +79,11 @@ def test_ticket47910_logconv_start_end_positive(topology, log_dir):
     formatted_end_time_stamp = format_time(end_time_stamp)
 
     log.info("Executing logconv.pl with -S and -E")
-    result = execute_logconv(topology.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
+    result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
     assert result == 0
 
 
-def test_ticket47910_logconv_start_end_negative(topology, log_dir):
+def test_ticket47910_logconv_start_end_negative(topology_st, log_dir):
     '''
     Execute logconv.pl with -S and -E(endtime) with random time stamp
     This is a negative test case, where endtime will be lesser than the
@@ -151,11 +105,11 @@ def test_ticket47910_logconv_start_end_negative(topology, log_dir):
     formatted_end_time_stamp = format_time(end_time_stamp)
 
     log.info("Executing logconv.pl with -S and -E")
-    result = execute_logconv(topology.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
+    result = execute_logconv(topology_st.standalone, formatted_start_time_stamp, formatted_end_time_stamp, log_dir)
     assert result == 1
 
 
-def test_ticket47910_logconv_start_end_invalid(topology, log_dir):
+def test_ticket47910_logconv_start_end_invalid(topology_st, log_dir):
     '''
     Execute logconv.pl with -S and -E(endtime) with invalid time stamp
     This is a negative test case, where it should give error message
@@ -169,12 +123,11 @@ def test_ticket47910_logconv_start_end_invalid(topology, log_dir):
     end_time_stamp = "invalid"
 
     log.info("Executing logconv.pl with -S and -E")
-    result = execute_logconv(topology.standalone, start_time_stamp, end_time_stamp, log_dir)
+    result = execute_logconv(topology_st.standalone, start_time_stamp, end_time_stamp, log_dir)
     assert result == 1
 
 
-def test_ticket47910_logconv_noaccesslogs(topology, log_dir):
-
+def test_ticket47910_logconv_noaccesslogs(topology_st, log_dir):
     '''
     Execute logconv.pl -S(starttime) without specify
     access logs location
@@ -189,7 +142,7 @@ def test_ticket47910_logconv_noaccesslogs(topology, log_dir):
     time_stamp = (datetime.now() - timedelta(minutes=2))
     formatted_time_stamp = format_time(time_stamp)
     log.info("Executing logconv.pl with -S current time")
-    cmd = [os.path.join(topology.standalone.get_bin_dir(), 'logconv.pl'), '-S', formatted_time_stamp]
+    cmd = [os.path.join(topology_st.standalone.get_bin_dir(), 'logconv.pl'), '-S', formatted_time_stamp]
     log.info(" ".join(cmd))
     proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     stdout, stderr = proc.communicate()

+ 46 - 88
dirsrvtests/tests/tickets/ticket47920_test.py

@@ -6,33 +6,29 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
+import ldap
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from ldap.controls.readentry import PostReadControl
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
-from ldap.controls.readentry import PreReadControl,PostReadControl
-
+from lib389.topologies import topology_st
 
-SCOPE_IN_CN  = 'in'
+SCOPE_IN_CN = 'in'
 SCOPE_OUT_CN = 'out'
-SCOPE_IN_DN  = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
+SCOPE_IN_DN = 'cn=%s,%s' % (SCOPE_IN_CN, SUFFIX)
 SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
 
 PROVISIONING_CN = "provisioning"
 PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
 
 ACTIVE_CN = "accounts"
-STAGE_CN  = "staged users"
+STAGE_CN = "staged users"
 DELETE_CN = "deleted users"
 ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SCOPE_IN_DN)
-STAGE_DN  = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
-DELETE_DN  = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
 
 STAGE_USER_CN = "stage guy"
 STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
@@ -60,103 +56,65 @@ FINAL_DESC = "final description"
 
 log = logging.getLogger(__name__)
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
 
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("#######")
-    topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("#######")
+    topology_st.standalone.log.info("###############################################")
 
 
-def _add_user(topology, type='active'):
+def _add_user(topology_st, type='active'):
     if type == 'active':
-        topology.standalone.add_s(Entry((ACTIVE_USER_DN, {
-                                                'objectclass': "top person inetuser".split(),
-                                                'sn': ACTIVE_USER_CN,
-                                                'cn': ACTIVE_USER_CN,
-                                                'description': INITIAL_DESC})))
+        topology_st.standalone.add_s(Entry((ACTIVE_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': ACTIVE_USER_CN,
+            'cn': ACTIVE_USER_CN,
+            'description': INITIAL_DESC})))
     elif type == 'stage':
-        topology.standalone.add_s(Entry((STAGE_USER_DN, {
-                                                'objectclass': "top person inetuser".split(),
-                                                'sn': STAGE_USER_CN,
-                                                'cn': STAGE_USER_CN})))
+        topology_st.standalone.add_s(Entry((STAGE_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': STAGE_USER_CN,
+            'cn': STAGE_USER_CN})))
     else:
-        topology.standalone.add_s(Entry((OUT_USER_DN, {
-                                        'objectclass': "top person inetuser".split(),
-                                        'sn': OUT_USER_CN,
-                                        'cn': OUT_USER_CN})))
+        topology_st.standalone.add_s(Entry((OUT_USER_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': OUT_USER_CN,
+            'cn': OUT_USER_CN})))
 
 
-def test_ticket47920_init(topology):
-    topology.standalone.add_s(Entry((SCOPE_IN_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': SCOPE_IN_DN})))
-    topology.standalone.add_s(Entry((ACTIVE_DN, {
-                                                        'objectclass': "top nscontainer".split(),
-                                                        'cn': ACTIVE_CN})))
+def test_ticket47920_init(topology_st):
+    topology_st.standalone.add_s(Entry((SCOPE_IN_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': SCOPE_IN_DN})))
+    topology_st.standalone.add_s(Entry((ACTIVE_DN, {
+        'objectclass': "top nscontainer".split(),
+        'cn': ACTIVE_CN})))
 
     # add users
-    _add_user(topology, 'active')
+    _add_user(topology_st, 'active')
 
 
-def test_ticket47920_mod_readentry_ctrl(topology):
-    _header(topology, 'MOD: with a readentry control')
+def test_ticket47920_mod_readentry_ctrl(topology_st):
+    _header(topology_st, 'MOD: with a readentry control')
 
-    topology.standalone.log.info("Check the initial value of the entry")
-    ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+    topology_st.standalone.log.info("Check the initial value of the entry")
+    ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
     assert ent.hasAttr('description')
     assert ent.getValue('description') == INITIAL_DESC
 
     pr = PostReadControl(criticality=True, attrList=['cn', 'description'])
-    _, _, _, resp_ctrls = topology.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])], serverctrls=[pr])
+    _, _, _, resp_ctrls = topology_st.standalone.modify_ext_s(ACTIVE_USER_DN,
+                                                              [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])],
+                                                              serverctrls=[pr])
 
     assert resp_ctrls[0].dn == ACTIVE_USER_DN
     assert 'description' in resp_ctrls[0].entry
     assert 'cn' in resp_ctrls[0].entry
     print(resp_ctrls[0].entry['description'])
 
-    ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+    ent = topology_st.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
     assert ent.hasAttr('description')
     assert ent.getValue('description') == FINAL_DESC
 

+ 28 - 70
dirsrvtests/tests/tickets/ticket47921_test.py

@@ -6,59 +6,16 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
 
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47921(topology):
+def test_ticket47921(topology_st):
     '''
     Test that indirect cos reflects the current value of the indirect entry
     '''
@@ -69,45 +26,46 @@ def test_ticket47921(topology):
 
     # Add COS definition
     try:
-        topology.standalone.add_s(Entry((INDIRECT_COS_DN,
-            {'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(),
-             'cosIndirectSpecifier': 'manager',
-             'cosAttribute': 'roomnumber'
-            })))
+        topology_st.standalone.add_s(Entry((INDIRECT_COS_DN,
+                                            {
+                                                'objectclass': 'top cosSuperDefinition cosIndirectDefinition ldapSubEntry'.split(),
+                                                'cosIndirectSpecifier': 'manager',
+                                                'cosAttribute': 'roomnumber'
+                                                })))
     except ldap.LDAPError as e:
         log.fatal('Failed to add cos defintion, error: ' + e.message['desc'])
         assert False
 
     # Add manager entry
     try:
-        topology.standalone.add_s(Entry((MANAGER_DN,
-            {'objectclass': 'top extensibleObject'.split(),
-             'uid': 'my manager',
-             'roomnumber': '1'
-            })))
+        topology_st.standalone.add_s(Entry((MANAGER_DN,
+                                            {'objectclass': 'top extensibleObject'.split(),
+                                             'uid': 'my manager',
+                                             'roomnumber': '1'
+                                             })))
     except ldap.LDAPError as e:
         log.fatal('Failed to add manager entry, error: ' + e.message['desc'])
         assert False
 
     # Add user entry
     try:
-        topology.standalone.add_s(Entry((USER_DN,
-            {'objectclass': 'top person organizationalPerson inetorgperson'.split(),
-             'sn': 'last',
-             'cn': 'full',
-             'givenname': 'mark',
-             'uid': 'user',
-             'manager': MANAGER_DN
-            })))
+        topology_st.standalone.add_s(Entry((USER_DN,
+                                            {'objectclass': 'top person organizationalPerson inetorgperson'.split(),
+                                             'sn': 'last',
+                                             'cn': 'full',
+                                             'givenname': 'mark',
+                                             'uid': 'user',
+                                             'manager': MANAGER_DN
+                                             })))
     except ldap.LDAPError as e:
         log.fatal('Failed to add manager entry, error: ' + e.message['desc'])
         assert False
 
     # Test COS is working
     try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                             "uid=user",
-                                             ['roomnumber'])
+        entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                "uid=user",
+                                                ['roomnumber'])
         if entry:
             if entry[0].getValue('roomnumber') != '1':
                 log.fatal('COS is not working.')
@@ -121,16 +79,16 @@ def test_ticket47921(topology):
 
     # Modify manager entry
     try:
-        topology.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', '2')])
+        topology_st.standalone.modify_s(MANAGER_DN, [(ldap.MOD_REPLACE, 'roomnumber', '2')])
     except ldap.LDAPError as e:
         log.error('Failed to modify manager entry: ' + e.message['desc'])
         assert False
 
     # Confirm COS is returning the new value
     try:
-        entry = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
-                                             "uid=user",
-                                             ['roomnumber'])
+        entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
+                                                "uid=user",
+                                                ['roomnumber'])
         if entry:
             if entry[0].getValue('roomnumber') != '2':
                 log.fatal('COS is not working after manager update.')

+ 102 - 137
dirsrvtests/tests/tickets/ticket47927_test.py

@@ -6,24 +6,14 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 EXCLUDED_CONTAINER_CN = "excluded_container"
 EXCLUDED_CONTAINER_DN = "cn=%s,%s" % (EXCLUDED_CONTAINER_CN, SUFFIX)
 
@@ -43,210 +33,183 @@ USER_4_CN = "test_4"
 USER_4_DN = "cn=%s,%s" % (USER_4_CN, EXCLUDED_BIS_CONTAINER_DN)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47927_init(topology):
-    topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+def test_ticket47927_init(topology_st):
+    topology_st.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
     try:
-        topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
-                      [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'),
-                       (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX),
-                      ])
+        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+                                        [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'telephonenumber'),
+                                         (ldap.MOD_REPLACE, 'uniqueness-subtrees', DEFAULT_SUFFIX),
+                                         ])
     except ldap.LDAPError as e:
         log.fatal('test_ticket47927: Failed to configure plugin for "telephonenumber": error ' + e.message['desc'])
         assert False
-    topology.standalone.restart(timeout=120)
-
-    topology.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
-                                                             'cn': EXCLUDED_CONTAINER_CN})))
-    topology.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
-                                                                 'cn': EXCLUDED_BIS_CONTAINER_CN})))
-    topology.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
-                                                             'cn': ENFORCED_CONTAINER_CN})))
-
-        # adding an entry on a stage with a different 'cn'
-    topology.standalone.add_s(Entry((USER_1_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           USER_1_CN,
-                                    'cn':           USER_1_CN})))
-        # adding an entry on a stage with a different 'cn'
-    topology.standalone.add_s(Entry((USER_2_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           USER_2_CN,
-                                    'cn':           USER_2_CN})))
-    topology.standalone.add_s(Entry((USER_3_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           USER_3_CN,
-                                    'cn':           USER_3_CN})))
-    topology.standalone.add_s(Entry((USER_4_DN, {
-                                    'objectclass': "top person".split(),
-                                    'sn':           USER_4_CN,
-                                    'cn':           USER_4_CN})))
-
-
-def test_ticket47927_one(topology):
+    topology_st.standalone.restart(timeout=120)
+
+    topology_st.standalone.add_s(Entry((EXCLUDED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
+                                                                'cn': EXCLUDED_CONTAINER_CN})))
+    topology_st.standalone.add_s(Entry((EXCLUDED_BIS_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
+                                                                    'cn': EXCLUDED_BIS_CONTAINER_CN})))
+    topology_st.standalone.add_s(Entry((ENFORCED_CONTAINER_DN, {'objectclass': "top nscontainer".split(),
+                                                                'cn': ENFORCED_CONTAINER_CN})))
+
+    # adding an entry on a stage with a different 'cn'
+    topology_st.standalone.add_s(Entry((USER_1_DN, {
+        'objectclass': "top person".split(),
+        'sn': USER_1_CN,
+        'cn': USER_1_CN})))
+    # adding an entry on a stage with a different 'cn'
+    topology_st.standalone.add_s(Entry((USER_2_DN, {
+        'objectclass': "top person".split(),
+        'sn': USER_2_CN,
+        'cn': USER_2_CN})))
+    topology_st.standalone.add_s(Entry((USER_3_DN, {
+        'objectclass': "top person".split(),
+        'sn': USER_3_CN,
+        'cn': USER_3_CN})))
+    topology_st.standalone.add_s(Entry((USER_4_DN, {
+        'objectclass': "top person".split(),
+        'sn': USER_4_CN,
+        'cn': USER_4_CN})))
+
+
+def test_ticket47927_one(topology_st):
     '''
     Check that uniqueness is enforce on all SUFFIX
     '''
-    UNIQUE_VALUE='1234'
+    UNIQUE_VALUE = '1234'
     try:
-        topology.standalone.modify_s(USER_1_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_1_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
     except ldap.LDAPError as e:
         log.fatal('test_ticket47927_one: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
         assert False
 
     # we expect to fail because user1 is in the scope of the plugin
     try:
-        topology.standalone.modify_s(USER_2_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_2_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
         assert False
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc']))
+        log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
+        USER_2_DN, e.message['desc']))
         pass
 
-
     # we expect to fail because user1 is in the scope of the plugin
     try:
-        topology.standalone.modify_s(USER_3_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_3_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_one: unexpected success to set the telephonenumber for %s' % (USER_3_DN))
         assert False
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+        log.fatal('test_ticket47927_one: Failed (expected) to set the telephonenumber for %s: %s' % (
+        USER_3_DN, e.message['desc']))
         pass
 
 
-def test_ticket47927_two(topology):
+def test_ticket47927_two(topology_st):
     '''
     Exclude the EXCLUDED_CONTAINER_DN from the uniqueness plugin
     '''
     try:
-        topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
-                      [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)])
+        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+                                        [(ldap.MOD_REPLACE, 'uniqueness-exclude-subtrees', EXCLUDED_CONTAINER_DN)])
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_CONTAINER_DN, e.message['desc']))
+        log.fatal('test_ticket47927_two: Failed to configure plugin for to exclude %s: error %s' % (
+        EXCLUDED_CONTAINER_DN, e.message['desc']))
         assert False
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.restart(timeout=120)
 
 
-def test_ticket47927_three(topology):
+def test_ticket47927_three(topology_st):
     '''
     Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
     First case: it exists an entry (with the same attribute value) in the scope
     of the plugin and we set the value in an entry that is in an excluded scope
     '''
-    UNIQUE_VALUE='9876'
+    UNIQUE_VALUE = '9876'
     try:
-        topology.standalone.modify_s(USER_1_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_1_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
     except ldap.LDAPError as e:
         log.fatal('test_ticket47927_three: Failed to set the telephonenumber ' + e.message['desc'])
         assert False
 
     # we should not be allowed to set this value (because user1 is in the scope)
     try:
-        topology.standalone.modify_s(USER_2_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_2_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_three: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
         assert False
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc']))
-
+        log.fatal('test_ticket47927_three: Failed (expected) to set the telephonenumber for %s: %s' % (
+        USER_2_DN, e.message['desc']))
 
     # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
     try:
-        topology.standalone.modify_s(USER_3_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_3_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_three: success to set the telephonenumber for %s' % (USER_3_DN))
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+        log.fatal('test_ticket47927_three: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+        USER_3_DN, e.message['desc']))
         assert False
 
 
-def test_ticket47927_four(topology):
+def test_ticket47927_four(topology_st):
     '''
     Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
     Second case: it exists an entry (with the same attribute value) in an excluded scope
     of the plugin and we set the value in an entry is in the scope
     '''
-    UNIQUE_VALUE='1111'
+    UNIQUE_VALUE = '1111'
     # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
     try:
-        topology.standalone.modify_s(USER_3_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_3_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_four: success to set the telephonenumber for %s' % USER_3_DN)
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+        log.fatal('test_ticket47927_four: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+        USER_3_DN, e.message['desc']))
         assert False
 
-
     # we should be allowed to set this value (because user3 is excluded from scope)
     try:
-        topology.standalone.modify_s(USER_1_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_1_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
+        log.fatal(
+            'test_ticket47927_four: Failed to set the telephonenumber for %s: %s' % (USER_1_DN, e.message['desc']))
         assert False
 
     # we should not be allowed to set this value (because user1 is in the scope)
     try:
-        topology.standalone.modify_s(USER_2_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_2_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_four: unexpected success to set the telephonenumber %s' % USER_2_DN)
         assert False
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN, e.message['desc']))
+        log.fatal('test_ticket47927_four: Failed (expected) to set the telephonenumber for %s: %s' % (
+        USER_2_DN, e.message['desc']))
         pass
 
 
-def test_ticket47927_five(topology):
+def test_ticket47927_five(topology_st):
     '''
     Exclude the EXCLUDED_BIS_CONTAINER_DN from the uniqueness plugin
     '''
     try:
-        topology.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
-                      [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)])
+        topology_st.standalone.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',
+                                        [(ldap.MOD_ADD, 'uniqueness-exclude-subtrees', EXCLUDED_BIS_CONTAINER_DN)])
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (EXCLUDED_BIS_CONTAINER_DN, e.message['desc']))
+        log.fatal('test_ticket47927_five: Failed to configure plugin for to exclude %s: error %s' % (
+        EXCLUDED_BIS_CONTAINER_DN, e.message['desc']))
         assert False
-    topology.standalone.restart(timeout=120)
-    topology.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)
+    topology_st.standalone.restart(timeout=120)
+    topology_st.standalone.getEntry('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config', ldap.SCOPE_BASE)
 
 
-def test_ticket47927_six(topology):
+def test_ticket47927_six(topology_st):
     '''
     Check that uniqueness is enforced on full SUFFIX except EXCLUDED_CONTAINER_DN
     and EXCLUDED_BIS_CONTAINER_DN
@@ -255,37 +218,39 @@ def test_ticket47927_six(topology):
     '''
     UNIQUE_VALUE = '222'
     try:
-        topology.standalone.modify_s(USER_1_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_1_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
     except ldap.LDAPError as e:
         log.fatal('test_ticket47927_six: Failed to set the telephonenumber ' + e.message['desc'])
         assert False
 
     # we should not be allowed to set this value (because user1 is in the scope)
     try:
-        topology.standalone.modify_s(USER_2_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_2_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_six: unexpected success to set the telephonenumber for %s' % (USER_2_DN))
         assert False
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (USER_2_DN , e.message['desc']))
-
+        log.fatal('test_ticket47927_six: Failed (expected) to set the telephonenumber for %s: %s' % (
+        USER_2_DN, e.message['desc']))
 
     # USER_3_DN is in EXCLUDED_CONTAINER_DN so update should be successful
     try:
-        topology.standalone.modify_s(USER_3_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_3_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_3_DN))
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_3_DN, e.message['desc']))
+        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+        USER_3_DN, e.message['desc']))
         assert False
     # USER_4_DN is in EXCLUDED_CONTAINER_DN so update should be successful
     try:
-        topology.standalone.modify_s(USER_4_DN,
-                      [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
+        topology_st.standalone.modify_s(USER_4_DN,
+                                        [(ldap.MOD_REPLACE, 'telephonenumber', UNIQUE_VALUE)])
         log.fatal('test_ticket47927_six: success to set the telephonenumber for %s' % (USER_4_DN))
     except ldap.LDAPError as e:
-        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (USER_4_DN, e.message['desc']))
+        log.fatal('test_ticket47927_six: Failed (unexpected) to set the telephonenumber for %s: %s' % (
+        USER_4_DN, e.message['desc']))
         assert False
 
 

+ 36 - 80
dirsrvtests/tests/tickets/ticket47931_test.py

@@ -1,16 +1,9 @@
-import os
-import sys
-import time
-import ldap
-import logging
-import pytest
 import threading
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
@@ -25,12 +18,6 @@ MEMBER_DN_COMP = "uid=member"
 TIME_OUT = 5
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
 class modifySecondBackendThread(threading.Thread):
     def __init__(self, inst, timeout):
         threading.Thread.__init__(self)
@@ -57,38 +44,7 @@ class modifySecondBackendThread(threading.Thread):
         log.info('Finished modifying second suffix')
 
 
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47931(topology):
+def test_ticket47931(topology_st):
     """Test Retro Changelog and MemberOf deadlock fix.
        Verification steps:
            - Enable retro cl and memberOf.
@@ -105,56 +61,56 @@ def test_ticket47931(topology):
 
     # Enable dynamic plugins to make plugin configuration easier
     try:
-        topology.standalone.modify_s(DN_CONFIG,
-                                     [(ldap.MOD_REPLACE,
-                                       'nsslapd-dynamic-plugins',
-                                       'on')])
+        topology_st.standalone.modify_s(DN_CONFIG,
+                                        [(ldap.MOD_REPLACE,
+                                          'nsslapd-dynamic-plugins',
+                                          'on')])
     except ldap.LDAPError as e:
         ldap.error('Failed to enable dynamic plugins! ' + e.message['desc'])
         assert False
 
     # Enable the plugins
-    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
-    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
 
     # Create second backend
-    topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
-    topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)
+    topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: SECOND_BACKEND})
+    topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=SECOND_BACKEND)
 
     # Create the root node of the second backend
     try:
-        topology.standalone.add_s(Entry((SECOND_SUFFIX,
-                                  {'objectclass': 'top domain'.split(),
-                                   'dc': 'deadlock'})))
+        topology_st.standalone.add_s(Entry((SECOND_SUFFIX,
+                                            {'objectclass': 'top domain'.split(),
+                                             'dc': 'deadlock'})))
     except ldap.LDAPError as e:
         log.fatal('Failed to create suffix entry: error ' + e.message['desc'])
         assert False
 
     # Configure retrocl scope
     try:
-        topology.standalone.modify_s(RETROCL_PLUGIN_DN,
-                                     [(ldap.MOD_REPLACE,
-                                       'nsslapd-include-suffix',
-                                       DEFAULT_SUFFIX)])
+        topology_st.standalone.modify_s(RETROCL_PLUGIN_DN,
+                                        [(ldap.MOD_REPLACE,
+                                          'nsslapd-include-suffix',
+                                          DEFAULT_SUFFIX)])
     except ldap.LDAPError as e:
         ldap.error('Failed to configure retrocl plugin: ' + e.message['desc'])
         assert False
 
     # Configure memberOf group attribute
     try:
-        topology.standalone.modify_s(MEMBEROF_PLUGIN_DN,
-                                     [(ldap.MOD_REPLACE,
-                                       'memberofgroupattr',
-                                       'uniquemember')])
+        topology_st.standalone.modify_s(MEMBEROF_PLUGIN_DN,
+                                        [(ldap.MOD_REPLACE,
+                                          'memberofgroupattr',
+                                          'uniquemember')])
     except ldap.LDAPError as e:
         log.fatal('Failed to configure memberOf plugin: error ' + e.message['desc'])
         assert False
 
     # Create group
     try:
-        topology.standalone.add_s(Entry((GROUP_DN,
-                                         {'objectclass': 'top extensibleObject'.split(),
-                                          'cn': 'group'})))
+        topology_st.standalone.add_s(Entry((GROUP_DN,
+                                            {'objectclass': 'top extensibleObject'.split(),
+                                             'cn': 'group'})))
     except ldap.LDAPError as e:
         log.fatal('Failed to add grouo: error ' + e.message['desc'])
         assert False
@@ -163,27 +119,27 @@ def test_ticket47931(topology):
     for idx in range(1, 1500):
         try:
             USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
-            topology.standalone.add_s(Entry((USER_DN,
-                                             {'objectclass': 'top extensibleObject'.split(),
-                                              'uid': 'member%d' % (x)})))
+            topology_st.standalone.add_s(Entry((USER_DN,
+                                                {'objectclass': 'top extensibleObject'.split(),
+                                                 'uid': 'member%d' % (x)})))
         except ldap.LDAPError as e:
             log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
             assert False
 
     # Modify second backend (separate thread)
-    mod_backend_thrd = modifySecondBackendThread(topology.standalone, TIME_OUT)
+    mod_backend_thrd = modifySecondBackendThread(topology_st.standalone, TIME_OUT)
     mod_backend_thrd.start()
 
     # Add members to the group - set timeout
     log.info('Adding members to the group...')
-    topology.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
+    topology_st.standalone.set_option(ldap.OPT_TIMEOUT, TIME_OUT)
     for idx in range(1, 1500):
         try:
             MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
-            topology.standalone.modify_s(GROUP_DN,
-                                         [(ldap.MOD_ADD,
-                                           'uniquemember',
-                                           MEMBER_VAL)])
+            topology_st.standalone.modify_s(GROUP_DN,
+                                            [(ldap.MOD_ADD,
+                                              'uniquemember',
+                                              MEMBER_VAL)])
         except ldap.TIMEOUT:
             log.fatal('Deadlock!  Bug verification failed.')
             assert False
@@ -204,4 +160,4 @@ if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s %s" % CURRENT_FILE)
+    pytest.main("-s %s" % CURRENT_FILE)

+ 40 - 82
dirsrvtests/tests/tickets/ticket47937_test.py

@@ -6,72 +6,29 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import time
+
 import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47937(topology):
+def test_ticket47937(topology_st):
     """
         Test that DNA plugin only accepts valid attributes for "dnaType"
     """
 
     log.info("Creating \"ou=people\"...")
     try:
-        topology.standalone.add_s(Entry(('ou=people,' + SUFFIX, {
-                                         'objectclass': 'top organizationalunit'.split(),
-                                         'ou': 'people'
-                                         })))
+        topology_st.standalone.add_s(Entry(('ou=people,' + SUFFIX, {
+            'objectclass': 'top organizationalunit'.split(),
+            'ou': 'people'
+        })))
 
     except ldap.ALREADY_EXISTS:
         pass
@@ -81,10 +38,10 @@ def test_ticket47937(topology):
 
     log.info("Creating \"ou=ranges\"...")
     try:
-        topology.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, {
-                                         'objectclass': 'top organizationalunit'.split(),
-                                         'ou': 'ranges'
-                                         })))
+        topology_st.standalone.add_s(Entry(('ou=ranges,' + SUFFIX, {
+            'objectclass': 'top organizationalunit'.split(),
+            'ou': 'ranges'
+        })))
 
     except ldap.LDAPError as e:
         log.error('Failed to add ou=ranges org unit: error ' + e.message['desc'])
@@ -92,10 +49,10 @@ def test_ticket47937(topology):
 
     log.info("Creating \"cn=entry\"...")
     try:
-        topology.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, {
-                                         'objectclass': 'top groupofuniquenames'.split(),
-                                         'cn': 'entry'
-                                         })))
+        topology_st.standalone.add_s(Entry(('cn=entry,ou=people,' + SUFFIX, {
+            'objectclass': 'top groupofuniquenames'.split(),
+            'cn': 'entry'
+        })))
 
     except ldap.LDAPError as e:
         log.error('Failed to add test entry: error ' + e.message['desc'])
@@ -103,13 +60,13 @@ def test_ticket47937(topology):
 
     log.info("Creating DNA shared config entry...")
     try:
-        topology.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, {
-                                         'objectclass': 'top dnaSharedConfig'.split(),
-                                         'dnaHostname': 'localhost.localdomain',
-                                         'dnaPortNum': '389',
-                                         'dnaSecurePortNum': '636',
-                                         'dnaRemainingValues': '9501'
-                                         })))
+        topology_st.standalone.add_s(Entry(('dnaHostname=localhost.localdomain+dnaPortNum=389,ou=ranges,%s' % SUFFIX, {
+            'objectclass': 'top dnaSharedConfig'.split(),
+            'dnaHostname': 'localhost.localdomain',
+            'dnaPortNum': '389',
+            'dnaSecurePortNum': '636',
+            'dnaRemainingValues': '9501'
+        })))
 
     except ldap.LDAPError as e:
         log.error('Failed to add shared config entry: error ' + e.message['desc'])
@@ -117,16 +74,17 @@ def test_ticket47937(topology):
 
     log.info("Add dna plugin config entry...")
     try:
-        topology.standalone.add_s(Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', {
-                                         'objectclass': 'top dnaPluginConfig'.split(),
-                                         'dnaType': 'description',
-                                         'dnaMaxValue': '10000',
-                                         'dnaMagicRegen': '0',
-                                         'dnaFilter': '(objectclass=top)',
-                                         'dnaScope': 'ou=people,%s' % SUFFIX,
-                                         'dnaNextValue': '500',
-                                         'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
-                                         })))
+        topology_st.standalone.add_s(
+            Entry(('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config', {
+                'objectclass': 'top dnaPluginConfig'.split(),
+                'dnaType': 'description',
+                'dnaMaxValue': '10000',
+                'dnaMagicRegen': '0',
+                'dnaFilter': '(objectclass=top)',
+                'dnaScope': 'ou=people,%s' % SUFFIX,
+                'dnaNextValue': '500',
+                'dnaSharedCfgDN': 'ou=ranges,%s' % SUFFIX
+            })))
 
     except ldap.LDAPError as e:
         log.error('Failed to add DNA config entry: error ' + e.message['desc'])
@@ -134,22 +92,22 @@ def test_ticket47937(topology):
 
     log.info("Enable the DNA plugin...")
     try:
-        topology.standalone.plugins.enable(name=PLUGIN_DNA)
+        topology_st.standalone.plugins.enable(name=PLUGIN_DNA)
     except e:
         log.error("Failed to enable DNA Plugin: error " + e.message['desc'])
         assert False
 
     log.info("Restarting the server...")
-    topology.standalone.stop(timeout=120)
+    topology_st.standalone.stop(timeout=120)
     time.sleep(1)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
     time.sleep(3)
 
     log.info("Apply an invalid attribute to the DNA config(dnaType: foo)...")
 
     try:
-        topology.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config',
-                                     [(ldap.MOD_REPLACE, 'dnaType', 'foo')])
+        topology_st.standalone.modify_s('cn=dna config,cn=Distributed Numeric Assignment Plugin,cn=plugins,cn=config',
+                                        [(ldap.MOD_REPLACE, 'dnaType', 'foo')])
     except ldap.LDAPError as e:
         log.info('Operation failed as expected (error: %s)' % e.message['desc'])
     else:

+ 24 - 71
dirsrvtests/tests/tickets/ticket47950_test.py

@@ -6,17 +6,11 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -24,48 +18,7 @@ USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
 USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47950(topology):
+def test_ticket47950(topology_st):
     """
         Testing nsslapd-plugin-binddn-tracking does not cause issues around
         access control and reconfiguring replication/repl agmt.
@@ -77,7 +30,7 @@ def test_ticket47950(topology):
     # Turn on bind dn tracking
     #
     try:
-        topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')])
+        topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-plugin-binddn-tracking', 'on')])
         log.info('nsslapd-plugin-binddn-tracking enabled.')
     except ldap.LDAPError as e:
         log.error('Failed to enable bind dn tracking: ' + e.message['desc'])
@@ -87,21 +40,21 @@ def test_ticket47950(topology):
     # Add two users
     #
     try:
-        topology.standalone.add_s(Entry((USER1_DN, {
-                                        'objectclass': "top person inetuser".split(),
-                                        'userpassword': "password",
-                                        'sn': "1",
-                                        'cn': "user 1"})))
+        topology_st.standalone.add_s(Entry((USER1_DN, {
+            'objectclass': "top person inetuser".split(),
+            'userpassword': "password",
+            'sn': "1",
+            'cn': "user 1"})))
         log.info('Added test user %s' % USER1_DN)
     except ldap.LDAPError as e:
         log.error('Failed to add %s: %s' % (USER1_DN, e.message['desc']))
         assert False
 
     try:
-        topology.standalone.add_s(Entry((USER2_DN, {
-                                        'objectclass': "top person inetuser".split(),
-                                        'sn': "2",
-                                        'cn': "user 2"})))
+        topology_st.standalone.add_s(Entry((USER2_DN, {
+            'objectclass': "top person inetuser".split(),
+            'sn': "2",
+            'cn': "user 2"})))
         log.info('Added test user %s' % USER2_DN)
     except ldap.LDAPError as e:
         log.error('Failed to add user1: ' + e.message['desc'])
@@ -112,9 +65,9 @@ def test_ticket47950(topology):
     #
     try:
         acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \
-             ';allow (all) (userdn = "ldap:///%s");)' % USER1_DN
+                 ';allow (all) (userdn = "ldap:///%s");)' % USER1_DN
 
-        topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
+        topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', acival)])
         log.info('Added aci')
     except ldap.LDAPError as e:
         log.error('Failed to add aci: ' + e.message['desc'])
@@ -124,14 +77,14 @@ def test_ticket47950(topology):
     # Make modification as user
     #
     try:
-        topology.standalone.simple_bind_s(USER1_DN, "password")
+        topology_st.standalone.simple_bind_s(USER1_DN, "password")
         log.info('Bind as user %s successful' % USER1_DN)
     except ldap.LDAPError as e:
         log.error('Failed to bind as user1: ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.modify_s(USER2_DN, [(ldap.MOD_REPLACE, 'cn', 'new value')])
+        topology_st.standalone.modify_s(USER2_DN, [(ldap.MOD_REPLACE, 'cn', 'new value')])
         log.info('%s successfully modified user %s' % (USER1_DN, USER2_DN))
     except ldap.LDAPError as e:
         log.error('Failed to update user2: ' + e.message['desc'])
@@ -141,15 +94,15 @@ def test_ticket47950(topology):
     # Setup replica and create a repl agmt
     #
     try:
-        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
         log.info('Bind as %s successful' % DN_DM)
     except ldap.LDAPError as e:
         log.error('Failed to bind as rootDN: ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
-                                                  replicaId=REPLICAID_MASTER_1)
+        topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+                                                         replicaId=REPLICAID_MASTER_1)
         log.info('Successfully enabled replication.')
     except ValueError:
         log.error('Failed to enable replication')
@@ -162,8 +115,8 @@ def test_ticket47950(topology):
                   RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
 
     try:
-        repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, host="127.0.0.1",
-                                                          port="7777", properties=properties)
+        repl_agreement = topology_st.standalone.agreement.create(suffix=DEFAULT_SUFFIX, host="127.0.0.1",
+                                                                 port="7777", properties=properties)
         log.info('Successfully created replication agreement')
     except InvalidArgumentError as e:
         log.error('Failed to create replication agreement: ' + e.message['desc'])
@@ -174,7 +127,7 @@ def test_ticket47950(topology):
     #
     try:
         properties = {REPLICA_ID: "7"}
-        topology.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, properties)
+        topology_st.standalone.replica.setProperties(DEFAULT_SUFFIX, None, None, properties)
         log.info('Successfully modified replica')
     except ldap.LDAPError as e:
         log.error('Failed to update replica config: ' + e.message['desc'])
@@ -185,7 +138,7 @@ def test_ticket47950(topology):
     #
     try:
         properties = {RA_CONSUMER_PORT: "8888"}
-        topology.standalone.agreement.setProperties(None, repl_agreement, None, properties)
+        topology_st.standalone.agreement.setProperties(None, repl_agreement, None, properties)
         log.info('Successfully modified replication agreement')
     except ValueError:
         log.error('Failed to update replica agreement: ' + repl_agreement)

+ 8 - 54
dirsrvtests/tests/tickets/ticket47953_test.py

@@ -6,63 +6,17 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
 import shutil
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    #request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
 
-def test_ticket47953(topology):
+def test_ticket47953(topology_st):
     """
         Test that we can delete an aci that has an invalid syntax.
         Start by importing an ldif with a "bad" aci, then simply try
@@ -74,16 +28,16 @@ def test_ticket47953(topology):
     #
     # Import an invalid ldif
     #
-    ldif_file = (topology.standalone.getDir(__file__, DATA_DIR) +
+    ldif_file = (topology_st.standalone.getDir(__file__, DATA_DIR) +
                  "ticket47953/ticket47953.ldif")
     try:
-        ldif_dir = topology.standalone.get_ldif_dir()
+        ldif_dir = topology_st.standalone.get_ldif_dir()
         shutil.copy(ldif_file, ldif_dir)
         ldif_file = ldif_dir + '/ticket47953.ldif'
     except:
         log.fatal('Failed to copy ldif to instance ldif dir')
         assert False
-    importTask = Tasks(topology.standalone)
+    importTask = Tasks(topology_st.standalone)
     args = {TASK_WAIT: True}
     try:
         importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
@@ -100,7 +54,7 @@ def test_ticket47953(topology):
 
     log.info('Attempting to remove invalid aci...')
     try:
-        topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)])
+        topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_DELETE, 'aci', acival)])
         log.info('Removed invalid aci.')
     except ldap.LDAPError as e:
         log.error('Failed to remove invalid aci: ' + e.message['desc'])

+ 30 - 71
dirsrvtests/tests/tickets/ticket47963_test.py

@@ -6,58 +6,17 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
 
-def test_ticket47963(topology):
+def test_ticket47963(topology_st):
     '''
     Test that the memberOf plugin works correctly after setting:
 
@@ -73,53 +32,53 @@ def test_ticket47963(topology):
     #
     # Enable the plugin and configure the skip nested attribute, then restart the server
     #
-    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
     try:
-        topology.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')])
+        topology_st.standalone.modify_s(PLUGIN_DN, [(ldap.MOD_REPLACE, 'memberofskipnested', 'on')])
     except ldap.LDAPError as e:
         log.error('test_automember: Failed to modify config entry: error ' + e.message['desc'])
         assert False
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     #
     # Add our groups, users, memberships, etc
     #
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'test_user'
-                          })))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'test_user'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add teset user: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((GROUP_DN1, {
-                          'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
-                          'cn': 'group1',
-                          'member': USER_DN
-                          })))
+        topology_st.standalone.add_s(Entry((GROUP_DN1, {
+            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+            'cn': 'group1',
+            'member': USER_DN
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add group1: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((GROUP_DN2, {
-                          'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
-                          'cn': 'group2',
-                          'member': USER_DN
-                          })))
+        topology_st.standalone.add_s(Entry((GROUP_DN2, {
+            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+            'cn': 'group2',
+            'member': USER_DN
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add group2: error ' + e.message['desc'])
         assert False
 
     # Add group with no member(yet)
     try:
-        topology.standalone.add_s(Entry((GROUP_DN3, {
-                          'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
-                          'cn': 'group'
-                          })))
+        topology_st.standalone.add_s(Entry((GROUP_DN3, {
+            'objectclass': 'top groupOfNames groupOfUniqueNames extensibleObject'.split(),
+            'cn': 'group'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add group3: error ' + e.message['desc'])
         assert False
@@ -130,7 +89,7 @@ def test_ticket47963(topology):
     #
     try:
         member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 + '))')
-        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
+        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
         if not entries:
             log.fatal('User is missing expected memberOf attrs')
             assert False
@@ -140,7 +99,7 @@ def test_ticket47963(topology):
 
     # Add the user to the group
     try:
-        topology.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)])
+        topology_st.standalone.modify_s(GROUP_DN3, [(ldap.MOD_ADD, 'member', USER_DN)])
     except ldap.LDAPError as e:
         log.error('Failed to member to group: error ' + e.message['desc'])
         assert False
@@ -149,8 +108,8 @@ def test_ticket47963(topology):
     # Check that the test user is a "memberOf" all three groups
     try:
         member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN2 +
-                        ')(memberOf=' + GROUP_DN3 + '))')
-        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
+                         ')(memberOf=' + GROUP_DN3 + '))')
+        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
         if not entries:
             log.fatal('User is missing expected memberOf attrs')
             assert False
@@ -162,7 +121,7 @@ def test_ticket47963(topology):
     # Delete group2, and check memberOf values in the user entry
     #
     try:
-        topology.standalone.delete_s(GROUP_DN2)
+        topology_st.standalone.delete_s(GROUP_DN2)
     except ldap.LDAPError as e:
         log.error('Failed to delete test group2: ' + e.message['desc'])
         assert False
@@ -170,7 +129,7 @@ def test_ticket47963(topology):
 
     try:
         member_filter = ('(&(memberOf=' + GROUP_DN1 + ')(memberOf=' + GROUP_DN3 + '))')
-        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
+        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, member_filter)
         if not entries:
             log.fatal('User incorrect memberOf attrs')
             assert False

+ 5 - 109
dirsrvtests/tests/tickets/ticket47966_test.py

@@ -6,128 +6,24 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
-installation1_prefix = None
-m1_m2_agmt = ""
-
-
-class TopologyReplication(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating master 1...
-    master1 = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-    instance_master1 = master1.exists()
-    if instance_master1:
-        master1.delete()
-    master1.create()
-    master1.open()
-    master1.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
-    # Creating master 2...
-    master2 = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-    instance_master2 = master2.exists()
-    if instance_master2:
-        master2.delete()
-    master2.create()
-    master2.open()
-    master2.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    #
-    # Create all the agreements
-    #
-    # Creating agreement from master 1 to master 2
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    global m1_m2_agmt
-    m1_m2_agmt = master1.agreement.create(suffix=DEFAULT_SUFFIX, host=master2.host, port=master2.port, properties=properties)
-    if not m1_m2_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m1_m2_agmt)
-
-    # Creating agreement from master 2 to master 1
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    m2_m1_agmt = master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties)
-    if not m2_m1_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m2_m1_agmt)
-
-    # Allow the replicas to get situated with the new agreements...
-    time.sleep(5)
-
-    #
-    # Initialize all the agreements
-    #
-    master1.agreement.init(DEFAULT_SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(m1_m2_agmt)
-
-    # Check replication is working...
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    return TopologyReplication(master1, master2)
 
 
-def test_ticket47966(topology):
+def test_ticket47966(topology_m2):
     '''
     Testing bulk import when the backend with VLV was recreated.
     If the test passes without the server crash, 47966 is verified.
     '''
     log.info('Testing Ticket 47966 - [VLV] slapd crashes during Dogtag clone reinstallation')
-    M1 = topology.master1
-    M2 = topology.master2
+    M1 = topology_m2.ms["master1"]
+    M2 = topology_m2.ms["master2"]
+    m1_m2_agmt = topology_m2.ms["master1_agmts"]["m1_m2"]
 
     log.info('0. Create a VLV index on Master 2.')
     # get the backend entry

+ 10 - 57
dirsrvtests/tests/tickets/ticket47970_test.py

@@ -6,18 +6,12 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
 import logging
+
+import ldap.sasl
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -25,48 +19,7 @@ USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
 USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47970(topology):
+def test_ticket47970(topology_st):
     """
         Testing that a failed SASL bind does not trigger account lockout -
         which would attempt to update the passwordRetryCount on the root dse entry
@@ -78,14 +31,14 @@ def test_ticket47970(topology):
     # Enable account lockout
     #
     try:
-        topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
+        topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
         log.info('account lockout enabled.')
     except ldap.LDAPError as e:
         log.error('Failed to enable account lockout: ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
+        topology_st.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
         log.info('passwordMaxFailure set.')
     except ldap.LDAPError as e:
         log.error('Failed to to set passwordMaxFailure: ' + e.message['desc'])
@@ -99,7 +52,7 @@ def test_ticket47970(topology):
         user_name = "mark"
         pw = "secret"
         auth_tokens = ldap.sasl.digest_md5(user_name, pw)
-        topology.standalone.sasl_interactive_bind_s("", auth_tokens)
+        topology_st.standalone.sasl_interactive_bind_s("", auth_tokens)
     except ldap.INVALID_CREDENTIALS as e:
         log.info("SASL Bind failed as expected")
         failed_as_expected = True
@@ -112,9 +65,9 @@ def test_ticket47970(topology):
     # Check that passwordRetryCount was not set on the root dse entry
     #
     try:
-        entry = topology.standalone.search_s("", ldap.SCOPE_BASE,
-                                             "passwordRetryCount=*",
-                                             ['passwordRetryCount'])
+        entry = topology_st.standalone.search_s("", ldap.SCOPE_BASE,
+                                                "passwordRetryCount=*",
+                                                ['passwordRetryCount'])
     except ldap.LDAPError as e:
         log.error('Failed to search Root DSE entry: ' + e.message['desc'])
         assert False

+ 16 - 60
dirsrvtests/tests/tickets/ticket47973_test.py

@@ -6,15 +6,12 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import time
-import ldap
-import ldap.sasl
 import logging
+
+import ldap.sasl
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -22,47 +19,6 @@ USER_DN = 'uid=user1,%s' % (DEFAULT_SUFFIX)
 SCHEMA_RELOAD_COUNT = 10
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
 def task_complete(conn, task_dn):
     finished = False
 
@@ -81,7 +37,7 @@ def task_complete(conn, task_dn):
     return finished
 
 
-def test_ticket47973(topology):
+def test_ticket47973(topology_st):
     """
         During the schema reload task there is a small window where the new schema is not loaded
         into the asi hashtables - this results in searches not returning entries.
@@ -93,10 +49,10 @@ def test_ticket47973(topology):
     # Add a user
     #
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user1: error ' + e.message['desc'])
         assert False
@@ -113,10 +69,10 @@ def test_ticket47973(topology):
 
         TASK_DN = 'cn=task-' + str(task_count) + ',cn=schema reload task, cn=tasks, cn=config'
         try:
-            topology.standalone.add_s(Entry((TASK_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'cn': 'task-' + str(task_count)
-                          })))
+            topology_st.standalone.add_s(Entry((TASK_DN, {
+                'objectclass': 'top extensibleObject'.split(),
+                'cn': 'task-' + str(task_count)
+            })))
         except ldap.LDAPError as e:
             log.error('Failed to add task entry: error ' + e.message['desc'])
             assert False
@@ -130,9 +86,9 @@ def test_ticket47973(topology):
             # Now check the user is still being returned
             #
             try:
-                entries = topology.standalone.search_s(DEFAULT_SUFFIX,
-                                                      ldap.SCOPE_SUBTREE,
-                                                      '(uid=user1)')
+                entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,
+                                                          ldap.SCOPE_SUBTREE,
+                                                          '(uid=user1)')
                 if not entries or not entries[0]:
                     log.fatal('User was not returned from search!')
                     assert False
@@ -143,7 +99,7 @@ def test_ticket47973(topology):
             #
             # Check if task is complete
             #
-            if task_complete(topology.standalone, TASK_DN):
+            if task_complete(topology_st.standalone, TASK_DN):
                 break
 
             search_count += 1

+ 75 - 122
dirsrvtests/tests/tickets/ticket47976_test.py

@@ -1,162 +1,115 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
-PEOPLE_OU='people'
+PEOPLE_OU = 'people'
 PEOPLE_DN = "ou=%s,%s" % (PEOPLE_OU, SUFFIX)
-GROUPS_OU='groups'
+GROUPS_OU = 'groups'
 GROUPS_DN = "ou=%s,%s" % (GROUPS_OU, SUFFIX)
-DEFINITIONS_CN='definitions'
+DEFINITIONS_CN = 'definitions'
 DEFINITIONS_DN = "cn=%s,%s" % (DEFINITIONS_CN, SUFFIX)
-TEMPLATES_CN='templates'
+TEMPLATES_CN = 'templates'
 TEMPLATES_DN = "cn=%s,%s" % (TEMPLATES_CN, SUFFIX)
-MANAGED_GROUP_TEMPLATES_CN='managed group templates'
-MANAGED_GROUP_TEMPLATES_DN='cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN)
-MANAGED_GROUP_MEP_TMPL_CN='UPG'
-MANAGED_GROUP_MEP_TMPL_DN='cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN)
-MANAGED_GROUP_DEF_CN='managed group definition'
-MANAGED_GROUP_DEF_DN='cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN)
+MANAGED_GROUP_TEMPLATES_CN = 'managed group templates'
+MANAGED_GROUP_TEMPLATES_DN = 'cn=%s,%s' % (MANAGED_GROUP_TEMPLATES_CN, TEMPLATES_DN)
+MANAGED_GROUP_MEP_TMPL_CN = 'UPG'
+MANAGED_GROUP_MEP_TMPL_DN = 'cn=%s,%s' % (MANAGED_GROUP_MEP_TMPL_CN, MANAGED_GROUP_TEMPLATES_DN)
+MANAGED_GROUP_DEF_CN = 'managed group definition'
+MANAGED_GROUP_DEF_DN = 'cn=%s,%s' % (MANAGED_GROUP_DEF_CN, DEFINITIONS_DN)
 
-MAX_ACCOUNTS=2
+MAX_ACCOUNTS = 2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
 
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47976_init(topology):
+def test_ticket47976_init(topology_st):
     """Create mep definitions and templates"""
 
     try:
-        topology.standalone.add_s(Entry((PEOPLE_DN, {
-                                            'objectclass': "top extensibleObject".split(),
-                                            'ou': 'people'})))
+        topology_st.standalone.add_s(Entry((PEOPLE_DN, {
+            'objectclass': "top extensibleObject".split(),
+            'ou': 'people'})))
     except ldap.ALREADY_EXISTS:
         pass
     try:
-        topology.standalone.add_s(Entry((GROUPS_DN, {
-                                            'objectclass': "top extensibleObject".split(),
-                                            'ou': GROUPS_OU})))
+        topology_st.standalone.add_s(Entry((GROUPS_DN, {
+            'objectclass': "top extensibleObject".split(),
+            'ou': GROUPS_OU})))
     except ldap.ALREADY_EXISTS:
         pass
-    topology.standalone.add_s(Entry((DEFINITIONS_DN, {
-                                            'objectclass': "top nsContainer".split(),
-                                            'cn': DEFINITIONS_CN})))
-    topology.standalone.add_s(Entry((TEMPLATES_DN, {
-                                            'objectclass': "top nsContainer".split(),
-                                            'cn': TEMPLATES_CN})))
-    topology.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, {
-                                        'objectclass': "top extensibleObject".split(),
-                                        'cn': MANAGED_GROUP_DEF_CN,
-                                        'originScope': PEOPLE_DN,
-                                        'originFilter': '(objectclass=posixAccount)',
-                                        'managedBase': GROUPS_DN,
-                                        'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN})))
-
-    topology.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, {
-                                            'objectclass': "top nsContainer".split(),
-                                            'cn': MANAGED_GROUP_TEMPLATES_CN})))
-
-    topology.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, {
-                                            'objectclass': "top mepTemplateEntry".split(),
-                                            'cn': MANAGED_GROUP_MEP_TMPL_CN,
-                                            'mepRDNAttr': 'cn',
-                                            'mepStaticAttr': ['objectclass: posixGroup',
-                                                              'objectclass: extensibleObject'],
-                                            'mepMappedAttr': ['cn: $cn|uid: $cn',
-                                                              'gidNumber: $uidNumber']})))
-
-
-    topology.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
-    topology.standalone.restart(timeout=10)
-
-
-def test_ticket47976_1(topology):
+    topology_st.standalone.add_s(Entry((DEFINITIONS_DN, {
+        'objectclass': "top nsContainer".split(),
+        'cn': DEFINITIONS_CN})))
+    topology_st.standalone.add_s(Entry((TEMPLATES_DN, {
+        'objectclass': "top nsContainer".split(),
+        'cn': TEMPLATES_CN})))
+    topology_st.standalone.add_s(Entry((MANAGED_GROUP_DEF_DN, {
+        'objectclass': "top extensibleObject".split(),
+        'cn': MANAGED_GROUP_DEF_CN,
+        'originScope': PEOPLE_DN,
+        'originFilter': '(objectclass=posixAccount)',
+        'managedBase': GROUPS_DN,
+        'managedTemplate': MANAGED_GROUP_MEP_TMPL_DN})))
+
+    topology_st.standalone.add_s(Entry((MANAGED_GROUP_TEMPLATES_DN, {
+        'objectclass': "top nsContainer".split(),
+        'cn': MANAGED_GROUP_TEMPLATES_CN})))
+
+    topology_st.standalone.add_s(Entry((MANAGED_GROUP_MEP_TMPL_DN, {
+        'objectclass': "top mepTemplateEntry".split(),
+        'cn': MANAGED_GROUP_MEP_TMPL_CN,
+        'mepRDNAttr': 'cn',
+        'mepStaticAttr': ['objectclass: posixGroup',
+                          'objectclass: extensibleObject'],
+        'mepMappedAttr': ['cn: $cn|uid: $cn',
+                          'gidNumber: $uidNumber']})))
+
+    topology_st.standalone.plugins.enable(name=PLUGIN_MANAGED_ENTRY)
+    topology_st.standalone.restart(timeout=10)
+
+
+def test_ticket47976_1(topology_st):
     mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginConfigArea', DEFINITIONS_DN)]
-    topology.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
-    topology.standalone.stop(timeout=10)
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.modify_s('cn=%s,cn=plugins,cn=config' % PLUGIN_MANAGED_ENTRY, mod)
+    topology_st.standalone.stop(timeout=10)
+    topology_st.standalone.start(timeout=10)
     for cpt in range(MAX_ACCOUNTS):
         name = "user%d" % (cpt)
-        topology.standalone.add_s(Entry(("uid=%s,%s" %(name, PEOPLE_DN), {
-                          'objectclass': 'top posixAccount extensibleObject'.split(),
-                          'uid': name,
-                          'cn': name,
-                          'uidNumber': '1',
-                          'gidNumber': '1',
-                          'homeDirectory': '/home/%s' % name
-                          })))
+        topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, PEOPLE_DN), {
+            'objectclass': 'top posixAccount extensibleObject'.split(),
+            'uid': name,
+            'cn': name,
+            'uidNumber': '1',
+            'gidNumber': '1',
+            'homeDirectory': '/home/%s' % name
+        })))
 
 
-def test_ticket47976_2(topology):
+def test_ticket47976_2(topology_st):
     """It reimports the database with a very large page size
     so all the entries (user and its private group).
     """
 
     log.info('Test complete')
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128*1024))]
-    topology.standalone.modify_s(DN_LDBM, mod)
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-db-page-size', str(128 * 1024))]
+    topology_st.standalone.modify_s(DN_LDBM, mod)
 
     # Get the the full path and name for our LDIF we will be exporting
     log.info('Export LDIF file...')
-    ldif_dir = topology.standalone.get_ldif_dir()
+    ldif_dir = topology_st.standalone.get_ldif_dir()
     ldif_file = ldif_dir + "/export.ldif"
     args = {EXPORT_REPL_INFO: False,
             TASK_WAIT: True}
-    exportTask = Tasks(topology.standalone)
+    exportTask = Tasks(topology_st.standalone)
     try:
         exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
     except ValueError:
         assert False
     # import the new ldif file
     log.info('Import LDIF file...')
-    importTask = Tasks(topology.standalone)
+    importTask = Tasks(topology_st.standalone)
     args = {TASK_WAIT: True}
     try:
         importTask.importLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
@@ -166,19 +119,19 @@ def test_ticket47976_2(topology):
         assert False
 
 
-def test_ticket47976_3(topology):
+def test_ticket47976_3(topology_st):
     """A single delete of a user should hit 47976, because mep post op will
     delete its related group.
     """
 
     log.info('Testing if the delete will hang or not')
-    #log.info("\n\nAttach\n\n debugger")
-    #time.sleep(60)
-    topology.standalone.set_option(ldap.OPT_TIMEOUT, 5)
+    # log.info("\n\nAttach\n\n debugger")
+    # time.sleep(60)
+    topology_st.standalone.set_option(ldap.OPT_TIMEOUT, 5)
     try:
         for cpt in range(MAX_ACCOUNTS):
             name = "user%d" % (cpt)
-            topology.standalone.delete_s("uid=%s,%s" %(name, PEOPLE_DN))
+            topology_st.standalone.delete_s("uid=%s,%s" % (name, PEOPLE_DN))
     except ldap.TIMEOUT as e:
         log.fatal('Timeout... likely it hangs (47976)')
         assert False
@@ -187,13 +140,13 @@ def test_ticket47976_3(topology):
     for cpt in range(MAX_ACCOUNTS):
         try:
             name = "user%d" % (cpt)
-            topology.standalone.getEntry("uid=%s,%s" %(name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
+            topology_st.standalone.getEntry("uid=%s,%s" % (name, PEOPLE_DN), ldap.SCOPE_BASE, 'objectclass=*')
             assert False
         except ldap.NO_SUCH_OBJECT:
             log.info('%s was correctly deleted' % name)
             pass
 
-    assert cpt == (MAX_ACCOUNTS -1)
+    assert cpt == (MAX_ACCOUNTS - 1)
 
 
 if __name__ == '__main__':

+ 233 - 280
dirsrvtests/tests/tickets/ticket47980_test.py

@@ -6,18 +6,12 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
 import logging
+
+import ldap.sasl
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -36,89 +30,48 @@ USER6_DN = 'uid=user6,%s' % (BRANCH6)
 
 BRANCH1_CONTAINER = 'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
 BRANCH1_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
+              'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
 BRANCH1_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
+                   'cn=nsPwPolicyContainer,ou=level1,dc=example,dc=com'
 BRANCH1_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level1,dc=example,dc=com'
 
 BRANCH2_CONTAINER = 'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
 BRANCH2_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-             'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
+              'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
 BRANCH2_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
+                   'cn=nsPwPolicyContainer,ou=level2,ou=level1,dc=example,dc=com'
 BRANCH2_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level2,ou=level1,dc=example,dc=com'
 
 BRANCH3_CONTAINER = 'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
 BRANCH3_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-             'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
+              'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
 BRANCH3_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlevel3\2Cou\3Dlevel2\2Cou\3Dlevel1\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
+                   'cn=nsPwPolicyContainer,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
 BRANCH3_COS_DEF = 'cn=nsPwPolicy_CoS,ou=level3,ou=level2,ou=level1,dc=example,dc=com'
 
 BRANCH4_CONTAINER = 'cn=nsPwPolicyContainer,ou=people,dc=example,dc=com'
 BRANCH4_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-             'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+              'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
 BRANCH4_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
+                   'cn=nsPwPolicyContainer,ou=People,dc=example,dc=com'
 BRANCH4_COS_DEF = 'cn=nsPwPolicy_CoS,ou=people,dc=example,dc=com'
 
 BRANCH5_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=people,dc=example,dc=com'
 BRANCH5_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-             'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
+              'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
 BRANCH5_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
+                   'cn=nsPwPolicyContainer,ou=lower,ou=People,dc=example,dc=com'
 BRANCH5_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=People,dc=example,dc=com'
 
 BRANCH6_CONTAINER = 'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
 BRANCH6_PWP = 'cn=cn\3DnsPwPolicyEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-             'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
+              'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
 BRANCH6_COS_TMPL = 'cn=cn\3DnsPwTemplateEntry\2Cou\3Dlower\2Cou\3Dlower\2Cou\3DPeople\2Cdc\3Dexample\2Cdc\3Dcom,' + \
-                  'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
+                   'cn=nsPwPolicyContainer,ou=lower,ou=lower,ou=People,dc=example,dc=com'
 BRANCH6_COS_DEF = 'cn=nsPwPolicy_CoS,ou=lower,ou=lower,ou=People,dc=example,dc=com'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket47980(topology):
+def test_ticket47980(topology_st):
     """
         Multiple COS pointer definitions that use the same attribute are not correctly ordered.
         The cos plugin was incorrectly sorting the attribute indexes based on subtree, which lead
@@ -129,38 +82,38 @@ def test_ticket47980(topology):
 
     # Add our nested branches
     try:
-        topology.standalone.add_s(Entry((BRANCH1, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'level1'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH1, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'level1'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add level1: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((BRANCH2, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'level2'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH2, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'level2'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add level2: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((BRANCH3, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'level3'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH3, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'level3'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add level3: error ' + e.message['desc'])
         assert False
 
     # People branch, might already exist
     try:
-        topology.standalone.add_s(Entry((BRANCH4, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'level4'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH4, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'level4'
+        })))
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError as e:
@@ -168,81 +121,81 @@ def test_ticket47980(topology):
         assert False
 
     try:
-        topology.standalone.add_s(Entry((BRANCH5, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'level5'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH5, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'level5'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add level5: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((BRANCH6, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'level6'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH6, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'level6'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add level6: error ' + e.message['desc'])
         assert False
 
     # Add users to each branch
     try:
-        topology.standalone.add_s(Entry((USER1_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+        topology_st.standalone.add_s(Entry((USER1_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user1: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((USER2_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user2'
-                          })))
+        topology_st.standalone.add_s(Entry((USER2_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user2'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user2: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((USER3_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user3'
-                          })))
+        topology_st.standalone.add_s(Entry((USER3_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user3'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user3: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((USER4_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user4'
-                          })))
+        topology_st.standalone.add_s(Entry((USER4_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user4'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user4: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((USER5_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user5'
-                          })))
+        topology_st.standalone.add_s(Entry((USER5_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user5'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user5: error ' + e.message['desc'])
         assert False
 
     try:
-        topology.standalone.add_s(Entry((USER6_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user6'
-                          })))
+        topology_st.standalone.add_s(Entry((USER6_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user6'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user6: error ' + e.message['desc'])
         assert False
 
     # Enable password policy
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
     except ldap.LDAPError as e:
         log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
         assert False
@@ -252,51 +205,51 @@ def test_ticket47980(topology):
     #
     # Add the container
     try:
-        topology.standalone.add_s(Entry((BRANCH1_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH1_CONTAINER, {
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for level1: error ' + e.message['desc'])
         assert False
 
     # Add the password policy subentry
     try:
-        topology.standalone.add_s(Entry((BRANCH1_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH1_PWP, {
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy for level1: error ' + e.message['desc'])
         assert False
 
     # Add the COS template
     try:
-        topology.standalone.add_s(Entry((BRANCH1_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH1_PWP
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH1_COS_TMPL, {
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=level1,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH1_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template for level1: error ' + e.message['desc'])
         assert False
 
     # Add the COS definition
     try:
-        topology.standalone.add_s(Entry((BRANCH1_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
-                          'costemplatedn': BRANCH1_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH1_COS_DEF, {
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level1,dc=example,dc=com',
+            'costemplatedn': BRANCH1_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def for level1: error ' + e.message['desc'])
         assert False
@@ -306,51 +259,51 @@ def test_ticket47980(topology):
     #
     # Add the container
     try:
-        topology.standalone.add_s(Entry((BRANCH2_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH2_CONTAINER, {
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for level2: error ' + e.message['desc'])
         assert False
 
     # Add the password policy subentry
     try:
-        topology.standalone.add_s(Entry((BRANCH2_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH2_PWP, {
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy for level2: error ' + e.message['desc'])
         assert False
 
     # Add the COS template
     try:
-        topology.standalone.add_s(Entry((BRANCH2_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH2_PWP
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH2_COS_TMPL, {
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=level2,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH2_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template for level2: error ' + e.message['desc'])
         assert False
 
     # Add the COS definition
     try:
-        topology.standalone.add_s(Entry((BRANCH2_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
-                          'costemplatedn': BRANCH2_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH2_COS_DEF, {
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level2,dc=example,dc=com',
+            'costemplatedn': BRANCH2_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def for level2: error ' + e.message['desc'])
         assert False
@@ -360,51 +313,51 @@ def test_ticket47980(topology):
     #
     # Add the container
     try:
-        topology.standalone.add_s(Entry((BRANCH3_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH3_CONTAINER, {
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for level3: error ' + e.message['desc'])
         assert False
 
     # Add the password policy subentry
     try:
-        topology.standalone.add_s(Entry((BRANCH3_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH3_PWP, {
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy for level3: error ' + e.message['desc'])
         assert False
 
     # Add the COS template
     try:
-        topology.standalone.add_s(Entry((BRANCH3_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH3_PWP
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH3_COS_TMPL, {
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=level3,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH3_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template for level3: error ' + e.message['desc'])
         assert False
 
     # Add the COS definition
     try:
-        topology.standalone.add_s(Entry((BRANCH3_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
-                          'costemplatedn': BRANCH3_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH3_COS_DEF, {
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+            'costemplatedn': BRANCH3_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def for level3: error ' + e.message['desc'])
         assert False
@@ -414,51 +367,51 @@ def test_ticket47980(topology):
     #
     # Add the container
     try:
-        topology.standalone.add_s(Entry((BRANCH4_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH4_CONTAINER, {
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for level3: error ' + e.message['desc'])
         assert False
 
     # Add the password policy subentry
     try:
-        topology.standalone.add_s(Entry((BRANCH4_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH4_PWP, {
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy for branch4: error ' + e.message['desc'])
         assert False
 
     # Add the COS template
     try:
-        topology.standalone.add_s(Entry((BRANCH4_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH4_PWP
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH4_COS_TMPL, {
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH4_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template for level3: error ' + e.message['desc'])
         assert False
 
     # Add the COS definition
     try:
-        topology.standalone.add_s(Entry((BRANCH4_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
-                          'costemplatedn': BRANCH4_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH4_COS_DEF, {
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+            'costemplatedn': BRANCH4_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def for branch4: error ' + e.message['desc'])
         assert False
@@ -468,51 +421,51 @@ def test_ticket47980(topology):
     #
     # Add the container
     try:
-        topology.standalone.add_s(Entry((BRANCH5_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH5_CONTAINER, {
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for branch5: error ' + e.message['desc'])
         assert False
 
     # Add the password policy subentry
     try:
-        topology.standalone.add_s(Entry((BRANCH5_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH5_PWP, {
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy for branch5: error ' + e.message['desc'])
         assert False
 
     # Add the COS template
     try:
-        topology.standalone.add_s(Entry((BRANCH5_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH5_PWP
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH5_COS_TMPL, {
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=people,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH5_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template for branch5: error ' + e.message['desc'])
         assert False
 
     # Add the COS definition
     try:
-        topology.standalone.add_s(Entry((BRANCH5_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
-                          'costemplatedn': BRANCH5_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH5_COS_DEF, {
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=people,dc=example,dc=com',
+            'costemplatedn': BRANCH5_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def for level3: error ' + e.message['desc'])
         assert False
@@ -522,51 +475,51 @@ def test_ticket47980(topology):
     #
     # Add the container
     try:
-        topology.standalone.add_s(Entry((BRANCH6_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH6_CONTAINER, {
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for branch6: error ' + e.message['desc'])
         assert False
 
     # Add the password policy subentry
     try:
-        topology.standalone.add_s(Entry((BRANCH6_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH6_PWP, {
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=level3,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy for branch6: error ' + e.message['desc'])
         assert False
 
     # Add the COS template
     try:
-        topology.standalone.add_s(Entry((BRANCH6_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH6_PWP
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH6_COS_TMPL, {
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH6_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template for branch6: error ' + e.message['desc'])
         assert False
 
     # Add the COS definition
     try:
-        topology.standalone.add_s(Entry((BRANCH6_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
-                          'costemplatedn': BRANCH6_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH6_COS_DEF, {
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=lower,ou=lower,ou=people,dc=example,dc=com',
+            'costemplatedn': BRANCH6_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def for branch6: error ' + e.message['desc'])
         assert False
@@ -577,7 +530,7 @@ def test_ticket47980(topology):
     # Now check that each user has its expected passwordPolicy subentry
     #
     try:
-        entries = topology.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER1_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasValue('pwdpolicysubentry', BRANCH1_PWP):
             log.fatal('User %s does not have expected pwdpolicysubentry!')
             assert False
@@ -586,7 +539,7 @@ def test_ticket47980(topology):
         assert False
 
     try:
-        entries = topology.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER2_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasValue('pwdpolicysubentry', BRANCH2_PWP):
             log.fatal('User %s does not have expected pwdpolicysubentry!' % USER2_DN)
             assert False
@@ -595,7 +548,7 @@ def test_ticket47980(topology):
         assert False
 
     try:
-        entries = topology.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER3_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasValue('pwdpolicysubentry', BRANCH3_PWP):
             log.fatal('User %s does not have expected pwdpolicysubentry!' % USER3_DN)
             assert False
@@ -604,7 +557,7 @@ def test_ticket47980(topology):
         assert False
 
     try:
-        entries = topology.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER4_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasValue('pwdpolicysubentry', BRANCH4_PWP):
             log.fatal('User %s does not have expected pwdpolicysubentry!' % USER4_DN)
             assert False
@@ -613,7 +566,7 @@ def test_ticket47980(topology):
         assert False
 
     try:
-        entries = topology.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER5_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasValue('pwdpolicysubentry', BRANCH5_PWP):
             log.fatal('User %s does not have expected pwdpolicysubentry!' % USER5_DN)
             assert False
@@ -622,7 +575,7 @@ def test_ticket47980(topology):
         assert False
 
     try:
-        entries = topology.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER6_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasValue('pwdpolicysubentry', BRANCH6_PWP):
             log.fatal('User %s does not have expected pwdpolicysubentry!' % USER6_DN)
             assert False

+ 50 - 97
dirsrvtests/tests/tickets/ticket47981_test.py

@@ -6,18 +6,12 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import ldap.sasl
 import logging
+
+import ldap.sasl
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -33,56 +27,15 @@ SECOND_SUFFIX = 'o=netscaperoot'
 BE_NAME = 'netscaperoot'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
 def addSubtreePwPolicy(inst):
     #
     # Add subtree policy to the people branch
     #
     try:
         inst.add_s(Entry((BRANCH_CONTAINER, {
-                          'objectclass': 'top nsContainer'.split(),
-                          'cn': 'nsPwPolicyContainer'
-                          })))
+            'objectclass': 'top nsContainer'.split(),
+            'cn': 'nsPwPolicyContainer'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container for ou=people: error ' + e.message['desc'])
         assert False
@@ -90,15 +43,15 @@ def addSubtreePwPolicy(inst):
     # Add the password policy subentry
     try:
         inst.add_s(Entry((BRANCH_PWP, {
-                          'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
-                          'passwordMustChange': 'off',
-                          'passwordExp': 'off',
-                          'passwordHistory': 'off',
-                          'passwordMinAge': '0',
-                          'passwordChange': 'off',
-                          'passwordStorageScheme': 'ssha'
-                          })))
+            'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+            'passwordMustChange': 'off',
+            'passwordExp': 'off',
+            'passwordHistory': 'off',
+            'passwordMinAge': '0',
+            'passwordChange': 'off',
+            'passwordStorageScheme': 'ssha'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
         assert False
@@ -106,12 +59,12 @@ def addSubtreePwPolicy(inst):
     # Add the COS template
     try:
         inst.add_s(Entry((BRANCH_COS_TMPL, {
-                          'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
-                          'cosPriority': '1',
-                          'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
-                          'pwdpolicysubentry': BRANCH_PWP
-                          })))
+            'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+            'cosPriority': '1',
+            'cn': 'cn=nsPwTemplateEntry,ou=people,dc=example,dc=com',
+            'pwdpolicysubentry': BRANCH_PWP
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template: error ' + e.message['desc'])
         assert False
@@ -119,11 +72,11 @@ def addSubtreePwPolicy(inst):
     # Add the COS definition
     try:
         inst.add_s(Entry((BRANCH_COS_DEF, {
-                          'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                          'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
-                          'costemplatedn': BRANCH_COS_TMPL,
-                          'cosAttribute': 'pwdpolicysubentry default operational-default'
-                          })))
+            'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+            'cn': 'cn=nsPwPolicyEntry,ou=people,dc=example,dc=com',
+            'costemplatedn': BRANCH_COS_TMPL,
+            'cosAttribute': 'pwdpolicysubentry default operational-default'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def: error ' + e.message['desc'])
         assert False
@@ -157,7 +110,7 @@ def delSubtreePwPolicy(inst):
     time.sleep(0.5)
 
 
-def test_ticket47981(topology):
+def test_ticket47981(topology_st):
     """
         If there are multiple suffixes, and the last suffix checked does not contain any COS entries,
         while other suffixes do, then the vattr cache is not invalidated as it should be.  Then any
@@ -171,12 +124,12 @@ def test_ticket47981(topology):
     #
     log.info('Adding second suffix that will not contain any COS entries...\n')
 
-    topology.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
-    topology.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
+    topology_st.standalone.backend.create(SECOND_SUFFIX, {BACKEND_NAME: BE_NAME})
+    topology_st.standalone.mappingtree.create(SECOND_SUFFIX, bename=BE_NAME)
     try:
-        topology.standalone.add_s(Entry((SECOND_SUFFIX, {
-                          'objectclass': 'top organization'.split(),
-                          'o': BE_NAME})))
+        topology_st.standalone.add_s(Entry((SECOND_SUFFIX, {
+            'objectclass': 'top organization'.split(),
+            'o': BE_NAME})))
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError as e:
@@ -189,10 +142,10 @@ def test_ticket47981(topology):
     log.info('Add our test entries to the default suffix, and proceed with the test...')
 
     try:
-        topology.standalone.add_s(Entry((BRANCH, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'ou': 'level4'
-                          })))
+        topology_st.standalone.add_s(Entry((BRANCH, {
+            'objectclass': 'top extensibleObject'.split(),
+            'ou': 'level4'
+        })))
     except ldap.ALREADY_EXISTS:
         pass
     except ldap.LDAPError as e:
@@ -203,10 +156,10 @@ def test_ticket47981(topology):
     # Add a user to the branch
     #
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                          'objectclass': 'top extensibleObject'.split(),
-                          'uid': 'user1'
-                          })))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top extensibleObject'.split(),
+            'uid': 'user1'
+        })))
     except ldap.LDAPError as e:
         log.error('Failed to add user1: error ' + e.message['desc'])
         assert False
@@ -215,21 +168,21 @@ def test_ticket47981(topology):
     # Enable password policy and add the subtree policy
     #
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
     except ldap.LDAPError as e:
         log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
         assert False
 
-    addSubtreePwPolicy(topology.standalone)
+    addSubtreePwPolicy(topology_st.standalone)
 
     #
     # Now check the user has its expected passwordPolicy subentry
     #
     try:
-        entries = topology.standalone.search_s(USER_DN,
-                                              ldap.SCOPE_BASE,
-                                              '(objectclass=top)',
-                                              ['pwdpolicysubentry', 'dn'])
+        entries = topology_st.standalone.search_s(USER_DN,
+                                                  ldap.SCOPE_BASE,
+                                                  '(objectclass=top)',
+                                                  ['pwdpolicysubentry', 'dn'])
         if not entries[0].hasAttr('pwdpolicysubentry'):
             log.fatal('User does not have expected pwdpolicysubentry!')
             assert False
@@ -240,9 +193,9 @@ def test_ticket47981(topology):
     #
     # Delete the password policy and make sure it is removed from the same user
     #
-    delSubtreePwPolicy(topology.standalone)
+    delSubtreePwPolicy(topology_st.standalone)
     try:
-        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if entries[0].hasAttr('pwdpolicysubentry'):
             log.fatal('User unexpectedly does have the pwdpolicysubentry!')
             assert False
@@ -253,9 +206,9 @@ def test_ticket47981(topology):
     #
     # Add the subtree policvy back and see if the user now has it
     #
-    addSubtreePwPolicy(topology.standalone)
+    addSubtreePwPolicy(topology_st.standalone)
     try:
-        entries = topology.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
+        entries = topology_st.standalone.search_s(USER_DN, ldap.SCOPE_BASE, '(objectclass=top)', ['pwdpolicysubentry'])
         if not entries[0].hasAttr('pwdpolicysubentry'):
             log.fatal('User does not have expected pwdpolicysubentry!')
             assert False

+ 142 - 255
dirsrvtests/tests/tickets/ticket47988_test.py

@@ -11,51 +11,43 @@ Created on Nov 7, 2013
 
 @author: tbordaz
 '''
-import os
-import sys
-import time
-import ldap
 import logging
-import pytest
-import tarfile
-import stat
 import shutil
+import stat
+import tarfile
+import time
 from random import randint
-from lib389 import DirSrv, Entry, tools
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 
+import ldap
+import pytest
+from lib389 import Entry
+from lib389._constants import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-#
-# important part. We can deploy Master1 and Master2 on different versions
-#
-installation1_prefix = None
-installation2_prefix = None
-
 TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
 OC_NAME = 'OCticket47988'
 MUST = "(postalAddress $ postalCode)"
-MAY  = "(member $ street)"
+MAY = "(member $ street)"
 
 OTHER_NAME = 'other_entry'
 MAX_OTHERS = 10
 
-BIND_NAME  = 'bind_entry'
-BIND_DN    = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW    = 'password'
+BIND_NAME = 'bind_entry'
+BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
+BIND_PW = 'password'
 
 ENTRY_NAME = 'test_entry'
-ENTRY_DN   = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC   = "top person %s" % OC_NAME
+ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
+ENTRY_OC = "top person %s" % OC_NAME
+
 
 def _oc_definition(oid_ext, name, must=None, may=None):
-    oid  = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
+    oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext
     desc = 'To test ticket 47490'
-    sup  = 'person'
+    sup = 'person'
     if not must:
         must = MUST
     if not may:
@@ -63,120 +55,14 @@ def _oc_definition(oid_ext, name, must=None, may=None):
 
     new_oc = "( %s  NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may)
     return new_oc
-class TopologyMaster1Master2(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-
-        master2.open()
-        self.master2 = master2
 
 
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to create a replicated topology for the 'module'.
-        The replicated topology is MASTER1 <-> Master2.
-    '''
-    global installation1_prefix
-    global installation2_prefix
-
-    #os.environ['USE_VALGRIND'] = '1'
-
-    # allocate master1 on a given deployement
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Args for the master1 instance
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-
-    # allocate master1 on a given deployement
-    master2 = DirSrv(verbose=False)
-    if installation2_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-
-    # Args for the consumer instance
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-
-    # Get the status of the instance and restart it if it exists
-    instance_master1 = master1.exists()
-    instance_master2 = master2.exists()
-
-    # Remove all the instances
-    if instance_master1:
-        master1.delete()
-    if instance_master2:
-        master2.delete()
-
-    # Create the instances
-    master1.create()
-    master1.open()
-    master2.create()
-    master2.open()
-
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    #
-    # Now prepare the Master-Consumer topology
-    #
-    # First Enable replication
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    # Initialize the supplier->consumer
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-
-    if not repl_agreement:
-        log.fatal("Fail to create a replica agreement")
-        sys.exit(1)
-
-    log.debug("%s created" % repl_agreement)
-
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(repl_agreement)
-
-    # Check replication is working fine
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    # Here we have two instances master and consumer
-    return TopologyMaster1Master2(master1, master2)
-
-
-def _header(topology, label):
-    topology.master1.log.info("\n\n###############################################")
-    topology.master1.log.info("#######")
-    topology.master1.log.info("####### %s" % label)
-    topology.master1.log.info("#######")
-    topology.master1.log.info("###################################################")
+def _header(topology_m2, label):
+    topology_m2.ms["master1"].log.info("\n\n###############################################")
+    topology_m2.ms["master1"].log.info("#######")
+    topology_m2.ms["master1"].log.info("####### %s" % label)
+    topology_m2.ms["master1"].log.info("#######")
+    topology_m2.ms["master1"].log.info("###################################################")
 
 
 def _install_schema(server, tarFile):
@@ -216,7 +102,7 @@ def _install_schema(server, tarFile):
     os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP)
 
 
-def test_ticket47988_init(topology):
+def test_ticket47988_init(topology_m2):
     """
         It adds
            - Objectclass with MAY 'member'
@@ -225,48 +111,48 @@ def test_ticket47988_init(topology):
 
     """
 
-    _header(topology, 'test_ticket47988_init')
+    _header(topology_m2, 'test_ticket47988_init')
 
     # enable acl error logging
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))]  # REPL
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))]  # Internal op
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
     # add dummy entries
     for cpt in range(MAX_OTHERS):
         name = "%s%d" % (OTHER_NAME, cpt)
-        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
     # check that entry 0 is replicated before
     loop = 0
     entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
     while loop <= 10:
         try:
-            ent = topology.master2.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
+            ent = topology_m2.ms["master2"].getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber'])
             break
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
         loop += 1
     assert (loop <= 10)
 
-    topology.master1.stop(timeout=10)
-    topology.master2.stop(timeout=10)
+    topology_m2.ms["master1"].stop(timeout=10)
+    topology_m2.ms["master2"].stop(timeout=10)
 
-    #install the specific schema M1: ipa3.3, M2: ipa4.1
-    schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
-    _install_schema(topology.master1, schema_file)
-    schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
-    _install_schema(topology.master2, schema_file)
+    # install the specific schema M1: ipa3.3, M2: ipa4.1
+    schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz")
+    _install_schema(topology_m2.ms["master1"], schema_file)
+    schema_file = os.path.join(topology_m2.ms["master1"].getDir(__file__, DATA_DIR), "ticket47988/schema_ipa4.1.tar.gz")
+    _install_schema(topology_m2.ms["master2"], schema_file)
 
-    topology.master1.start(timeout=10)
-    topology.master2.start(timeout=10)
+    topology_m2.ms["master1"].start(timeout=10)
+    topology_m2.ms["master2"].start(timeout=10)
 
 
 def _do_update_schema(server, range=3999):
@@ -276,7 +162,8 @@ def _do_update_schema(server, range=3999):
     postfix = str(randint(range, range + 1000))
     OID = '2.16.840.1.113730.3.8.12.%s' % postfix
     NAME = 'thierry%s' % postfix
-    value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME)
+    value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (
+    OID, NAME)
     mod = [(ldap.MOD_ADD, 'objectclasses', value)]
     server.modify_s('cn=schema', mod)
 
@@ -286,8 +173,8 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10):
     This is doing an update on M2 (IPA4.1) and checks the update has been
     propagated to M1 (IPA3.3)
     '''
-    assert(supplier)
-    assert(consumer)
+    assert (supplier)
+    assert (consumer)
     entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX)
     value = str(randint(100, 200))
     mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)]
@@ -306,170 +193,170 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10):
     assert (loop <= attempts)
 
 
-def _pause_M2_to_M1(topology):
-    topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n")
-    ents = topology.master2.agreement.list(suffix=SUFFIX)
+def _pause_M2_to_M1(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M2->M1 ######################\n")
+    ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master2.agreement.pause(ents[0].dn)
+    topology_m2.ms["master2"].agreement.pause(ents[0].dn)
 
 
-def _resume_M1_to_M2(topology):
-    topology.master1.log.info("\n\n######################### resume RA M1->M2 ######################\n")
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _resume_M1_to_M2(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### resume RA M1->M2 ######################\n")
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master1.agreement.resume(ents[0].dn)
+    topology_m2.ms["master1"].agreement.resume(ents[0].dn)
 
 
-def _pause_M1_to_M2(topology):
-    topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n")
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+def _pause_M1_to_M2(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### Pause RA M1->M2 ######################\n")
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master1.agreement.pause(ents[0].dn)
+    topology_m2.ms["master1"].agreement.pause(ents[0].dn)
 
 
-def _resume_M2_to_M1(topology):
-    topology.master1.log.info("\n\n######################### resume RA M2->M1 ######################\n")
-    ents = topology.master2.agreement.list(suffix=SUFFIX)
+def _resume_M2_to_M1(topology_m2):
+    topology_m2.ms["master1"].log.info("\n\n######################### resume RA M2->M1 ######################\n")
+    ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master2.agreement.resume(ents[0].dn)
+    topology_m2.ms["master2"].agreement.resume(ents[0].dn)
 
 
-def test_ticket47988_1(topology):
+def test_ticket47988_1(topology_m2):
     '''
     Check that replication is working and pause replication M2->M1
     '''
-    _header(topology, 'test_ticket47988_1')
+    _header(topology_m2, 'test_ticket47988_1')
 
-    topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
-    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
-    _pause_M2_to_M1(topology)
+    topology_m2.ms["master1"].log.debug("\n\nCheck that replication is working and pause replication M2->M1\n")
+    _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)
+    _pause_M2_to_M1(topology_m2)
 
 
-def test_ticket47988_2(topology):
+def test_ticket47988_2(topology_m2):
     '''
     Update M1 schema and trigger update M1->M2
     So M1 should learn new/extended definitions that are in M2 schema
     '''
-    _header(topology, 'test_ticket47988_2')
+    _header(topology_m2, 'test_ticket47988_2')
 
-    topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\nBefore updating the schema on M1\n")
-    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+    topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\nBefore updating the schema on M1\n")
+    topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
 
     # Here M1 should no, should check M2 schema and learn
-    _do_update_schema(topology.master1)
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\nAfter updating the schema on M1\n")
-    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+    _do_update_schema(topology_m2.ms["master1"])
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\nAfter updating the schema on M1\n")
+    topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
 
     # to avoid linger effect where a replication session is reused without checking the schema
-    _pause_M1_to_M2(topology)
-    _resume_M1_to_M2(topology)
-
-    #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
-    #time.sleep(60)
-    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15)
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\nAfter a full replication session\n")
-    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+    _pause_M1_to_M2(topology_m2)
+    _resume_M1_to_M2(topology_m2)
+
+    # topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
+    # time.sleep(60)
+    _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=15)
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\nAfter a full replication session\n")
+    topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
     assert (master2_schema_csn)
 
 
-def test_ticket47988_3(topology):
+def test_ticket47988_3(topology_m2):
     '''
     Resume replication M2->M1 and check replication is still working
     '''
-    _header(topology, 'test_ticket47988_3')
+    _header(topology_m2, 'test_ticket47988_3')
 
-    _resume_M2_to_M1(topology)
-    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
-    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
+    _resume_M2_to_M1(topology_m2)
+    _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5)
+    _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)
 
 
-def test_ticket47988_4(topology):
+def test_ticket47988_4(topology_m2):
     '''
     Check schemaCSN is identical on both server
     And save the nsschemaCSN to later check they do not change unexpectedly
     '''
-    _header(topology, 'test_ticket47988_4')
+    _header(topology_m2, 'test_ticket47988_4')
 
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
     assert (master2_schema_csn)
     assert (master1_schema_csn == master2_schema_csn)
 
-    topology.master1.saved_schema_csn = master1_schema_csn
-    topology.master2.saved_schema_csn = master2_schema_csn
+    topology_m2.ms["master1"].saved_schema_csn = master1_schema_csn
+    topology_m2.ms["master2"].saved_schema_csn = master2_schema_csn
 
 
-def test_ticket47988_5(topology):
+def test_ticket47988_5(topology_m2):
     '''
     Check schemaCSN  do not change unexpectedly
     '''
-    _header(topology, 'test_ticket47988_5')
-
-    _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5)
-    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5)
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
+    _header(topology_m2, 'test_ticket47988_5')
+
+    _do_update_entry(supplier=topology_m2.ms["master1"], consumer=topology_m2.ms["master2"], attempts=5)
+    _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=5)
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("\n\nMaster2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
     assert (master2_schema_csn)
     assert (master1_schema_csn == master2_schema_csn)
 
-    assert (topology.master1.saved_schema_csn == master1_schema_csn)
-    assert (topology.master2.saved_schema_csn == master2_schema_csn)
+    assert (topology_m2.ms["master1"].saved_schema_csn == master1_schema_csn)
+    assert (topology_m2.ms["master2"].saved_schema_csn == master2_schema_csn)
 
 
-def test_ticket47988_6(topology):
+def test_ticket47988_6(topology_m2):
     '''
     Update M1 schema and trigger update M2->M1
     So M2 should learn new/extended definitions that are in M1 schema
     '''
 
-    _header(topology, 'test_ticket47988_6')
+    _header(topology_m2, 'test_ticket47988_6')
 
-    topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n")
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\nBefore updating the schema on M1\n")
-    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+    topology_m2.ms["master1"].log.debug("\n\nUpdate M1 schema and an entry on M1\n")
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\nBefore updating the schema on M1\n")
+    topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
 
     # Here M1 should no, should check M2 schema and learn
-    _do_update_schema(topology.master1, range=5999)
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\nAfter updating the schema on M1\n")
-    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+    _do_update_schema(topology_m2.ms["master1"], range=5999)
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\nAfter updating the schema on M1\n")
+    topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
 
     # to avoid linger effect where a replication session is reused without checking the schema
-    _pause_M1_to_M2(topology)
-    _resume_M1_to_M2(topology)
-
-    #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
-    #time.sleep(60)
-    _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15)
-    master1_schema_csn = topology.master1.schema.get_schema_csn()
-    master2_schema_csn = topology.master2.schema.get_schema_csn()
-    topology.master1.log.debug("\nAfter a full replication session\n")
-    topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
-    topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
+    _pause_M1_to_M2(topology_m2)
+    _resume_M1_to_M2(topology_m2)
+
+    # topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify")
+    # time.sleep(60)
+    _do_update_entry(supplier=topology_m2.ms["master2"], consumer=topology_m2.ms["master1"], attempts=15)
+    master1_schema_csn = topology_m2.ms["master1"].schema.get_schema_csn()
+    master2_schema_csn = topology_m2.ms["master2"].schema.get_schema_csn()
+    topology_m2.ms["master1"].log.debug("\nAfter a full replication session\n")
+    topology_m2.ms["master1"].log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn)
+    topology_m2.ms["master1"].log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn)
     assert (master1_schema_csn)
     assert (master2_schema_csn)
 

+ 72 - 110
dirsrvtests/tests/tickets/ticket48005_test.py

@@ -6,68 +6,29 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import ldap
 import logging
-import pytest
 import re
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
+
+import pytest
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    #request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
 
-def test_ticket48005_setup(topology):
+def test_ticket48005_setup(topology_st):
     '''
     allow dump core
     generate a test ldif file using dbgen.pl
     import the ldif
     '''
     log.info("Ticket 48005 setup...")
-    if hasattr(topology.standalone, 'prefix'):
-        prefix = topology.standalone.prefix
+    if hasattr(topology_st.standalone, 'prefix'):
+        prefix = topology_st.standalone.prefix
     else:
         prefix = None
-    sysconfig_dirsrv = os.path.join(topology.standalone.get_initconfig_dir(), 'dirsrv')
+    sysconfig_dirsrv = os.path.join(topology_st.standalone.get_initconfig_dir(), 'dirsrv')
     cmdline = 'egrep "ulimit -c unlimited" %s' % sysconfig_dirsrv
     p = os.popen(cmdline, "r")
     ulimitc = p.readline()
@@ -85,13 +46,13 @@ def test_ticket48005_setup(topology):
         log.info('Adding it')
         cmdline = 'echo LimitCORE=infinity >> %s' % sysconfig_dirsrv_systemd
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
-    ldif_file = topology.standalone.get_ldif_dir() + "/ticket48005.ldif"
+    ldif_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif"
     os.system('ls %s' % ldif_file)
     os.system('rm -f %s' % ldif_file)
-    if hasattr(topology.standalone, 'prefix'):
-        prefix = topology.standalone.prefix
+    if hasattr(topology_st.standalone, 'prefix'):
+        prefix = topology_st.standalone.prefix
     else:
         prefix = None
     dbgen_prog = prefix + '/bin/dbgen.pl'
@@ -103,13 +64,13 @@ def test_ticket48005_setup(topology):
     num = int(dnnumstr)
     log.info("We have %d entries.\n", num)
 
-    importTask = Tasks(topology.standalone)
+    importTask = Tasks(topology_st.standalone)
     args = {TASK_WAIT: True}
     importTask.importLDIF(SUFFIX, None, ldif_file, args)
     log.info('Importing %s complete.' % ldif_file)
 
 
-def test_ticket48005_memberof(topology):
+def test_ticket48005_memberof(topology_st):
     '''
     Enable memberof and referint plugin
     Run fixmemberof task without waiting
@@ -118,22 +79,22 @@ def test_ticket48005_memberof(topology):
     If no core was found, this test case was successful.
     '''
     log.info("Ticket 48005 memberof test...")
-    topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
-    topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+    topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     try:
         # run the fixup task
-        topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False})
+        topology_st.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: False})
     except ValueError:
         log.error('Some problem occured with a value that was provided')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
     mytmp = '/tmp'
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -143,17 +104,17 @@ def test_ticket48005_memberof(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
-    topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
-    topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
+    topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+    topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     log.info("Ticket 48005 memberof test complete")
 
 
-def test_ticket48005_automember(topology):
+def test_ticket48005_automember(topology_st):
     '''
     Enable automember and referint plugin
     1. Run automember rebuild membership task without waiting
@@ -170,36 +131,36 @@ def test_ticket48005_automember(topology):
     If no core was found, this test case was successful.
     '''
     log.info("Ticket 48005 automember test...")
-    topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
-    topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
+    topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+    topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
 
     # configure automember config entry
     log.info('Adding automember config')
     try:
-        topology.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
-                                         'objectclass': 'top autoMemberDefinition'.split(),
-                                         'autoMemberScope': 'dc=example,dc=com',
-                                         'autoMemberFilter': 'objectclass=inetorgperson',
-                                         'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com',
-                                         'autoMemberGroupingAttr': 'uniquemember:dn',
-                                         'cn': 'group cfg'})))
+        topology_st.standalone.add_s(Entry(('cn=group cfg,cn=Auto Membership Plugin,cn=plugins,cn=config', {
+            'objectclass': 'top autoMemberDefinition'.split(),
+            'autoMemberScope': 'dc=example,dc=com',
+            'autoMemberFilter': 'objectclass=inetorgperson',
+            'autoMemberDefaultGroup': 'cn=group0,dc=example,dc=com',
+            'autoMemberGroupingAttr': 'uniquemember:dn',
+            'cn': 'group cfg'})))
     except ValueError:
         log.error('Failed to add automember config')
         assert False
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     try:
         # run the automember rebuild task
-        topology.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False})
+        topology_st.standalone.tasks.automemberRebuild(suffix=SUFFIX, args={TASK_WAIT: False})
     except ValueError:
         log.error('Automember rebuild task failed.')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
     mytmp = '/tmp'
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -209,19 +170,19 @@ def test_ticket48005_automember(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
     ldif_out_file = mytmp + "/ticket48005_automember_exported.ldif"
     try:
         # run the automember export task
-        topology.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False})
+        topology_st.standalone.tasks.automemberExport(suffix=SUFFIX, ldif_out=ldif_out_file, args={TASK_WAIT: False})
     except ValueError:
         log.error('Automember Export task failed.')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -231,20 +192,21 @@ def test_ticket48005_automember(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
-    ldif_in_file = topology.standalone.get_ldif_dir() + "/ticket48005.ldif"
+    ldif_in_file = topology_st.standalone.get_ldif_dir() + "/ticket48005.ldif"
     ldif_out_file = mytmp + "/ticket48005_automember_map.ldif"
     try:
         # run the automember map task
-        topology.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file, args={TASK_WAIT: False})
+        topology_st.standalone.tasks.automemberMap(ldif_in=ldif_in_file, ldif_out=ldif_out_file,
+                                                   args={TASK_WAIT: False})
     except ValueError:
         log.error('Automember Map task failed.')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -254,17 +216,17 @@ def test_ticket48005_automember(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
-    topology.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
-    topology.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
+    topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)
+    topology_st.standalone.plugins.enable(name=PLUGIN_AUTOMEMBER)
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     log.info("Ticket 48005 automember test complete")
 
 
-def test_ticket48005_syntaxvalidate(topology):
+def test_ticket48005_syntaxvalidate(topology_st):
     '''
     Run syntax validate task without waiting
     Shutdown the server
@@ -275,15 +237,15 @@ def test_ticket48005_syntaxvalidate(topology):
 
     try:
         # run the fixup task
-        topology.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False})
+        topology_st.standalone.tasks.syntaxValidate(suffix=SUFFIX, args={TASK_WAIT: False})
     except ValueError:
         log.error('Some problem occured with a value that was provided')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
     mytmp = '/tmp'
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -293,12 +255,12 @@ def test_ticket48005_syntaxvalidate(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
     log.info("Ticket 48005 syntax validate test complete")
 
 
-def test_ticket48005_usn(topology):
+def test_ticket48005_usn(topology_st):
     '''
     Enable entryusn
     Delete all user entries.
@@ -308,19 +270,19 @@ def test_ticket48005_usn(topology):
     If no core was found, this test case was successful.
     '''
     log.info("Ticket 48005 usn test...")
-    topology.standalone.plugins.enable(name=PLUGIN_USN)
+    topology_st.standalone.plugins.enable(name=PLUGIN_USN)
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     try:
-        entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)")
+        entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, "(objectclass=inetorgperson)")
         if len(entries) == 0:
             log.info("No user entries.")
         else:
             for i in range(len(entries)):
                 # log.info('Deleting %s' % entries[i].dn)
                 try:
-                    topology.standalone.delete_s(entries[i].dn)
+                    topology_st.standalone.delete_s(entries[i].dn)
                 except ValueError:
                     log.error('delete_s %s failed.' % entries[i].dn)
                     assert False
@@ -330,15 +292,15 @@ def test_ticket48005_usn(topology):
 
     try:
         # run the usn tombstone cleanup
-        topology.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False})
+        topology_st.standalone.tasks.usnTombstoneCleanup(suffix=SUFFIX, bename="userRoot", args={TASK_WAIT: False})
     except ValueError:
         log.error('Some problem occured with a value that was provided')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
     mytmp = '/tmp'
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -348,16 +310,16 @@ def test_ticket48005_usn(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
-    topology.standalone.plugins.disable(name=PLUGIN_USN)
+    topology_st.standalone.plugins.disable(name=PLUGIN_USN)
 
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     log.info("Ticket 48005 usn test complete")
 
 
-def test_ticket48005_schemareload(topology):
+def test_ticket48005_schemareload(topology_st):
     '''
     Run schema reload task without waiting
     Shutdown the server
@@ -368,14 +330,14 @@ def test_ticket48005_schemareload(topology):
 
     try:
         # run the schema reload task
-        topology.standalone.tasks.schemaReload(args={TASK_WAIT: False})
+        topology_st.standalone.tasks.schemaReload(args={TASK_WAIT: False})
     except ValueError:
         log.error('Schema Reload task failed.')
         assert False
 
-    topology.standalone.stop(timeout=10)
+    topology_st.standalone.stop(timeout=10)
 
-    logdir = re.sub('errors', '', topology.standalone.errlog)
+    logdir = re.sub('errors', '', topology_st.standalone.errlog)
     cmdline = 'ls ' + logdir + 'core*'
     p = os.popen(cmdline, "r")
     lcore = p.readline()
@@ -386,7 +348,7 @@ def test_ticket48005_schemareload(topology):
         assert False
     log.info('No core files are found')
 
-    topology.standalone.start(timeout=10)
+    topology_st.standalone.start(timeout=10)
 
     log.info("Ticket 48005 schema reload test complete")
 

+ 6 - 48
dirsrvtests/tests/tickets/ticket48013_test.py

@@ -6,31 +6,16 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import ldap
-import logging
-import pytest
 import ldapurl
+import pytest
 from ldap.ldapobject import SimpleLDAPObject
 from ldap.syncrepl import SyncreplConsumer
-from lib389 import DirSrv
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
 
 class SyncObject(SimpleLDAPObject, SyncreplConsumer):
     def __init__(self, uri):
@@ -46,34 +31,7 @@ class SyncObject(SimpleLDAPObject, SyncreplConsumer):
         self.syncrepl_poll(all=1)
 
 
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48013(topology):
+def test_ticket48013(topology_st):
     '''
     Content Synchonization: Test that invalid cookies are caught
     '''
@@ -82,16 +40,16 @@ def test_ticket48013(topology):
 
     # Enable dynamic plugins
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])
     except ldap.LDAPError as e:
         ldap.error('Failed to enable dynamic plugin!' + e.message['desc'])
         assert False
 
     # Enable retro changelog
-    topology.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
+    topology_st.standalone.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
 
     # Enbale content sync plugin
-    topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
+    topology_st.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
 
     # Set everything up
     ldap_url = ldapurl.LDAPUrl('ldap://%s:%s' % (HOST_STANDALONE,

+ 27 - 67
dirsrvtests/tests/tickets/ticket48026_test.py

@@ -6,65 +6,24 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
 USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48026(topology):
+def test_ticket48026(topology_st):
     '''
     Test that multiple attribute uniqueness works correctly.
     '''
     # Configure the plugin
-    inst = topology.standalone
+    inst = topology_st.standalone
     inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
 
     try:
@@ -73,7 +32,7 @@ def test_ticket48026(topology):
                       [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'),
                        (ldap.MOD_ADD, 'uniqueness-attribute-name',
                         'mailAlternateAddress'),
-                      ])
+                       ])
     except ldap.LDAPError as e:
         log.fatal('test_ticket48026: Failed to configure plugin for "mail": error ' + e.message['desc'])
         assert False
@@ -87,7 +46,7 @@ def test_ticket48026(topology):
                                      'cn': 'user 1',
                                      'uid': 'user1',
                                      'mail': '[email protected]',
-                                     'mailAlternateAddress' : '[email protected]',
+                                     'mailAlternateAddress': '[email protected]',
                                      'userpassword': 'password'})))
     except ldap.LDAPError as e:
         log.fatal('test_ticket48026: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])
@@ -95,11 +54,11 @@ def test_ticket48026(topology):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mail': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mail': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -108,24 +67,25 @@ def test_ticket48026(topology):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mailAlternateAddress': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mailAlternateAddress': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
-        log.error('test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded')
+        log.error(
+            'test_ticket48026: Adding of 2nd entry(mailAlternateAddress v mailAlternateAddress) incorrectly succeeded')
         assert False
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mail': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mail': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:
@@ -134,11 +94,11 @@ def test_ticket48026(topology):
 
     try:
         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),
-                                 'sn': '2',
-                                 'cn': 'user 2',
-                                 'uid': 'user2',
-                                 'mailAlternateAddress': '[email protected]',
-                                 'userpassword': 'password'})))
+                                     'sn': '2',
+                                     'cn': 'user 2',
+                                     'uid': 'user2',
+                                     'mailAlternateAddress': '[email protected]',
+                                     'userpassword': 'password'})))
     except ldap.CONSTRAINT_VIOLATION:
         pass
     else:

+ 82 - 135
dirsrvtests/tests/tickets/ticket48109_test.py

@@ -6,71 +6,18 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 UID_INDEX = 'cn=uid,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
 
 
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48109(topology):
+def test_ticket48109(topology_st):
     '''
     Set SubStr lengths to cn=uid,cn=index,...
       objectClass: extensibleObject
@@ -81,54 +28,54 @@ def test_ticket48109(topology):
     log.info('Test case 0')
     # add substr setting to UID_INDEX
     try:
-        topology.standalone.modify_s(UID_INDEX,
-                                     [(ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
-                                      (ldap.MOD_ADD, 'nsIndexType', 'sub'),
-                                      (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
-                                      (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
+        topology_st.standalone.modify_s(UID_INDEX,
+                                        [(ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
+                                         (ldap.MOD_ADD, 'nsIndexType', 'sub'),
+                                         (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
+                                         (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
     except ldap.LDAPError as e:
         log.error('Failed to add substr lengths: error ' + e.message['desc'])
         assert False
 
     # restart the server to apply the indexing
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     # add a test user
     UID = 'auser0'
     USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                                         'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
-                                         'cn': 'a user0',
-                                         'sn': 'user0',
-                                         'givenname': 'a',
-                                         'mail': UID})))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
+            'cn': 'a user0',
+            'sn': 'user0',
+            'givenname': 'a',
+            'mail': UID})))
     except ldap.LDAPError as e:
         log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc'])
         assert False
 
-    entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)')
+    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=a*)')
     assert len(entries) == 1
 
     # restart the server to check the access log
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
-    cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology.standalone.accesslog)
+    cmdline = 'egrep %s %s | egrep "uid=a\*"' % (SUFFIX, topology_st.standalone.accesslog)
     p = os.popen(cmdline, "r")
     l0 = p.readline()
     if l0 == "":
-        log.error('Search with "(uid=a*)" is not logged in ' + topology.standalone.accesslog)
+        log.error('Search with "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog)
         assert False
     else:
-        #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
         regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
         match = regex.match(l0)
         log.info('match: %s' % match.group(1))
-        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
         p = os.popen(cmdline, "r")
         l1 = p.readline()
         if l1 == "":
-            log.error('Search result of "(uid=a*)" is not logged in ' + topology.standalone.accesslog)
+            log.error('Search result of "(uid=a*)" is not logged in ' + topology_st.standalone.accesslog)
             assert False
         else:
             log.info('l1: %s' % l1)
@@ -150,11 +97,11 @@ def test_ticket48109(topology):
 
     # clean up substr setting to UID_INDEX
     try:
-        topology.standalone.modify_s(UID_INDEX,
-                                     [(ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
-                                      (ldap.MOD_DELETE, 'nsIndexType', 'sub'),
-                                      (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
-                                      (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
+        topology_st.standalone.modify_s(UID_INDEX,
+                                        [(ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
+                                         (ldap.MOD_DELETE, 'nsIndexType', 'sub'),
+                                         (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
+                                         (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
     except ldap.LDAPError as e:
         log.error('Failed to delete substr lengths: error ' + e.message['desc'])
         assert False
@@ -168,53 +115,53 @@ def test_ticket48109(topology):
     log.info('Test case 1')
     # add substr setting to UID_INDEX
     try:
-        topology.standalone.modify_s(UID_INDEX,
-                                     [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
-                                      (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'),
-                                      (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2')])
+        topology_st.standalone.modify_s(UID_INDEX,
+                                        [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
+                                         (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=2'),
+                                         (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=2')])
     except ldap.LDAPError as e:
         log.error('Failed to add substr lengths: error ' + e.message['desc'])
         assert False
 
     # restart the server to apply the indexing
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     # add a test user
     UID = 'buser1'
     USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                                         'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
-                                         'cn': 'b user1',
-                                         'sn': 'user1',
-                                         'givenname': 'b',
-                                         'mail': UID})))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
+            'cn': 'b user1',
+            'sn': 'user1',
+            'givenname': 'b',
+            'mail': UID})))
     except ldap.LDAPError as e:
         log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc'])
         assert False
 
-    entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)')
+    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=b*)')
     assert len(entries) == 1
 
     # restart the server to check the access log
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
-    cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology.standalone.accesslog)
+    cmdline = 'egrep %s %s | egrep "uid=b\*"' % (SUFFIX, topology_st.standalone.accesslog)
     p = os.popen(cmdline, "r")
     l0 = p.readline()
     if l0 == "":
-        log.error('Search with "(uid=b*)" is not logged in ' + topology.standalone.accesslog)
+        log.error('Search with "(uid=b*)" is not logged in ' + topology_st.standalone.accesslog)
         assert False
     else:
-        #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
         regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
         match = regex.match(l0)
         log.info('match: %s' % match.group(1))
-        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
         p = os.popen(cmdline, "r")
         l1 = p.readline()
         if l1 == "":
-            log.error('Search result of "(uid=*b)" is not logged in ' + topology.standalone.accesslog)
+            log.error('Search result of "(uid=*b)" is not logged in ' + topology_st.standalone.accesslog)
             assert False
         else:
             log.info('l1: %s' % l1)
@@ -236,10 +183,10 @@ def test_ticket48109(topology):
 
     # clean up substr setting to UID_INDEX
     try:
-        topology.standalone.modify_s(UID_INDEX,
-                                     [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
-                                      (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=2'),
-                                      (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=2')])
+        topology_st.standalone.modify_s(UID_INDEX,
+                                        [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
+                                         (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=2'),
+                                         (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=2')])
     except ldap.LDAPError as e:
         log.error('Failed to delete substr lengths: error ' + e.message['desc'])
         assert False
@@ -258,59 +205,59 @@ def test_ticket48109(topology):
 
     # add substr setting to UID_INDEX
     try:
-        topology.standalone.modify_s(UID_INDEX,
-                                     [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
-                                      (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=3'),
-                                      (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=3'),
-                                      (ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
-                                      (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
-                                      (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
+        topology_st.standalone.modify_s(UID_INDEX,
+                                        [(ldap.MOD_ADD, 'nsIndexType', 'sub'),
+                                         (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrbegin=3'),
+                                         (ldap.MOD_ADD, 'nsMatchingRule', 'nssubstrend=3'),
+                                         (ldap.MOD_ADD, 'objectClass', 'extensibleObject'),
+                                         (ldap.MOD_ADD, 'nsSubStrBegin', '2'),
+                                         (ldap.MOD_ADD, 'nsSubStrEnd', '2')])
     except ldap.LDAPError as e:
         log.error('Failed to add substr lengths: error ' + e.message['desc'])
         assert False
 
     # restart the server to apply the indexing
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     # add a test user
     UID = 'cuser2'
     USER_DN = 'uid=%s,%s' % (UID, SUFFIX)
     try:
-        topology.standalone.add_s(Entry((USER_DN, {
-                                         'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
-                                         'cn': 'c user2',
-                                         'sn': 'user2',
-                                         'givenname': 'c',
-                                         'mail': UID})))
+        topology_st.standalone.add_s(Entry((USER_DN, {
+            'objectclass': 'top person organizationalPerson inetOrgPerson'.split(),
+            'cn': 'c user2',
+            'sn': 'user2',
+            'givenname': 'c',
+            'mail': UID})))
     except ldap.LDAPError as e:
         log.error('Failed to add ' + USER_DN + ': error ' + e.message['desc'])
         assert False
 
-    entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)')
+    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=c*)')
     assert len(entries) == 1
 
-    entries = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)')
+    entries = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*2)')
     assert len(entries) == 1
 
     # restart the server to check the access log
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
-    cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology.standalone.accesslog)
+    cmdline = 'egrep %s %s | egrep "uid=c\*"' % (SUFFIX, topology_st.standalone.accesslog)
     p = os.popen(cmdline, "r")
     l0 = p.readline()
     if l0 == "":
-        log.error('Search with "(uid=c*)" is not logged in ' + topology.standalone.accesslog)
+        log.error('Search with "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog)
         assert False
     else:
-        #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
         regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
         match = regex.match(l0)
         log.info('match: %s' % match.group(1))
-        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
         p = os.popen(cmdline, "r")
         l1 = p.readline()
         if l1 == "":
-            log.error('Search result of "(uid=c*)" is not logged in ' + topology.standalone.accesslog)
+            log.error('Search result of "(uid=c*)" is not logged in ' + topology_st.standalone.accesslog)
             assert False
         else:
             log.info('l1: %s' % l1)
@@ -330,22 +277,22 @@ def test_ticket48109(topology):
                 else:
                     log.info('Test case 2-1 - OK - correct substr index used')
 
-    cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology.standalone.accesslog)
+    cmdline = 'egrep %s %s | egrep "uid=\*2"' % (SUFFIX, topology_st.standalone.accesslog)
     p = os.popen(cmdline, "r")
     l0 = p.readline()
     if l0 == "":
-        log.error('Search with "(uid=*2)" is not logged in ' + topology.standalone.accesslog)
+        log.error('Search with "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog)
         assert False
     else:
-        #regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
+        # regex = re.compile('\(conn=[0-9]* op=[0-9]*\) SRCH .*')
         regex = re.compile(r'.*\s+(conn=\d+ op=\d+)\s+SRCH .*')
         match = regex.match(l0)
         log.info('match: %s' % match.group(1))
-        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology.standalone.accesslog)
+        cmdline = 'egrep "%s" %s | egrep "RESULT"' % (match.group(1), topology_st.standalone.accesslog)
         p = os.popen(cmdline, "r")
         l1 = p.readline()
         if l1 == "":
-            log.error('Search result of "(uid=*2)" is not logged in ' + topology.standalone.accesslog)
+            log.error('Search result of "(uid=*2)" is not logged in ' + topology_st.standalone.accesslog)
             assert False
         else:
             log.info('l1: %s' % l1)
@@ -367,13 +314,13 @@ def test_ticket48109(topology):
 
     # clean up substr setting to UID_INDEX
     try:
-        topology.standalone.modify_s(UID_INDEX,
-                                     [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
-                                      (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=3'),
-                                      (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=3'),
-                                      (ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
-                                      (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
-                                      (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
+        topology_st.standalone.modify_s(UID_INDEX,
+                                        [(ldap.MOD_DELETE, 'nsIndexType', 'sub'),
+                                         (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrbegin=3'),
+                                         (ldap.MOD_DELETE, 'nsMatchingRule', 'nssubstrend=3'),
+                                         (ldap.MOD_DELETE, 'objectClass', 'extensibleObject'),
+                                         (ldap.MOD_DELETE, 'nsSubStrBegin', '2'),
+                                         (ldap.MOD_DELETE, 'nsSubStrEnd', '2')])
     except ldap.LDAPError as e:
         log.error('Failed to delete substr lengths: error ' + e.message['desc'])
         assert False

+ 3 - 47
dirsrvtests/tests/tickets/ticket48170_test.py

@@ -6,59 +6,15 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
 
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48170(topology):
+def test_ticket48170(topology_st):
     '''
    Attempt to add an nsIndexType with an invalid value:  "eq,pres"
     '''
@@ -66,7 +22,7 @@ def test_ticket48170(topology):
     INDEX_DN = 'cn=cn,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config'
     REJECTED = False
     try:
-        topology.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', 'eq,pres')])
+        topology_st.standalone.modify_s(INDEX_DN, [(ldap.MOD_ADD, 'nsINdexType', 'eq,pres')])
     except ldap.UNWILLING_TO_PERFORM:
         log.info('Index update correctly rejected')
         REJECTED = True

+ 166 - 205
dirsrvtests/tests/tickets/ticket48194_test.py

@@ -6,19 +6,15 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
+import logging
 import subprocess
 import time
+
 import ldap
-import logging
 import pytest
-import shutil
-from lib389 import DirSrv, Entry, tools
-from lib389 import DirSrvTools
-from lib389.tools import DirSrvTools
+from lib389 import Entry
 from lib389._constants import *
-from lib389.properties import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -34,63 +30,20 @@ plus_all_ecount_noweak = 0
 plus_all_dcount_noweak = 0
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def _header(topology, label):
-    topology.standalone.log.info("\n\n###############################################")
-    topology.standalone.log.info("####### %s" % label)
-    topology.standalone.log.info("###############################################")
+def _header(topology_st, label):
+    topology_st.standalone.log.info("\n\n###############################################")
+    topology_st.standalone.log.info("####### %s" % label)
+    topology_st.standalone.log.info("###############################################")
 
 
-def test_init(topology):
+def test_init(topology_st):
     """
     Generate self signed cert and import it to the DS cert db.
     Enable SSL
     """
-    _header(topology, 'Testing Ticket 48194 - harden the list of ciphers available by default')
+    _header(topology_st, 'Testing Ticket 48194 - harden the list of ciphers available by default')
 
-    conf_dir = topology.standalone.confdir
+    conf_dir = topology_st.standalone.confdir
 
     log.info("\n######################### Checking existing certs ######################\n")
     os.system('certutil -L -d %s -n "CA certificate"' % conf_dir)
@@ -121,8 +74,9 @@ def test_init(topology):
     os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile))
 
     log.info("\n######################### Creating self-signed CA certificate ######################\n")
-    os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
-              (conf_dir, noisefile, pwdfile))
+    os.system(
+        '( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
+        (conf_dir, noisefile, pwdfile))
 
     log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n")
     cafile = '%s/cacert.asc' % conf_dir
@@ -138,8 +92,9 @@ def test_init(topology):
     log.info("\n######################### Generate the server certificate ######################\n")
     ohostname = os.popen('hostname --fqdn', "r")
     myhostname = ohostname.readline()
-    os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' %
-              (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
+    os.system(
+        'certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' %
+        (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
 
     log.info("\n######################### create the pin file ######################\n")
     pinfile = '%s/pin.txt' % (conf_dir)
@@ -150,25 +105,25 @@ def test_init(topology):
     time.sleep(1)
 
     log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n")
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
-                                                 (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
-                                                 (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
-                                                 (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'),
-                                                 (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3', 'off'),
+                                                    (ldap.MOD_REPLACE, 'nsTLS1', 'on'),
+                                                    (ldap.MOD_REPLACE, 'nsSSLClientAuth', 'allowed'),
+                                                    (ldap.MOD_REPLACE, 'allowWeakCipher', 'on'),
+                                                    (ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all')])
 
-    topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
-                                             (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
-                                             (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)])
+    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-security', 'on'),
+                                                (ldap.MOD_REPLACE, 'nsslapd-ssl-check-hostname', 'off'),
+                                                (ldap.MOD_REPLACE, 'nsslapd-secureport', LDAPSPORT)])
 
-    topology.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
-                                              'cn': RSA,
-                                              'nsSSLPersonalitySSL': SERVERCERT,
-                                              'nsSSLToken': 'internal (software)',
-                                              'nsSSLActivation': 'on'})))
+    topology_st.standalone.add_s(Entry((RSA_DN, {'objectclass': "top nsEncryptionModule".split(),
+                                                 'cn': RSA,
+                                                 'nsSSLPersonalitySSL': SERVERCERT,
+                                                 'nsSSLToken': 'internal (software)',
+                                                 'nsSSLActivation': 'on'})))
 
 
-def connectWithOpenssl(topology, cipher, expect):
+def connectWithOpenssl(topology_st, cipher, expect):
     """
     Connect with the given cipher
     Condition:
@@ -177,7 +132,7 @@ def connectWithOpenssl(topology, cipher, expect):
        access log: "Cannot communicate securely with peer:
                    no common encryption algorithm(s)."
     """
-    log.info("Testing %s -- expect to handshake %s", cipher,"successfully" if expect else "failed")
+    log.info("Testing %s -- expect to handshake %s", cipher, "successfully" if expect else "failed")
 
     myurl = 'localhost:%s' % LDAPSPORT
     cmdline = ['/usr/bin/openssl', 's_client', '-connect', myurl, '-cipher', cipher]
@@ -211,232 +166,237 @@ def connectWithOpenssl(topology, cipher, expect):
                     assert False
 
 
-def test_run_0(topology):
+def test_run_0(topology_st):
     """
     Check nsSSL3Ciphers: +all
     All ciphers are enabled except null.
     Note: allowWeakCipher: on
     """
-    _header(topology, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on')
+    _header(topology_st, 'Test Case 1 - Check the ciphers availability for "+all"; allowWeakCipher: on')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.restart(timeout=120)
+    topology_st.standalone.restart(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', True)
-    connectWithOpenssl(topology, 'AES256-SHA256', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', True)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', True)
 
 
-def test_run_1(topology):
+def test_run_1(topology_st):
     """
     Check nsSSL3Ciphers: +all
     All ciphers are enabled except null.
     Note: default allowWeakCipher (i.e., off) for +all
     """
-    _header(topology, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')
+    _header(topology_st, 'Test Case 2 - Check the ciphers availability for "+all" with default allowWeakCiphers')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
     # Make sure allowWeakCipher is not set.
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_0' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_0' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', True)
 
 
-def test_run_2(topology):
+def test_run_2(topology_st):
     """
     Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
     rsa_aes_128_sha, tls_rsa_aes_128_sha, rsa_aes_256_sha, tls_rsa_aes_256_sha are enabled.
     default allowWeakCipher
     """
-    _header(topology, 'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')
+    _header(topology_st,
+            'Test Case 3 - Check the ciphers availability for "+rsa_aes_128_sha,+rsa_aes_256_sha" with default allowWeakCipher')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN,
+                                    [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+rsa_aes_128_sha,+rsa_aes_256_sha')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_1' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_1' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', False)
-    connectWithOpenssl(topology, 'AES128-SHA', True)
-    connectWithOpenssl(topology, 'AES256-SHA', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+    connectWithOpenssl(topology_st, 'AES128-SHA', True)
+    connectWithOpenssl(topology_st, 'AES256-SHA', True)
 
 
-def test_run_3(topology):
+def test_run_3(topology_st):
     """
     Check nsSSL3Ciphers: -all
     All ciphers are disabled.
     default allowWeakCipher
     """
-    _header(topology, 'Test Case 4 - Check the ciphers availability for "-all"')
+    _header(topology_st, 'Test Case 4 - Check the ciphers availability for "-all"')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_2' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_2' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(1)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', False)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', False)
 
 
-def test_run_4(topology):
+def test_run_4(topology_st):
     """
     Check no nsSSL3Ciphers
     Default ciphers are enabled.
     default allowWeakCipher
     """
-    _header(topology, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher')
+    _header(topology_st, 'Test Case 5 - Check no nsSSL3Ciphers (-all) with default allowWeakCipher')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'nsSSL3Ciphers', '-all')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_3' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_3' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', True)
 
 
-def test_run_5(topology):
+def test_run_5(topology_st):
     """
     Check nsSSL3Ciphers: default
     Default ciphers are enabled.
     default allowWeakCipher
     """
-    _header(topology, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
+    _header(topology_st, 'Test Case 6 - Check default nsSSL3Ciphers (default setting) with default allowWeakCipher')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_4' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_4' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', True)
 
 
-def test_run_6(topology):
+def test_run_6(topology_st):
     """
     Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
     All ciphers are disabled.
     default allowWeakCipher
     """
-    _header(topology, 'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256  with default allowWeakCipher')
+    _header(topology_st,
+            'Test Case 7 - Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256  with default allowWeakCipher')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN,
+                                    [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+all,-TLS_RSA_WITH_AES_256_CBC_SHA256')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_5' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_5' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', False)
-    connectWithOpenssl(topology, 'AES128-SHA', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+    connectWithOpenssl(topology_st, 'AES128-SHA', True)
 
 
-def test_run_7(topology):
+def test_run_7(topology_st):
     """
     Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5
     All ciphers are disabled.
     default allowWeakCipher
     """
-    _header(topology, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher')
+    _header(topology_st, 'Test Case 8 - Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5 with default allowWeakCipher')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-all,+rsa_rc4_128_md5')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_6' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_6' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', False)
-    connectWithOpenssl(topology, 'RC4-MD5', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', False)
+    connectWithOpenssl(topology_st, 'RC4-MD5', True)
 
 
-def test_run_8(topology):
+def test_run_8(topology_st):
     """
     Check nsSSL3Ciphers: default + allowWeakCipher: off
     Strong Default ciphers are enabled.
     """
-    _header(topology, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')
+    _header(topology_st, 'Test Case 9 - Check default nsSSL3Ciphers (default setting + allowWeakCipher: off)')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
-                                                 (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
+                                                    (ldap.MOD_REPLACE, 'allowWeakCipher', 'off')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_7' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_7' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', True)
 
 
-def test_run_9(topology):
+def test_run_9(topology_st):
     """
     Check no nsSSL3Ciphers
     Default ciphers are enabled.
     allowWeakCipher: on
     nsslapd-errorlog-level: 0
     """
-    _header(topology, 'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')
+    _header(topology_st,
+            'Test Case 10 - Check no nsSSL3Ciphers (default setting) with no errorlog-level & allowWeakCipher on')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
-                                                 (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
-    topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', None),
+                                                    (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
+    topology_st.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_8' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_8' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(2)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', True)
-    connectWithOpenssl(topology, 'AES256-SHA256', True)
+    connectWithOpenssl(topology_st, 'RC4-SHA', True)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', True)
 
 
-def test_run_10(topology):
+def test_run_10(topology_st):
     """
     Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
         +TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
@@ -449,43 +409,44 @@ def test_run_10(topology):
     allowWeakCipher: on
     nsslapd-errorlog-level: 0
     """
-    _header(topology, 'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')
+    _header(topology_st,
+            'Test Case 11 - Check nsSSL3Ciphers: long list using the NSS Cipher Suite name with allowWeakCipher on')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
-      '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers',
+                                                     '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_9' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_9' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(1)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'RC4-MD5', True)
-    connectWithOpenssl(topology, 'AES256-SHA256', False)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'RC4-MD5', True)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', False)
 
 
-def test_run_11(topology):
+def test_run_11(topology_st):
     """
     Check nsSSL3Ciphers: +fortezza
    SSL_GetImplementedCiphers does not return this as a secure cipher suite
     """
-    _header(topology, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported')
+    _header(topology_st, 'Test Case 12 - Check nsSSL3Ciphers: +fortezza, which is not supported')
 
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')])
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '+fortezza')])
 
     log.info("\n######################### Restarting the server ######################\n")
-    topology.standalone.stop(timeout=10)
-    os.system('mv %s %s.48194_10' % (topology.standalone.errlog, topology.standalone.errlog))
-    os.system('touch %s' % (topology.standalone.errlog))
+    topology_st.standalone.stop(timeout=10)
+    os.system('mv %s %s.48194_10' % (topology_st.standalone.errlog, topology_st.standalone.errlog))
+    os.system('touch %s' % (topology_st.standalone.errlog))
     time.sleep(1)
-    topology.standalone.start(timeout=120)
+    topology_st.standalone.start(timeout=120)
 
-    connectWithOpenssl(topology, 'RC4-SHA', False)
-    connectWithOpenssl(topology, 'AES256-SHA256', False)
+    connectWithOpenssl(topology_st, 'RC4-SHA', False)
+    connectWithOpenssl(topology_st, 'AES256-SHA256', False)
 
 
 if __name__ == '__main__':

+ 50 - 94
dirsrvtests/tests/tickets/ticket48212_test.py

@@ -1,68 +1,22 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
-from ldap.controls import SimplePagedResultsControl
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 MYSUFFIX = 'dc=example,dc=com'
 MYSUFFIXBE = 'userRoot'
 _MYLDIF = 'example1k_posix.ldif'
 UIDNUMBERDN = "cn=uidnumber,cn=index,cn=userroot,cn=ldbm database,cn=plugins,cn=config"
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-def runDbVerify(topology):
-    topology.standalone.log.info("\n\n	+++++ dbverify +++++\n")
-    sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
-    dbverifyCMD = sbin_dir + "/dbverify -Z " + topology.standalone.inst + " -V"
+def runDbVerify(topology_st):
+    topology_st.standalone.log.info("\n\n	+++++ dbverify +++++\n")
+    sbin_dir = get_sbin_dir(prefix=topology_st.standalone.prefix)
+    dbverifyCMD = sbin_dir + "/dbverify -Z " + topology_st.standalone.inst + " -V"
     dbverifyOUT = os.popen(dbverifyCMD, "r")
-    topology.standalone.log.info("Running %s" % dbverifyCMD)
+    topology_st.standalone.log.info("Running %s" % dbverifyCMD)
     running = True
     error = False
     while running:
@@ -72,34 +26,35 @@ def runDbVerify(topology):
         elif "libdb:" in l:
             running = False
             error = True
-            topology.standalone.log.info("%s" % l)
+            topology_st.standalone.log.info("%s" % l)
         elif "verify failed" in l:
             error = True
             running = False
-            topology.standalone.log.info("%s" % l)
+            topology_st.standalone.log.info("%s" % l)
 
     if error:
-        topology.standalone.log.fatal("dbverify failed")
+        topology_st.standalone.log.fatal("dbverify failed")
         assert False
     else:
-        topology.standalone.log.info("dbverify passed")
+        topology_st.standalone.log.info("dbverify passed")
+
 
-def reindexUidNumber(topology):
-    topology.standalone.log.info("\n\n	+++++ reindex uidnumber +++++\n")
-    sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
-    indexCMD = sbin_dir + "/db2index.pl -Z " + topology.standalone.inst + " -D \"" + DN_DM + "\" -w \"" + PASSWORD + "\" -n " + MYSUFFIXBE + " -t uidnumber"
+def reindexUidNumber(topology_st):
+    topology_st.standalone.log.info("\n\n	+++++ reindex uidnumber +++++\n")
+    sbin_dir = get_sbin_dir(prefix=topology_st.standalone.prefix)
+    indexCMD = sbin_dir + "/db2index.pl -Z " + topology_st.standalone.inst + " -D \"" + DN_DM + "\" -w \"" + PASSWORD + "\" -n " + MYSUFFIXBE + " -t uidnumber"
 
     indexOUT = os.popen(indexCMD, "r")
-    topology.standalone.log.info("Running %s" % indexCMD)
+    topology_st.standalone.log.info("Running %s" % indexCMD)
 
     time.sleep(30)
 
-    tailCMD = "tail -n 3 " + topology.standalone.errlog
+    tailCMD = "tail -n 3 " + topology_st.standalone.errlog
     tailOUT = os.popen(tailCMD, "r")
     assert 'Finished indexing' in tailOUT.read()
 
 
-def test_ticket48212(topology):
+def test_ticket48212(topology_st):
     """
     Import posixAccount entries.
     Index uidNumber
@@ -109,71 +64,72 @@ def test_ticket48212(topology):
     run dbverify to see if it reports the db corruption or not
     if no corruption is reported, the bug fix was verified.
     """
-    log.info('Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.')
+    log.info(
+        'Testing Ticket 48212 - Dynamic nsMatchingRule changes had no effect on the attrinfo thus following reindexing, as well.')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    data_dir_path = topology.standalone.getDir(__file__, DATA_DIR)
+    data_dir_path = topology_st.standalone.getDir(__file__, DATA_DIR)
     ldif_file = data_dir_path + "ticket48212/" + _MYLDIF
     try:
-        ldif_dir = topology.standalone.get_ldif_dir()
+        ldif_dir = topology_st.standalone.get_ldif_dir()
         shutil.copy(ldif_file, ldif_dir)
-        ldif_file = ldif_dir + '/' +  _MYLDIF
+        ldif_file = ldif_dir + '/' + _MYLDIF
     except:
         log.fatal('Failed to copy ldif to instance ldif dir')
         assert False
 
-    topology.standalone.log.info("\n\n######################### Import Test data (%s) ######################\n" % ldif_file)
+    topology_st.standalone.log.info(
+        "\n\n######################### Import Test data (%s) ######################\n" % ldif_file)
     args = {TASK_WAIT: True}
-    importTask = Tasks(topology.standalone)
+    importTask = Tasks(topology_st.standalone)
     importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, ldif_file, args)
     args = {TASK_WAIT: True}
 
-    runDbVerify(topology)
+    runDbVerify(topology_st)
 
-    topology.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n")
+    topology_st.standalone.log.info("\n\n######################### Add index by uidnumber ######################\n")
     try:
-        topology.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(),
-                                                       'cn': 'uidnumber',
-                                                       'nsSystemIndex': 'false',
-                                                       'nsIndexType': "pres eq".split()})))
+        topology_st.standalone.add_s(Entry((UIDNUMBERDN, {'objectclass': "top nsIndex".split(),
+                                                          'cn': 'uidnumber',
+                                                          'nsSystemIndex': 'false',
+                                                          'nsIndexType': "pres eq".split()})))
     except ValueError:
-        topology.standalone.log.fatal("add_s failed: %s", ValueError)
+        topology_st.standalone.log.fatal("add_s failed: %s", ValueError)
 
-    topology.standalone.log.info("\n\n######################### reindexing... ######################\n")
-    reindexUidNumber(topology)
+    topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n")
+    reindexUidNumber(topology_st)
 
-    runDbVerify(topology)
+    runDbVerify(topology_st)
 
-    topology.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n")
+    topology_st.standalone.log.info("\n\n######################### Add nsMatchingRule ######################\n")
     try:
-        topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', 'integerOrderingMatch')])
+        topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_ADD, 'nsMatchingRule', 'integerOrderingMatch')])
     except ValueError:
-        topology.standalone.log.fatal("modify_s failed: %s", ValueError)
+        topology_st.standalone.log.fatal("modify_s failed: %s", ValueError)
 
-    topology.standalone.log.info("\n\n######################### reindexing... ######################\n")
-    reindexUidNumber(topology)
+    topology_st.standalone.log.info("\n\n######################### reindexing... ######################\n")
+    reindexUidNumber(topology_st)
 
-    runDbVerify(topology)
+    runDbVerify(topology_st)
 
-    topology.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n")
+    topology_st.standalone.log.info("\n\n######################### Delete nsMatchingRule ######################\n")
     try:
-        topology.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', 'integerOrderingMatch')])
+        topology_st.standalone.modify_s(UIDNUMBERDN, [(ldap.MOD_DELETE, 'nsMatchingRule', 'integerOrderingMatch')])
     except ValueError:
-        topology.standalone.log.fatal("modify_s failed: %s", ValueError)
+        topology_st.standalone.log.fatal("modify_s failed: %s", ValueError)
 
-    reindexUidNumber(topology)
+    reindexUidNumber(topology_st)
 
-    runDbVerify(topology)
+    runDbVerify(topology_st)
 
     log.info('Testcase PASSED')
 
+
 if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 37 - 83
dirsrvtests/tests/tickets/ticket48214_test.py

@@ -1,15 +1,8 @@
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
-from ldap.controls import SimplePagedResultsControl
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -17,127 +10,88 @@ MYSUFFIX = 'dc=example,dc=com'
 MYSUFFIXBE = 'userRoot'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def getMaxBerSizeFromDseLdif(topology):
-    topology.standalone.log.info("		+++++ Get maxbersize from dse.ldif +++++\n")
-    dse_ldif = topology.standalone.confdir + '/dse.ldif'
+def getMaxBerSizeFromDseLdif(topology_st):
+    topology_st.standalone.log.info("		+++++ Get maxbersize from dse.ldif +++++\n")
+    dse_ldif = topology_st.standalone.confdir + '/dse.ldif'
     grepMaxBerCMD = "egrep nsslapd-maxbersize " + dse_ldif
-    topology.standalone.log.info("		Run CMD: %s\n" % grepMaxBerCMD)
+    topology_st.standalone.log.info("		Run CMD: %s\n" % grepMaxBerCMD)
     grepMaxBerOUT = os.popen(grepMaxBerCMD, "r")
     running = True
     maxbersize = -1
     while running:
         l = grepMaxBerOUT.readline()
         if l == "":
-            topology.standalone.log.info("		Empty: %s\n" % l)
+            topology_st.standalone.log.info("		Empty: %s\n" % l)
             running = False
         elif "nsslapd-maxbersize:" in l.lower():
             running = False
             fields = l.split()
             if len(fields) >= 2:
                 maxbersize = fields[1]
-                topology.standalone.log.info("		Right format - %s %s\n" % (fields[0], fields[1]))
+                topology_st.standalone.log.info("		Right format - %s %s\n" % (fields[0], fields[1]))
             else:
-                topology.standalone.log.info("		Wrong format - %s\n" % l)
+                topology_st.standalone.log.info("		Wrong format - %s\n" % l)
         else:
-            topology.standalone.log.info("		Else?: %s\n" % l)
+            topology_st.standalone.log.info("		Else?: %s\n" % l)
     return maxbersize
 
-def checkMaxBerSize(topology):
-    topology.standalone.log.info("	+++++ Check Max Ber Size +++++\n")
-    maxbersizestr = getMaxBerSizeFromDseLdif(topology)
+
+def checkMaxBerSize(topology_st):
+    topology_st.standalone.log.info("	+++++ Check Max Ber Size +++++\n")
+    maxbersizestr = getMaxBerSizeFromDseLdif(topology_st)
     maxbersize = int(maxbersizestr)
     isdefault = True
     defaultvalue = 2097152
     if maxbersize < 0:
-        topology.standalone.log.info("	No nsslapd-maxbersize found in dse.ldif\n")
+        topology_st.standalone.log.info("	No nsslapd-maxbersize found in dse.ldif\n")
     elif maxbersize == 0:
-        topology.standalone.log.info("	nsslapd-maxbersize: %d\n" % maxbersize)
+        topology_st.standalone.log.info("	nsslapd-maxbersize: %d\n" % maxbersize)
     else:
         isdefault = False
-        topology.standalone.log.info("	nsslapd-maxbersize: %d\n" % maxbersize)
+        topology_st.standalone.log.info("	nsslapd-maxbersize: %d\n" % maxbersize)
 
     try:
-        entry = topology.standalone.search_s('cn=config', ldap.SCOPE_BASE,
-                                             "(cn=*)",
-                                              ['nsslapd-maxbersize'])
+        entry = topology_st.standalone.search_s('cn=config', ldap.SCOPE_BASE,
+                                                "(cn=*)",
+                                                ['nsslapd-maxbersize'])
         if entry:
             searchedsize = entry[0].getValue('nsslapd-maxbersize')
-            topology.standalone.log.info("	ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize)
+            topology_st.standalone.log.info("	ldapsearch returned nsslapd-maxbersize: %s\n" % searchedsize)
         else:
-            topology.standalone.log.fatal('ERROR: cn=config is not found?')
+            topology_st.standalone.log.fatal('ERROR: cn=config is not found?')
             assert False
     except ldap.LDAPError as e:
-        topology.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc'])
+        topology_st.standalone.log.error('ERROR: Failed to search for user entry: ' + e.message['desc'])
         assert False
 
     if isdefault:
-        topology.standalone.log.info("	Checking %d vs %d\n" % (int(searchedsize), defaultvalue))
+        topology_st.standalone.log.info("	Checking %d vs %d\n" % (int(searchedsize), defaultvalue))
         assert int(searchedsize) == defaultvalue
 
 
-def test_ticket48214_run(topology):
+def test_ticket48214_run(topology_st):
     """
     Check ldapsearch returns the correct maxbersize when it is not explicitly set.
     """
     log.info('Testing Ticket 48214 - ldapsearch on nsslapd-maxbersize returns 0 instead of current value')
 
     # bind as directory manager
-    topology.standalone.log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.log.info("Bind as %s" % DN_DM)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
-    topology.standalone.log.info("\n\n######################### Out of Box ######################\n")
-    checkMaxBerSize(topology)
+    topology_st.standalone.log.info("\n\n######################### Out of Box ######################\n")
+    checkMaxBerSize(topology_st)
 
-    topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n")
-    topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '0')])
-    checkMaxBerSize(topology)
+    topology_st.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 0 ######################\n")
+    topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '0')])
+    checkMaxBerSize(topology_st)
 
-    topology.standalone.log.info("\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n")
-    topology.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '10000')])
-    checkMaxBerSize(topology)
+    topology_st.standalone.log.info(
+        "\n\n######################### Add nsslapd-maxbersize: 10000 ######################\n")
+    topology_st.standalone.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '10000')])
+    checkMaxBerSize(topology_st)
 
-    topology.standalone.log.info("ticket48214 was successfully verified.")
+    topology_st.standalone.log.info("ticket48214 was successfully verified.")
 
 
 if __name__ == '__main__':

+ 28 - 142
dirsrvtests/tests/tickets/ticket48226_test.py

@@ -6,163 +6,50 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyReplication(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    os.environ['USE_VALGRIND'] = '1'
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating master 1...
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-    instance_master1 = master1.exists()
-    if instance_master1:
-        master1.delete()
-    master1.create()
-    master1.open()
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
-    # Creating master 2...
-    master2 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-    instance_master2 = master2.exists()
-    if instance_master2:
-        master2.delete()
-    master2.create()
-    master2.open()
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    #
-    # Create all the agreements
-    #
-    # Creating agreement from master 1 to master 2
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-    if not m1_m2_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m1_m2_agmt)
-
-    # Creating agreement from master 2 to master 1
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-    if not m2_m1_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m2_m1_agmt)
-
-    # Allow the replicas to get situated with the new agreements...
-    time.sleep(5)
-
-    #
-    # Initialize all the agreements
-    #
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(m1_m2_agmt)
-
-    def fin():
-        master1.delete()
-        master2.delete()
-        sbin_dir = master2.get_sbin_dir()
-        if not master2.has_asan():
-            valgrind_disable(sbin_dir)
-    request.addfinalizer(fin)
-
-    # Check replication is working...
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    return TopologyReplication(master1, master2)
 
-
-def test_ticket48226_set_purgedelay(topology):
+def test_ticket48226_set_purgedelay(topology_m2):
     args = {REPLICA_PURGE_DELAY: '5',
             REPLICA_PURGE_INTERVAL: '5'}
     try:
-        topology.master1.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+        topology_m2.ms["master1"].replica.setProperties(DEFAULT_SUFFIX, None, None, args)
     except:
         log.fatal('Failed to configure replica')
         assert False
     try:
-        topology.master2.replica.setProperties(DEFAULT_SUFFIX, None, None, args)
+        topology_m2.ms["master2"].replica.setProperties(DEFAULT_SUFFIX, None, None, args)
     except:
         log.fatal('Failed to configure replica')
         assert False
-    topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
-    topology.master2.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
-    topology.master1.restart(30)
-    topology.master2.restart(30)
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-auditlog-logging-enabled', 'on')])
+    topology_m2.ms["master1"].restart(30)
+    topology_m2.ms["master2"].restart(30)
 
 
-def test_ticket48226_1(topology):
+def test_ticket48226_1(topology_m2):
     name = 'test_entry'
     dn = "cn=%s,%s" % (name, SUFFIX)
 
-    topology.master1.add_s(Entry((dn, {'objectclass': "top person".split(),
-                                        'sn': name,
-                                        'cn': name})))
+    topology_m2.ms["master1"].add_s(Entry((dn, {'objectclass': "top person".split(),
+                                                'sn': name,
+                                                'cn': name})))
 
     # First do an update that is replicated
     mods = [(ldap.MOD_ADD, 'description', '5')]
-    topology.master1.modify_s(dn, mods)
+    topology_m2.ms["master1"].modify_s(dn, mods)
 
     nbtry = 0
     while (nbtry <= 10):
         try:
-            ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
+            ent = topology_m2.ms["master2"].getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", ['description'])
             if ent.hasAttr('description') and ent.getValue('description') == '5':
                 break
         except ldap.NO_SUCH_OBJECT:
@@ -172,51 +59,51 @@ def test_ticket48226_1(topology):
     assert nbtry <= 10
 
     # Stop M2 so that it will not receive the next update
-    topology.master2.stop(10)
+    topology_m2.ms["master2"].stop(10)
 
     # ADD a new value that is not replicated
     mods = [(ldap.MOD_DELETE, 'description', '5')]
-    topology.master1.modify_s(dn, mods)
+    topology_m2.ms["master1"].modify_s(dn, mods)
 
     # Stop M1 so that it will keep del '5' that is unknown from master2
-    topology.master1.stop(10)
+    topology_m2.ms["master1"].stop(10)
 
     # Get the sbin directory so we know where to replace 'ns-slapd'
-    sbin_dir = topology.master2.get_sbin_dir()
+    sbin_dir = topology_m2.ms["master2"].get_sbin_dir()
 
     # Enable valgrind
-    if not topology.master2.has_asan():
+    if not topology_m2.ms["master2"].has_asan():
         valgrind_enable(sbin_dir)
 
     # start M2 to do the next updates
-    topology.master2.start()
+    topology_m2.ms["master2"].start()
 
     # ADD 'description' by '5'
     mods = [(ldap.MOD_DELETE, 'description', '5')]
-    topology.master2.modify_s(dn, mods)
+    topology_m2.ms["master2"].modify_s(dn, mods)
 
     # DEL 'description' by '5'
     mods = [(ldap.MOD_ADD, 'description', '5')]
-    topology.master2.modify_s(dn, mods)
+    topology_m2.ms["master2"].modify_s(dn, mods)
 
     # sleep of purge delay so that the next update will purge the CSN_7
     time.sleep(6)
 
     # ADD 'description' by '6' that purge the state info
     mods = [(ldap.MOD_ADD, 'description', '6')]
-    topology.master2.modify_s(dn, mods)
+    topology_m2.ms["master2"].modify_s(dn, mods)
 
     # Restart master1
-    #topology.master1.start(30)
+    # topology_m2.ms["master1"].start(30)
 
-    if not topology.master2.has_asan():
-        results_file = valgrind_get_results_file(topology.master2)
+    if not topology_m2.ms["master2"].has_asan():
+        results_file = valgrind_get_results_file(topology_m2.ms["master2"])
 
     # Stop master2
-    topology.master2.stop(30)
+    topology_m2.ms["master2"].stop(30)
 
     # Check for leak
-    if not topology.master2.has_asan():
+    if not topology_m2.ms["master2"].has_asan():
         if valgrind_check_file(results_file, VALGRIND_LEAK_STR, 'csnset_dup'):
             log.info('Valgrind reported leak in csnset_dup!')
             assert False
@@ -245,4 +132,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

+ 76 - 117
dirsrvtests/tests/tickets/ticket48228_test.py

@@ -6,17 +6,11 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -32,60 +26,19 @@ USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX
 USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def set_global_pwpolicy(topology, inhistory):
+def set_global_pwpolicy(topology_st, inhistory):
     log.info("	+++++ Enable global password policy +++++\n")
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     # Enable password policy
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-pwpolicy-local', 'on')])
     except ldap.LDAPError as e:
         log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
         assert False
 
     log.info("		Set global password history on\n")
     try:
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', 'on')])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordHistory', 'on')])
     except ldap.LDAPError as e:
         log.error('Failed to set passwordHistory: error ' + e.message['desc'])
         assert False
@@ -93,66 +46,68 @@ def set_global_pwpolicy(topology, inhistory):
     log.info("		Set global passwords in history\n")
     try:
         count = "%d" % inhistory
-        topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count)])
+        topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'passwordInHistory', count)])
     except ldap.LDAPError as e:
         log.error('Failed to set passwordInHistory: error ' + e.message['desc'])
         assert False
 
 
-def set_subtree_pwpolicy(topology):
+def set_subtree_pwpolicy(topology_st):
     log.info("	+++++ Enable subtree level password policy +++++\n")
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     log.info("		Add the container")
     try:
-        topology.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(),
-                                                             'cn': 'nsPwPolicyContainer'})))
+        topology_st.standalone.add_s(Entry((SUBTREE_CONTAINER, {'objectclass': 'top nsContainer'.split(),
+                                                                'cn': 'nsPwPolicyContainer'})))
     except ldap.LDAPError as e:
         log.error('Failed to add subtree container: error ' + e.message['desc'])
         assert False
 
     log.info("		Add the password policy subentry {passwordHistory: on, passwordInHistory: 6}")
     try:
-        topology.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(),
-                                                       'cn': SUBTREE_PWPDN,
-                                                       'passwordMustChange': 'off',
-                                                       'passwordExp': 'off',
-                                                       'passwordHistory': 'on',
-                                                       'passwordInHistory': '6',
-                                                       'passwordMinAge': '0',
-                                                       'passwordChange': 'on',
-                                                       'passwordStorageScheme': 'clear'})))
+        topology_st.standalone.add_s(Entry((SUBTREE_PWP, {'objectclass': 'top ldapsubentry passwordpolicy'.split(),
+                                                          'cn': SUBTREE_PWPDN,
+                                                          'passwordMustChange': 'off',
+                                                          'passwordExp': 'off',
+                                                          'passwordHistory': 'on',
+                                                          'passwordInHistory': '6',
+                                                          'passwordMinAge': '0',
+                                                          'passwordChange': 'on',
+                                                          'passwordStorageScheme': 'clear'})))
     except ldap.LDAPError as e:
         log.error('Failed to add passwordpolicy: error ' + e.message['desc'])
         assert False
 
     log.info("		Add the COS template")
     try:
-        topology.standalone.add_s(Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
-                                                            'cn': SUBTREE_PWPDN,
-                                                            'cosPriority': '1',
-                                                            'cn': SUBTREE_COS_TMPLDN,
-                                                            'pwdpolicysubentry': SUBTREE_PWP})))
+        topology_st.standalone.add_s(
+            Entry((SUBTREE_COS_TMPL, {'objectclass': 'top ldapsubentry costemplate extensibleObject'.split(),
+                                      'cn': SUBTREE_PWPDN,
+                                      'cosPriority': '1',
+                                      'cn': SUBTREE_COS_TMPLDN,
+                                      'pwdpolicysubentry': SUBTREE_PWP})))
     except ldap.LDAPError as e:
         log.error('Failed to add COS template: error ' + e.message['desc'])
         assert False
 
     log.info("		Add the COS definition")
     try:
-        topology.standalone.add_s(Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
-                                                           'cn': SUBTREE_PWPDN,
-                                                           'costemplatedn': SUBTREE_COS_TMPL,
-                                                           'cosAttribute': 'pwdpolicysubentry default operational-default'})))
+        topology_st.standalone.add_s(
+            Entry((SUBTREE_COS_DEF, {'objectclass': 'top ldapsubentry cosSuperDefinition cosPointerDefinition'.split(),
+                                     'cn': SUBTREE_PWPDN,
+                                     'costemplatedn': SUBTREE_COS_TMPL,
+                                     'cosAttribute': 'pwdpolicysubentry default operational-default'})))
     except ldap.LDAPError as e:
         log.error('Failed to add COS def: error ' + e.message['desc'])
         assert False
 
 
-def check_passwd_inhistory(topology, user, cpw, passwd):
+def check_passwd_inhistory(topology_st, user, cpw, passwd):
     inhistory = 0
     log.info("		Bind as {%s,%s}" % (user, cpw))
-    topology.standalone.simple_bind_s(user, cpw)
+    topology_st.standalone.simple_bind_s(user, cpw)
     try:
-        topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
+        topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', passwd)])
     except ldap.LDAPError as e:
         log.info('		The password ' + passwd + ' of user' + USER1_DN + ' in history: error ' + e.message['desc'])
         inhistory = 1
@@ -160,114 +115,118 @@ def check_passwd_inhistory(topology, user, cpw, passwd):
     return inhistory
 
 
-def update_passwd(topology, user, passwd, times):
+def update_passwd(topology_st, user, passwd, times):
     cpw = passwd
     for i in range(times):
         log.info("		Bind as {%s,%s}" % (user, cpw))
-        topology.standalone.simple_bind_s(user, cpw)
+        topology_st.standalone.simple_bind_s(user, cpw)
         cpw = 'password%d' % i
         try:
-            topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)])
+            topology_st.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)])
         except ldap.LDAPError as e:
-            log.fatal('test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message['desc'])
+            log.fatal(
+                'test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message[
+                    'desc'])
             assert False
         time.sleep(1)
 
     # checking the first password, which is supposed to be in history
-    inhistory = check_passwd_inhistory(topology, user, cpw, passwd)
+    inhistory = check_passwd_inhistory(topology_st, user, cpw, passwd)
     assert inhistory == 1
 
 
-def test_ticket48228_test_global_policy(topology):
+def test_ticket48228_test_global_policy(topology_st):
     """
     Check global password policy
     """
 
     log.info('	Set inhistory = 6')
-    set_global_pwpolicy(topology, 6)
+    set_global_pwpolicy(topology_st, 6)
 
     log.info('	Bind as directory manager')
     log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     log.info('	Add an entry' + USER1_DN)
     try:
-        topology.standalone.add_s(Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                     'sn': '1',
-                                     'cn': 'user 1',
-                                     'uid': 'user1',
-                                     'givenname': 'user',
-                                     'mail': '[email protected]',
-                                     'userpassword': 'password'})))
+        topology_st.standalone.add_s(
+            Entry((USER1_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                              'sn': '1',
+                              'cn': 'user 1',
+                              'uid': 'user1',
+                              'givenname': 'user',
+                              'mail': '[email protected]',
+                              'userpassword': 'password'})))
     except ldap.LDAPError as e:
         log.fatal('test_ticket48228: Failed to add user' + USER1_DN + ': error ' + e.message['desc'])
         assert False
 
     log.info('	Update the password of ' + USER1_DN + ' 6 times')
-    update_passwd(topology, USER1_DN, 'password', 6)
+    update_passwd(topology_st, USER1_DN, 'password', 6)
 
     log.info('	Set inhistory = 4')
-    set_global_pwpolicy(topology, 4)
+    set_global_pwpolicy(topology_st, 4)
 
     log.info('	checking the first password, which is supposed NOT to be in history any more')
     cpw = 'password%d' % 5
     tpw = 'password'
-    inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
     assert inhistory == 0
 
     log.info('	checking the second password, which is supposed NOT to be in history any more')
     cpw = tpw
     tpw = 'password%d' % 0
-    inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
     assert inhistory == 0
 
     log.info('	checking the third password, which is supposed NOT to be in history any more')
     cpw = tpw
     tpw = 'password%d' % 1
-    inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
     assert inhistory == 0
 
     log.info('	checking the sixth password, which is supposed to be in history')
     cpw = tpw
     tpw = 'password%d' % 5
-    inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER1_DN, cpw, tpw)
     assert inhistory == 1
 
     log.info("Global policy was successfully verified.")
 
 
-def test_ticket48228_test_subtree_policy(topology):
+def test_ticket48228_test_subtree_policy(topology_st):
     """
     Check subtree level password policy
     """
 
     log.info('	Set inhistory = 6')
-    set_subtree_pwpolicy(topology)
+    set_subtree_pwpolicy(topology_st)
 
     log.info('	Bind as directory manager')
     log.info("Bind as %s" % DN_DM)
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
 
     log.info('	Add an entry' + USER2_DN)
     try:
-        topology.standalone.add_s(Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                     'sn': '2',
-                                     'cn': 'user 2',
-                                     'uid': 'user2',
-                                     'givenname': 'user',
-                                     'mail': '[email protected]',
-                                     'userpassword': 'password'})))
+        topology_st.standalone.add_s(
+            Entry((USER2_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+                              'sn': '2',
+                              'cn': 'user 2',
+                              'uid': 'user2',
+                              'givenname': 'user',
+                              'mail': '[email protected]',
+                              'userpassword': 'password'})))
     except ldap.LDAPError as e:
         log.fatal('test_ticket48228: Failed to add user' + USER2_DN + ': error ' + e.message['desc'])
         assert False
 
     log.info('	Update the password of ' + USER2_DN + ' 6 times')
-    update_passwd(topology, USER2_DN, 'password', 6)
+    update_passwd(topology_st, USER2_DN, 'password', 6)
 
     log.info('	Set inhistory = 4')
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     try:
-        topology.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', '4')])
+        topology_st.standalone.modify_s(SUBTREE_PWP, [(ldap.MOD_REPLACE, 'passwordInHistory', '4')])
     except ldap.LDAPError as e:
         log.error('Failed to set pwpolicy-local: error ' + e.message['desc'])
         assert False
@@ -275,25 +234,25 @@ def test_ticket48228_test_subtree_policy(topology):
     log.info('	checking the first password, which is supposed NOT to be in history any more')
     cpw = 'password%d' % 5
     tpw = 'password'
-    inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
     assert inhistory == 0
 
     log.info('	checking the second password, which is supposed NOT to be in history any more')
     cpw = tpw
     tpw = 'password%d' % 1
-    inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
     assert inhistory == 0
 
     log.info('	checking the third password, which is supposed NOT to be in history any more')
     cpw = tpw
     tpw = 'password%d' % 2
-    inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
     assert inhistory == 0
 
     log.info('	checking the six password, which is supposed to be in history')
     cpw = tpw
     tpw = 'password%d' % 5
-    inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
+    inhistory = check_passwd_inhistory(topology_st, USER2_DN, cpw, tpw)
     assert inhistory == 1
 
     log.info("Subtree level policy was successfully verified.")

+ 8 - 56
dirsrvtests/tests/tickets/ticket48233_test.py

@@ -1,60 +1,12 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
 
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48233(topology):
+def test_ticket48233(topology_st):
     """Test that ACI's that use IP restrictions do not crash the server at
        shutdown
     """
@@ -65,7 +17,7 @@ def test_ticket48233(topology):
                 '(userdn = "ldap:///anyone") and (ip="127.0.0.1");)')
 
     try:
-        topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
+        topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
     except ldap.LDAPError as e:
         log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc']))
         assert False
@@ -73,13 +25,13 @@ def test_ticket48233(topology):
 
     # Anonymous search to engage the aci
     try:
-        topology.standalone.simple_bind_s("", "")
+        topology_st.standalone.simple_bind_s("", "")
     except ldap.LDAPError as e:
         log.error('Failed to anonymously bind -error %s' % (e.message['desc']))
         assert False
 
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*')
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*')
         if not entries:
             log.fatal('Failed return an entries from search')
             assert False
@@ -88,10 +40,10 @@ def test_ticket48233(topology):
         assert False
 
     # Restart the server
-    topology.standalone.restart(timeout=10)
+    topology_st.standalone.restart(timeout=10)
 
     # Check for crash
-    if topology.standalone.detectDisorderlyShutdown():
+    if topology_st.standalone.detectDisorderlyShutdown():
         log.fatal('Server crashed!')
         assert False
 
@@ -102,4 +54,4 @@ if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s %s" % CURRENT_FILE)
+    pytest.main("-s %s" % CURRENT_FILE)

+ 15 - 59
dirsrvtests/tests/tickets/ticket48234_test.py

@@ -1,63 +1,18 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
 
 def add_ou_entry(server, name, myparent):
     dn = 'ou=%s,%s' % (name, myparent)
     server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'],
                              'ou': name})))
 
+
 def add_user_entry(server, name, pw, myparent):
     dn = 'cn=%s,%s' % (name, myparent)
     server.add_s(Entry((dn, {'objectclass': ['top', 'person'],
@@ -66,7 +21,8 @@ def add_user_entry(server, name, pw, myparent):
                              'telephonenumber': '+1 222 333-4444',
                              'userpassword': pw})))
 
-def test_ticket48234(topology):
+
+def test_ticket48234(topology_st):
     """
     Test aci which contains an extensible filter.
        shutdown
@@ -74,9 +30,9 @@ def test_ticket48234(topology):
 
     log.info('Bind as root DN')
     try:
-        topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+        topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
+        topology_st.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
         assert False
 
     ouname = 'outest'
@@ -86,11 +42,11 @@ def test_ticket48234(topology):
     log.info('Add aci which contains extensible filter.')
     aci_text = ('(targetattr = "%s")' % (deniedattr) +
                 '(target = "ldap:///%s")' % (DEFAULT_SUFFIX) +
-                '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' + 
+                '(version 3.0;acl "admin-tel-matching-rule-outest";deny (all)' +
                 '(userdn = "ldap:///%s??sub?(&(cn=%s)(ou:dn:=%s))");)' % (DEFAULT_SUFFIX, username, ouname))
 
     try:
-        topology.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
+        topology_st.standalone.modify_s(DEFAULT_SUFFIX, [(ldap.MOD_ADD, 'aci', aci_text)])
     except ldap.LDAPError as e:
         log.error('Failed to add aci: (%s) error %s' % (aci_text, e.message['desc']))
         assert False
@@ -99,34 +55,34 @@ def test_ticket48234(topology):
     for idx in range(0, 2):
         ou0 = 'OU%d' % idx
         log.info('adding %s under %s...' % (ou0, DEFAULT_SUFFIX))
-        add_ou_entry(topology.standalone, ou0, DEFAULT_SUFFIX)
+        add_ou_entry(topology_st.standalone, ou0, DEFAULT_SUFFIX)
         parent = 'ou=%s,%s' % (ou0, DEFAULT_SUFFIX)
         log.info('adding %s under %s...' % (ouname, parent))
-        add_ou_entry(topology.standalone, ouname, parent)
+        add_ou_entry(topology_st.standalone, ouname, parent)
 
     for idx in range(0, 2):
         parent = 'ou=%s,ou=OU%d,%s' % (ouname, idx, DEFAULT_SUFFIX)
         log.info('adding %s under %s...' % (username, parent))
-        add_user_entry(topology.standalone, username, passwd, parent)
+        add_user_entry(topology_st.standalone, username, passwd, parent)
 
     binddn = 'cn=%s,%s' % (username, parent)
     log.info('Bind as user %s' % binddn)
     try:
-        topology.standalone.simple_bind_s(binddn, passwd)
+        topology_st.standalone.simple_bind_s(binddn, passwd)
     except ldap.LDAPError as e:
-        topology.standalone.log.error(bindn + ' failed to authenticate: ' + e.message['desc'])
+        topology_st.standalone.log.error(binddn + ' failed to authenticate: ' + e.message['desc'])
         assert False
 
     filter = '(cn=%s)' % username
     try:
-        entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, [deniedattr, 'dn'])
+        entries = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, filter, [deniedattr, 'dn'])
         assert 2 == len(entries)
         for idx in range(0, 1):
             if entries[idx].hasAttr(deniedattr):
                 log.fatal('aci with extensible filter failed -- %s')
                 assert False
     except ldap.LDAPError as e:
-        topology.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.message['desc'])
+        topology_st.standalone.log.error('Search (%s, %s) failed: ' % (DEFAULT_SUFFIX, filter) + e.message['desc'])
         assert False
 
     log.info('Test complete')

+ 22 - 69
dirsrvtests/tests/tickets/ticket48252_test.py

@@ -6,17 +6,11 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import os
-import sys
-import time
-import ldap
 import logging
+
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
+from lib389.topologies import topology_st
 
 log = logging.getLogger(__name__)
 
@@ -25,48 +19,7 @@ USER_NUM = 10
 TEST_USER = "test_user"
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    '''
-        This fixture is used to standalone topology for the 'module'.
-    '''
-    standalone = DirSrv(verbose=False)
-
-    # Args for the standalone instance
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-
-    # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists()
-
-    # Remove the instance
-    if instance_standalone:
-        standalone.delete()
-
-    # Create the instance
-    standalone.create()
-
-    # Used to retrieve configuration information (dbdir, confdir...)
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    # Here we have standalone instance up and running
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48252_setup(topology):
+def test_ticket48252_setup(topology_st):
     """
     Enable USN plug-in for enabling tombstones
     Add test entries
@@ -74,7 +27,7 @@ def test_ticket48252_setup(topology):
 
     log.info("Enable the USN plugin...")
     try:
-        topology.standalone.plugins.enable(name=PLUGIN_USN)
+        topology_st.standalone.plugins.enable(name=PLUGIN_USN)
     except e:
         log.error("Failed to enable USN Plugin: error " + e.message['desc'])
         assert False
@@ -82,28 +35,28 @@ def test_ticket48252_setup(topology):
     log.info("Adding test entries...")
     for id in range(USER_NUM):
         name = "%s%d" % (TEST_USER, id)
-        topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                         'objectclass': "top person".split(),
-                                         'sn': name,
-                                         'cn': name})))
+        topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
 
 
-def in_index_file(topology, id, index):
+def in_index_file(topology_st, id, index):
     key = "%s%s" % (TEST_USER, id)
     log.info("	dbscan - checking %s is in index file %s..." % (key, index))
-    dbscanOut = topology.standalone.dbscan(DEFAULT_BENAME, index)
+    dbscanOut = topology_st.standalone.dbscan(DEFAULT_BENAME, index)
 
     if key in dbscanOut:
         found = True
-        topology.standalone.log.info("Found key %s in dbscan output" % key)
+        topology_st.standalone.log.info("Found key %s in dbscan output" % key)
     else:
         found = False
-        topology.standalone.log.info("Did not found key %s in dbscan output" % key)
+        topology_st.standalone.log.info("Did not found key %s in dbscan output" % key)
 
     return found
 
 
-def test_ticket48252_run_0(topology):
+def test_ticket48252_run_0(topology_st):
     """
     Delete an entry cn=test_entry0
     Check it is not in the 'cn' index file
@@ -112,19 +65,19 @@ def test_ticket48252_run_0(topology):
     del_rdn = "cn=%s0" % TEST_USER
     del_entry = "%s,%s" % (del_rdn, SUFFIX)
     log.info("	Deleting a test entry %s..." % del_entry)
-    topology.standalone.delete_s(del_entry)
+    topology_st.standalone.delete_s(del_entry)
 
-    assert in_index_file(topology, 0, 'cn') == False
+    assert in_index_file(topology_st, 0, 'cn') == False
 
     log.info("	db2index - reindexing %s ..." % 'cn')
-    assert topology.standalone.db2index(DEFAULT_BENAME, 'cn')
+    assert topology_st.standalone.db2index(DEFAULT_BENAME, 'cn')
 
-    assert in_index_file(topology, 0, 'cn') == False
+    assert in_index_file(topology_st, 0, 'cn') == False
     log.info("	entry %s is not in the cn index file after reindexed." % del_entry)
     log.info('Case 1 - PASSED')
 
 
-def test_ticket48252_run_1(topology):
+def test_ticket48252_run_1(topology_st):
     """
     Delete an entry cn=test_entry1
     Check it is in the 'objectclass' index file as a tombstone entry
@@ -133,16 +86,16 @@ def test_ticket48252_run_1(topology):
     del_rdn = "cn=%s1" % TEST_USER
     del_entry = "%s,%s" % (del_rdn, SUFFIX)
     log.info("	Deleting a test entry %s..." % del_entry)
-    topology.standalone.delete_s(del_entry)
+    topology_st.standalone.delete_s(del_entry)
 
-    entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
+    entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
     assert len(entry) == 1
     log.info("	entry %s is in the objectclass index file." % del_entry)
 
     log.info("	db2index - reindexing %s ..." % 'objectclass')
-    assert topology.standalone.db2index(DEFAULT_BENAME, 'objectclass')
+    assert topology_st.standalone.db2index(DEFAULT_BENAME, 'objectclass')
 
-    entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
+    entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, '(&(objectclass=nstombstone)(%s))' % del_rdn)
     assert len(entry) == 1
     log.info("	entry %s is in the objectclass index file after reindexed." % del_entry)
     log.info('Case 2 - PASSED')

+ 19 - 57
dirsrvtests/tests/tickets/ticket48265_test.py

@@ -6,59 +6,19 @@
 # See LICENSE for details.
 # --- END COPYRIGHT BLOCK ---
 #
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 USER_NUM = 20
 TEST_USER = 'test_user'
 
 
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
-
-
-def test_ticket48265_test(topology):
+def test_ticket48265_test(topology_st):
     """
     Complex filter issues
     Ticket 47521 type complex filter:
@@ -72,32 +32,34 @@ def test_ticket48265_test(topology):
         name = "%s%d" % (TEST_USER, id)
         mail = "%[email protected]" % name
         secretary = "cn=%s,ou=secretary,%s" % (name, SUFFIX)
-        topology.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                         'objectclass': "top person organizationalPerson inetOrgPerson".split(),
-                                         'sn': name,
-                                         'cn': name,
-                                         'uid': name,
-                                         'givenname': 'test',
-                                         'mail': mail,
-                                         'description': 'description',
-                                         'secretary': secretary,
-                                         'l': 'MV',
-                                         'title': 'Engineer'})))
+        topology_st.standalone.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+            'sn': name,
+            'cn': name,
+            'uid': name,
+            'givenname': 'test',
+            'mail': mail,
+            'description': 'description',
+            'secretary': secretary,
+            'l': 'MV',
+            'title': 'Engineer'})))
 
     log.info("Search with Ticket 47521 type complex filter")
     for id in range(USER_NUM):
         name = "%s%d" % (TEST_USER, id)
         mail = "%[email protected]" % name
-        filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % (TEST_USER, TEST_USER, mail)
-        entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521)
+        filter47521 = '(&(|(uid=%s*)(cn=%s*))(&(givenname=test))(mail=%s)(&(description=*)))' % (
+        TEST_USER, TEST_USER, mail)
+        entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter47521)
         assert len(entry) == 1
 
     log.info("Search with Ticket 48265 type complex filter")
     for id in range(USER_NUM):
         name = "%s%d" % (TEST_USER, id)
         mail = "%[email protected]" % name
-        filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % (name, mail)
-        entry = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265)
+        filter48265 = '(&(&(|(l=AA)(l=BB)(l=MV))(|(title=admin)(title=engineer)))(|(uid=%s)(mail=%s))(description=description))' % (
+        name, mail)
+        entry = topology_st.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, filter48265)
         assert len(entry) == 1
 
     log.info('Test 48265 complete\n')

+ 68 - 179
dirsrvtests/tests/tickets/ticket48266_test.py

@@ -1,134 +1,21 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_m2
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
-
 NEW_ACCOUNT = "new_account"
 MAX_ACCOUNTS = 20
 
 
-class TopologyReplication(object):
-    def __init__(self, master1, master2):
-        master1.open()
-        self.master1 = master1
-        master2.open()
-        self.master2 = master2
-
-
[email protected](scope="module")
-def topology(request):
-    global installation1_prefix
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
-    # Creating master 1...
-    master1 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_MASTER_1
-    args_instance[SER_PORT] = PORT_MASTER_1
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master1.allocate(args_master)
-    instance_master1 = master1.exists()
-    if instance_master1:
-        master1.delete()
-    master1.create()
-    master1.open()
-    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
-    # Creating master 2...
-    master2 = DirSrv(verbose=False)
-    if installation1_prefix:
-        args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    args_instance[SER_HOST] = HOST_MASTER_2
-    args_instance[SER_PORT] = PORT_MASTER_2
-    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_master = args_instance.copy()
-    master2.allocate(args_master)
-    instance_master2 = master2.exists()
-    if instance_master2:
-        master2.delete()
-    master2.create()
-    master2.open()
-    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
-    #
-    # Create all the agreements
-    #
-    # Creating agreement from master 1 to master 2
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-    if not m1_m2_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m1_m2_agmt)
-
-    # Creating agreement from master 2 to master 1
-    properties = {RA_NAME:      r'meTo_$host:$port',
-                  RA_BINDDN:    defaultProperties[REPLICATION_BIND_DN],
-                  RA_BINDPW:    defaultProperties[REPLICATION_BIND_PW],
-                  RA_METHOD:    defaultProperties[REPLICATION_BIND_METHOD],
-                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
-    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
-    if not m2_m1_agmt:
-        log.fatal("Fail to create a master -> master replica agreement")
-        sys.exit(1)
-    log.debug("%s created" % m2_m1_agmt)
-
-    # Allow the replicas to get situated with the new agreements...
-    time.sleep(5)
-
-    #
-    # Initialize all the agreements
-    #
-    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    master1.waitForReplInit(m1_m2_agmt)
-
-    # Check replication is working...
-    if master1.testReplication(DEFAULT_SUFFIX, master2):
-        log.info('Replication is working.')
-    else:
-        log.fatal('Replication is not working.')
-        assert False
-
-    # Delete each instance in the end
-    def fin():
-        master1.delete()
-        master2.delete()
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    master1.clearTmpDir(__file__)
-
-    return TopologyReplication(master1, master2)
-
-
 def pattern_accesslog(file, log_pattern):
     try:
         pattern_accesslog.last_pos += 1
     except AttributeError:
         pattern_accesslog.last_pos = 0
 
-
     found = None
     file.seek(pattern_accesslog.last_pos)
 
@@ -148,51 +35,51 @@ def pattern_accesslog(file, log_pattern):
 
 
 @pytest.fixture(scope="module")
-def entries(topology):
+def entries(topology_m2):
     # add dummy entries in the staging DIT
     for cpt in range(MAX_ACCOUNTS):
         name = "%s%d" % (NEW_ACCOUNT, cpt)
-        topology.master1.add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top person".split(),
-                                            'sn': name,
-                                            'cn': name})))
+        topology_m2.ms["master1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top person".split(),
+            'sn': name,
+            'cn': name})))
     mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192'),
-           (ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(256+4))]
-    topology.master1.modify_s(DN_CONFIG, mod)
-    topology.master2.modify_s(DN_CONFIG, mod)
+           (ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(256 + 4))]
+    topology_m2.ms["master1"].modify_s(DN_CONFIG, mod)
+    topology_m2.ms["master2"].modify_s(DN_CONFIG, mod)
 
 
-def test_ticket48266_fractional(topology, entries):
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+def test_ticket48266_fractional(topology_m2, entries):
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
 
     mod = [(ldap.MOD_REPLACE, 'nsDS5ReplicatedAttributeList', ['(objectclass=*) $ EXCLUDE telephonenumber']),
            (ldap.MOD_REPLACE, 'nsds5ReplicaStripAttrs', ['modifiersname modifytimestamp'])]
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
     m1_m2_agmt = ents[0].dn
-    topology.master1.modify_s(ents[0].dn, mod)
+    topology_m2.ms["master1"].modify_s(ents[0].dn, mod)
 
-    ents = topology.master2.agreement.list(suffix=SUFFIX)
+    ents = topology_m2.ms["master2"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    topology.master2.modify_s(ents[0].dn, mod)
+    topology_m2.ms["master2"].modify_s(ents[0].dn, mod)
 
-    topology.master1.restart(timeout=10)
-    topology.master2.restart(timeout=10)
+    topology_m2.ms["master1"].restart(timeout=10)
+    topology_m2.ms["master2"].restart(timeout=10)
 
-    topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
-    topology.master1.waitForReplInit(m1_m2_agmt)
+    topology_m2.ms["master1"].agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    topology_m2.ms["master1"].waitForReplInit(m1_m2_agmt)
 
 
-def test_ticket48266_check_repl_desc(topology, entries):
+def test_ticket48266_check_repl_desc(topology_m2, entries):
     name = "cn=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
     value = 'check repl. description'
     mod = [(ldap.MOD_REPLACE, 'description', value)]
-    topology.master1.modify_s(name, mod)
+    topology_m2.ms["master1"].modify_s(name, mod)
 
     loop = 0
     while loop <= 10:
-        ent = topology.master2.getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)")
+        ent = topology_m2.ms["master2"].getEntry(name, ldap.SCOPE_BASE, "(objectclass=*)")
         if ent.hasAttr('description') and ent.getValue('description') == value:
             break
         time.sleep(1)
@@ -203,17 +90,17 @@ def test_ticket48266_check_repl_desc(topology, entries):
 # will use this CSN as a starting point on error log
 # after this is one 'Skipped' then the first csn _get_first_not_replicated_csn
 # should no longer be Skipped in the error log
-def _get_last_not_replicated_csn(topology):
+def _get_last_not_replicated_csn(topology_m2):
     name = "cn=%s5,%s" % (NEW_ACCOUNT, SUFFIX)
 
     # read the first CSN that will not be replicated
     mod = [(ldap.MOD_REPLACE, 'telephonenumber', str(123456))]
-    topology.master1.modify_s(name, mod)
-    msgid = topology.master1.search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
-    rtype, rdata, rmsgid = topology.master1.result2(msgid)
+    topology_m2.ms["master1"].modify_s(name, mod)
+    msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
     attrs = None
     for dn, raw_attrs in rdata:
-        topology.master1.log.info("dn: %s" % dn)
+        topology_m2.ms["master1"].log.info("dn: %s" % dn)
         if 'nscpentrywsi' in raw_attrs:
             attrs = raw_attrs['nscpentrywsi']
     assert attrs
@@ -224,8 +111,8 @@ def _get_last_not_replicated_csn(topology):
 
     # now retrieve the CSN of the operation we are looking for
     csn = None
-    topology.master1.stop(timeout=10)
-    file_obj = open(topology.master1.accesslog, "r")
+    topology_m2.ms["master1"].stop(timeout=10)
+    file_obj = open(topology_m2.ms["master1"].accesslog, "r")
 
     # First the conn/op of the operation
     regex = re.compile("MOD dn=\"%s\"" % name)
@@ -233,8 +120,8 @@ def _get_last_not_replicated_csn(topology):
     assert found_op
     if found_op:
         conn_op_pattern = '.* (conn=[0-9]* op=[0-9]*) .*'
-        conn_op_re= re.compile(conn_op_pattern)
-        conn_op_match  = conn_op_re.match(found_op)
+        conn_op_re = re.compile(conn_op_pattern)
+        conn_op_match = conn_op_re.match(found_op)
         conn_op = conn_op_match.group(1)
 
         # now the related CSN
@@ -245,21 +132,21 @@ def _get_last_not_replicated_csn(topology):
         csn_match = csn_re.match(found_result)
         csn = csn_match.group(1)
 
-    topology.master1.start(timeout=10)
+    topology_m2.ms["master1"].start(timeout=10)
     return csn
 
 
-def _get_first_not_replicated_csn(topology):
+def _get_first_not_replicated_csn(topology_m2):
     name = "cn=%s2,%s" % (NEW_ACCOUNT, SUFFIX)
 
     # read the first CSN that will not be replicated
     mod = [(ldap.MOD_REPLACE, 'telephonenumber', str(123456))]
-    topology.master1.modify_s(name, mod)
-    msgid = topology.master1.search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
-    rtype, rdata, rmsgid = topology.master1.result2(msgid)
+    topology_m2.ms["master1"].modify_s(name, mod)
+    msgid = topology_m2.ms["master1"].search_ext(name, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi'])
+    rtype, rdata, rmsgid = topology_m2.ms["master1"].result2(msgid)
     attrs = None
     for dn, raw_attrs in rdata:
-        topology.master1.log.info("dn: %s" % dn)
+        topology_m2.ms["master1"].log.info("dn: %s" % dn)
         if 'nscpentrywsi' in raw_attrs:
             attrs = raw_attrs['nscpentrywsi']
     assert attrs
@@ -270,8 +157,8 @@ def _get_first_not_replicated_csn(topology):
 
     # now retrieve the CSN of the operation we are looking for
     csn = None
-    topology.master1.stop(timeout=10)
-    file_obj = open(topology.master1.accesslog, "r")
+    topology_m2.ms["master1"].stop(timeout=10)
+    file_obj = open(topology_m2.ms["master1"].accesslog, "r")
 
     # First the conn/op of the operation
     regex = re.compile("MOD dn=\"%s\"" % name)
@@ -279,8 +166,8 @@ def _get_first_not_replicated_csn(topology):
     assert found_op
     if found_op:
         conn_op_pattern = '.* (conn=[0-9]* op=[0-9]*) .*'
-        conn_op_re= re.compile(conn_op_pattern)
-        conn_op_match  = conn_op_re.match(found_op)
+        conn_op_re = re.compile(conn_op_pattern)
+        conn_op_match = conn_op_re.match(found_op)
         conn_op = conn_op_match.group(1)
 
         # now the related CSN
@@ -291,15 +178,15 @@ def _get_first_not_replicated_csn(topology):
         csn_match = csn_re.match(found_result)
         csn = csn_match.group(1)
 
-    topology.master1.start(timeout=10)
+    topology_m2.ms["master1"].start(timeout=10)
     return csn
 
 
-def _count_full_session(topology):
+def _count_full_session(topology_m2):
     #
     # compute the number of 'No more updates'
     #
-    file_obj = open(topology.master1.errlog, "r")
+    file_obj = open(topology_m2.ms["master1"].errlog, "r")
     # pattern to find
     pattern = ".*No more updates to send.*"
     regex = re.compile(pattern)
@@ -318,60 +205,61 @@ def _count_full_session(topology):
     return no_more_updates
 
 
-def test_ticket48266_count_csn_evaluation(topology, entries):
-    ents = topology.master1.agreement.list(suffix=SUFFIX)
+def test_ticket48266_count_csn_evaluation(topology_m2, entries):
+    ents = topology_m2.ms["master1"].agreement.list(suffix=SUFFIX)
     assert len(ents) == 1
-    first_csn = _get_first_not_replicated_csn(topology)
+    first_csn = _get_first_not_replicated_csn(topology_m2)
     name = "cn=%s3,%s" % (NEW_ACCOUNT, SUFFIX)
     NB_SESSION = 102
 
-    no_more_update_cnt = _count_full_session(topology)
-    topology.master1.agreement.pause(ents[0].dn)
+    no_more_update_cnt = _count_full_session(topology_m2)
+    topology_m2.ms["master1"].agreement.pause(ents[0].dn)
     # now do a set of updates that will NOT be replicated
     for telNumber in range(NB_SESSION):
         mod = [(ldap.MOD_REPLACE, 'telephonenumber', str(telNumber))]
-        topology.master1.modify_s(name, mod)
+        topology_m2.ms["master1"].modify_s(name, mod)
 
-    topology.master1.agreement.resume(ents[0].dn)
+    topology_m2.ms["master1"].agreement.resume(ents[0].dn)
 
     # let's wait all replication session complete
     MAX_LOOP = 10
     cnt = 0
-    current_no_more_update = _count_full_session(topology)
+    current_no_more_update = _count_full_session(topology_m2)
     while (current_no_more_update == no_more_update_cnt):
         cnt = cnt + 1
         if (cnt > MAX_LOOP):
             break
         time.sleep(5)
-        current_no_more_update = _count_full_session(topology)
+        current_no_more_update = _count_full_session(topology_m2)
 
-    log.info('after %d MODs we have completed %d replication sessions' % (NB_SESSION, (current_no_more_update - no_more_update_cnt)))
+    log.info('after %d MODs we have completed %d replication sessions' % (
+    NB_SESSION, (current_no_more_update - no_more_update_cnt)))
     no_more_update_cnt = current_no_more_update
 
-
     # At this point, with the fix a dummy update was made BUT may be not sent it
     # make sure it was sent so that the consumer CSN will be updated
-    last_csn = _get_last_not_replicated_csn(topology)
+    last_csn = _get_last_not_replicated_csn(topology_m2)
 
     # let's wait all replication session complete
     MAX_LOOP = 10
     cnt = 0
-    current_no_more_update = _count_full_session(topology)
+    current_no_more_update = _count_full_session(topology_m2)
     while (current_no_more_update == no_more_update_cnt):
         cnt = cnt + 1
         if (cnt > MAX_LOOP):
             break
         time.sleep(5)
-        current_no_more_update = _count_full_session(topology)
+        current_no_more_update = _count_full_session(topology_m2)
 
-    log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % (last_csn, (current_no_more_update - no_more_update_cnt)))
+    log.info('This MODs %s triggered the send of the dummy update completed %d replication sessions' % (
+    last_csn, (current_no_more_update - no_more_update_cnt)))
     no_more_update_cnt = current_no_more_update
 
     # so we should no longer see the first_csn in the log
     # Let's create a new csn (last_csn) and check there is no longer first_csn
-    topology.master1.agreement.pause(ents[0].dn)
-    last_csn = _get_last_not_replicated_csn(topology)
-    topology.master1.agreement.resume(ents[0].dn)
+    topology_m2.ms["master1"].agreement.pause(ents[0].dn)
+    last_csn = _get_last_not_replicated_csn(topology_m2)
+    topology_m2.ms["master1"].agreement.resume(ents[0].dn)
 
     # let's wait for the session to complete
     MAX_LOOP = 10
@@ -381,14 +269,15 @@ def test_ticket48266_count_csn_evaluation(topology, entries):
         if (cnt > MAX_LOOP):
             break
         time.sleep(5)
-        current_no_more_update = _count_full_session(topology)
+        current_no_more_update = _count_full_session(topology_m2)
 
-    log.info('This MODs %s  completed in %d replication sessions, should be sent without evaluating %s' % (last_csn, (current_no_more_update - no_more_update_cnt), first_csn))
+    log.info('This MODs %s  completed in %d replication sessions, should be sent without evaluating %s' % (
+    last_csn, (current_no_more_update - no_more_update_cnt), first_csn))
     no_more_update_cnt = current_no_more_update
 
     # Now determine how many times we have skipped 'csn'
     # no need to stop the server to check the error log
-    file_obj = open(topology.master1.errlog, "r")
+    file_obj = open(topology_m2.ms["master1"].errlog, "r")
 
     # find where the last_csn operation was processed
     pattern = ".*ruv_add_csn_inprogress: successfully inserted csn %s.*" % last_csn
@@ -403,7 +292,7 @@ def test_ticket48266_count_csn_evaluation(topology, entries):
     if (found):
         log.info('last operation was found at %d' % file_obj.tell())
         log.info(line)
-    log.info('Now check the we can not find the first csn %s in the log'% first_csn)
+    log.info('Now check the we can not find the first csn %s in the log' % first_csn)
 
     pattern = ".*Skipping update operation.*CSN %s.*" % first_csn
     regex = re.compile(pattern)

+ 43 - 81
dirsrvtests/tests/tickets/ticket48270_test.py

@@ -1,102 +1,63 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
 logging.getLogger(__name__).setLevel(logging.DEBUG)
 log = logging.getLogger(__name__)
 
-installation1_prefix = None
+NEW_ACCOUNT = "new_account"
+MAX_ACCOUNTS = 20
 
-NEW_ACCOUNT    = "new_account"
-MAX_ACCOUNTS   = 20
-
-MIXED_VALUE="/home/mYhOmEdIrEcToRy"
-LOWER_VALUE="/home/myhomedirectory"
+MIXED_VALUE = "/home/mYhOmEdIrEcToRy"
+LOWER_VALUE = "/home/myhomedirectory"
 HOMEDIRECTORY_INDEX = 'cn=homeDirectory,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-HOMEDIRECTORY_CN="homedirectory"
+HOMEDIRECTORY_CN = "homedirectory"
 MATCHINGRULE = 'nsMatchingRule'
 UIDNUMBER_INDEX = 'cn=uidnumber,cn=index,cn=userRoot,cn=ldbm database,cn=plugins,cn=config'
-UIDNUMBER_CN="uidnumber"
-
-
-class TopologyStandalone(object):
-    def __init__(self, standalone):
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    # Creating standalone instance ...
-    standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    # Delete each instance in the end
-    def fin():
-        standalone.delete()
-    request.addfinalizer(fin)
-
-    return TopologyStandalone(standalone)
+UIDNUMBER_CN = "uidnumber"
 
 
-def test_ticket48270_init(topology):
+def test_ticket48270_init(topology_st):
     log.info("Initialization: add dummy entries for the tests")
     for cpt in range(MAX_ACCOUNTS):
         name = "%s%d" % (NEW_ACCOUNT, cpt)
-        topology.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
-                                            'objectclass': "top posixAccount".split(),
-                                            'uid': name,
-                                            'cn': name,
-                                            'uidnumber': str(111),
-                                            'gidnumber': str(222),
-                                            'homedirectory': "/home/tbordaz_%d" % cpt})))
+        topology_st.standalone.add_s(Entry(("uid=%s,%s" % (name, SUFFIX), {
+            'objectclass': "top posixAccount".split(),
+            'uid': name,
+            'cn': name,
+            'uidnumber': str(111),
+            'gidnumber': str(222),
+            'homedirectory': "/home/tbordaz_%d" % cpt})))
 
 
-def test_ticket48270_homeDirectory_indexed_cis(topology):
+def test_ticket48270_homeDirectory_indexed_cis(topology_st):
     log.info("\n\nindex homeDirectory in caseIgnoreIA5Match and caseExactIA5Match")
     try:
-        ent = topology.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
+        ent = topology_st.standalone.getEntry(HOMEDIRECTORY_INDEX, ldap.SCOPE_BASE)
     except ldap.NO_SUCH_OBJECT:
-        topology.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
-                                            'objectclass': "top nsIndex".split(),
-                                            'cn': HOMEDIRECTORY_CN,
-                                            'nsSystemIndex': 'false',
-                                            'nsIndexType': 'eq'})))
-    #log.info("attach debugger")
-    #time.sleep(60)
-
-    IGNORE_MR_NAME='caseIgnoreIA5Match'
-    EXACT_MR_NAME='caseExactIA5Match'
+        topology_st.standalone.add_s(Entry((HOMEDIRECTORY_INDEX, {
+            'objectclass': "top nsIndex".split(),
+            'cn': HOMEDIRECTORY_CN,
+            'nsSystemIndex': 'false',
+            'nsIndexType': 'eq'})))
+    # log.info("attach debugger")
+    # time.sleep(60)
+
+    IGNORE_MR_NAME = 'caseIgnoreIA5Match'
+    EXACT_MR_NAME = 'caseExactIA5Match'
     mod = [(ldap.MOD_REPLACE, MATCHINGRULE, (IGNORE_MR_NAME, EXACT_MR_NAME))]
-    topology.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
+    topology_st.standalone.modify_s(HOMEDIRECTORY_INDEX, mod)
 
-    #topology.standalone.stop(timeout=10)
+    # topology_st.standalone.stop(timeout=10)
     log.info("successfully checked that filter with exact mr , a filter with lowercase eq is failing")
-    #assert topology.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
-    #topology.standalone.start(timeout=10)
+    # assert topology_st.standalone.db2index(bename=DEFAULT_BENAME, suffixes=None, attrs=['homeDirectory'])
+    # topology_st.standalone.start(timeout=10)
     args = {TASK_WAIT: True}
-    topology.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
+    topology_st.standalone.tasks.reindex(suffix=SUFFIX, attrname='homeDirectory', args=args)
 
     log.info("Check indexing succeeded with a specified matching rule")
-    file_obj = open(topology.standalone.errlog, "r")
+    file_obj = open(topology_st.standalone.errlog, "r")
 
     # Check if the MR configuration failure occurs
     regex = re.compile("unknown or invalid matching rule")
@@ -109,40 +70,41 @@ def test_ticket48270_homeDirectory_indexed_cis(topology):
     if (found):
         log.info("The configuration of a specific MR fails")
         log.info(line)
-        #assert not found
+        # assert not found
 
 
-def test_ticket48270_homeDirectory_mixed_value(topology):
+def test_ticket48270_homeDirectory_mixed_value(topology_st):
     # Set a homedirectory value with mixed case
     name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
     mod = [(ldap.MOD_REPLACE, 'homeDirectory', MIXED_VALUE)]
-    topology.standalone.modify_s(name, mod)
+    topology_st.standalone.modify_s(name, mod)
 
 
-def test_ticket48270_extensible_search(topology):
+def test_ticket48270_extensible_search(topology_st):
     name = "uid=%s1,%s" % (NEW_ACCOUNT, SUFFIX)
 
     # check with the exact stored value
     log.info("Default: can retrieve an entry filter syntax with exact stored value")
-    ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
+    ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % MIXED_VALUE)
     log.info("Default: can retrieve an entry filter caseExactIA5Match with exact stored value")
-    ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
+    ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % MIXED_VALUE)
 
     # check with a lower case value that is different from the stored value
     log.info("Default: can not retrieve an entry filter syntax match with lowered stored value")
     try:
-        ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
+        ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory=%s)" % LOWER_VALUE)
         assert ent is None
     except ldap.NO_SUCH_OBJECT:
         pass
     log.info("Default: can not retrieve an entry filter caseExactIA5Match with lowered stored value")
     try:
-        ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
+        ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE,
+                                              "(homeDirectory:caseExactIA5Match:=%s)" % LOWER_VALUE)
         assert ent is None
     except ldap.NO_SUCH_OBJECT:
         pass
     log.info("Default: can retrieve an entry filter caseIgnoreIA5Match with lowered stored value")
-    ent = topology.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
+    ent = topology_st.standalone.getEntry(name, ldap.SCOPE_BASE, "(homeDirectory:caseIgnoreIA5Match:=%s)" % LOWER_VALUE)
 
 
 if __name__ == '__main__':

+ 31 - 85
dirsrvtests/tests/tickets/ticket48272_test.py

@@ -1,24 +1,15 @@
-import os
-import sys
-import time
-import ldap
-import logging
 import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
 from lib389.tasks import *
 from lib389.utils import *
+from lib389.topologies import topology_st
 
-DEBUGGING = False
+DEBUGGING = os.getenv('DEBUGGING', False)
 
 if DEBUGGING:
     logging.getLogger(__name__).setLevel(logging.DEBUG)
 else:
     logging.getLogger(__name__).setLevel(logging.INFO)
 
-
 log = logging.getLogger(__name__)
 
 USER1 = 'user1'
@@ -28,58 +19,14 @@ USER1_DN = 'uid=user1,ou=People,%s' % DEFAULT_SUFFIX
 USER1_CONFLICT_DN = 'uid=user1,%s' % DEFAULT_SUFFIX
 
 
-class TopologyStandalone(object):
-    """The DS Topology Class"""
-    def __init__(self, standalone):
-        """Init"""
-        standalone.open()
-        self.standalone = standalone
-
-
[email protected](scope="module")
-def topology(request):
-    """Create DS Deployment"""
-
-    # Creating standalone instance ...
-    if DEBUGGING:
-        standalone = DirSrv(verbose=True)
-    else:
-        standalone = DirSrv(verbose=False)
-    args_instance[SER_HOST] = HOST_STANDALONE
-    args_instance[SER_PORT] = PORT_STANDALONE
-    args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
-    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
-    args_standalone = args_instance.copy()
-    standalone.allocate(args_standalone)
-    instance_standalone = standalone.exists()
-    if instance_standalone:
-        standalone.delete()
-    standalone.create()
-    standalone.open()
-
-    def fin():
-        """If we are debugging just stop the instances, otherwise remove
-        them
-        """
-        if DEBUGGING:
-            standalone.stop(60)
-        else:
-            standalone.delete()
-
-    request.addfinalizer(fin)
-
-    # Clear out the tmp dir
-    standalone.clearTmpDir(__file__)
-
-    return TopologyStandalone(standalone)
-
 def _create_user(inst, name, dn):
     inst.add_s(Entry((
-                dn, {
-                    'objectClass': 'top account simplesecurityobject'.split(),
-                     'uid': name,
-                     'userpassword': PW
-                })))
+        dn, {
+            'objectClass': 'top account simplesecurityobject'.split(),
+            'uid': name,
+            'userpassword': PW
+        })))
+
 
 def _bind(name, cred):
     # Returns true or false if it worked.
@@ -90,22 +37,22 @@ def _bind(name, cred):
     try:
         conn.simple_bind_s(name, cred)
         conn.unbind_s()
-    except ldap.NO_SUCH_OBJECT:
+    except ldap.INVALID_CREDENTIALS:
         status = False
     return status
 
 
-def test_ticket48272(topology):
+def test_ticket48272(topology_st):
     """
     Test the functionality of the addn bind plugin. This should allow users
     of the type "name" or "[email protected]" to bind.
     """
 
     # There will be a better way to do this in the future.
-    topology.standalone.add_s(Entry((
+    topology_st.standalone.add_s(Entry((
         "cn=addn,cn=plugins,cn=config", {
-            "objectClass" : "top nsSlapdPlugin extensibleObject".split(),
-            "cn" : "addn",
+            "objectClass": "top nsSlapdPlugin extensibleObject".split(),
+            "cn": "addn",
             "nsslapd-pluginPath": "libaddn-plugin",
             "nsslapd-pluginInitfunc": "addn_init",
             "nsslapd-pluginType": "preoperation",
@@ -118,54 +65,54 @@ def test_ticket48272(topology):
         }
     )))
 
-    topology.standalone.add_s(Entry((
+    topology_st.standalone.add_s(Entry((
         "cn=example.com,cn=addn,cn=plugins,cn=config", {
-            "objectClass" : "top extensibleObject".split(),
-            "cn" : "example.com",
+            "objectClass": "top extensibleObject".split(),
+            "cn": "example.com",
             "addn_base": "ou=People,%s" % DEFAULT_SUFFIX,
             "addn_filter": "(&(objectClass=account)(uid=%s))",
         }
     )))
 
-    topology.standalone.restart(60)
+    topology_st.standalone.restart(60)
 
     # Add a user
-    _create_user(topology.standalone, USER1, USER1_DN)
+    _create_user(topology_st.standalone, USER1, USER1_DN)
 
     # Make sure our binds still work.
-    assert(_bind(USER1_DN, PW))
+    assert (_bind(USER1_DN, PW))
     # Test an anonymous bind
-    for i in range(0,10):
-
+    for i in range(0, 10):
         # Test bind as name
-        assert(_bind(USER1, PW))
+        assert (_bind(USER1, PW))
 
         # Make sure that name@fakedom fails
-        assert(_bind(USER1_DOMAIN, PW))
+        assert (_bind(USER1_DOMAIN, PW))
 
     # Add a conflicting user to an alternate subtree
-    _create_user(topology.standalone, USER1, USER1_CONFLICT_DN)
+    _create_user(topology_st.standalone, USER1, USER1_CONFLICT_DN)
     # Change the plugin to search from the rootdn instead
     # This means we have a conflicting user in scope now!
 
-    topology.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config", [(ldap.MOD_REPLACE, 'addn_base', DEFAULT_SUFFIX)])
-    topology.standalone.restart(60)
+    topology_st.standalone.modify_s("cn=example.com,cn=addn,cn=plugins,cn=config",
+                                    [(ldap.MOD_REPLACE, 'addn_base', DEFAULT_SUFFIX)])
+    topology_st.standalone.restart(60)
 
     # Make sure our binds still work.
-    assert(_bind(USER1_DN, PW))
-    assert(_bind(USER1_CONFLICT_DN, PW))
-    for i in range(0,10):
+    assert (_bind(USER1_DN, PW))
+    assert (_bind(USER1_CONFLICT_DN, PW))
+    for i in range(0, 10):
 
         # Test bind as name fails
         try:
             _bind(USER1, PW)
-            assert(False)
+            assert (False)
         except:
             pass
         # Test bind as name@domain fails too
         try:
             _bind(USER1_DOMAIN, PW)
-            assert(False)
+            assert (False)
         except:
             pass
 
@@ -177,4 +124,3 @@ if __name__ == '__main__':
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
     pytest.main("-s %s" % CURRENT_FILE)
-

Some files were not shown because too many files changed in this diff