# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import os
import re
import time
import logging
import ldap
import ldif
import pytest
import subprocess
from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db
from lib389._constants import *
from . import get_repl_entries
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.agreement import Agreements
from lib389.idm.user import UserAccount
from lib389 import Entry
from lib389.idm.group import Groups, Group
from lib389.replica import Replicas, ReplicationManager
from lib389.changelog import Changelog5
from lib389 import pid_from_file

NEW_SUFFIX_NAME = 'test_repl'
NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME)
NEW_BACKEND = 'repl_base'
MAXAGE_ATTR = 'nsslapd-changelogmaxage'
MAXAGE_STR = '30'
TRIMINTERVAL_STR = '5'
TRIMINTERVAL = 'nsslapd-changelogtrim-interval'

DEBUGGING = os.getenv("DEBUGGING", default=False)

if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


def find_start_location(file, no):
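    """Return the file offset just after the no-th 'slapd started' message,
    or -1 if fewer than no occurrences are found.
    """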
    log_pattern = re.compile("slapd_daemon - slapd started.")
    count = 0
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if (found):
            count = count + 1
            if (count == no):
                return file.tell()
        if (line == ''):
            break
    return -1


def pattern_errorlog(file, log_pattern, start_location=0):
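    """Count the lines matching log_pattern, starting at start_location."""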
    count = 0
    log.debug("_pattern_errorlog: start from the beginning")
    file.seek(start_location)

    # Use a 'while True' loop because 'for line in file' hits a
    # Python bug that breaks file.tell()
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if (found):
            count = count + 1
        if (line == ''):
            break

    log.debug("_pattern_errorlog: complete (count=%d)" % count)
    return count


def _move_ruv(ldif_file):
    """Move the RUV entry in an LDIF file to the top"""
    with open(ldif_file) as f:
        parser = ldif.LDIFRecordList(f)
        parser.parse()

        ldif_list = parser.all_records
        for dn in ldif_list:
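            # The database RUV is stored in a tombstone entry with this
            # fixed nsuniqueid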
            if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'):
                ruv_index = ldif_list.index(dn)
                ldif_list.insert(0, ldif_list.pop(ruv_index))
                break

    with open(ldif_file, 'w') as f:
        ldif_writer = ldif.LDIFWriter(f)
        for dn, entry in ldif_list:
            ldif_writer.unparse(dn, entry)


@pytest.fixture(scope="module")
def topo_with_sigkill(request):
    """Create Replication Deployment with two masters"""

    topology = create_topology({ReplicaRole.MASTER: 2})

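    # Send SIGKILL to ns-slapd directly: a regular 'stop' can hang if the
    # test left the server in a hung state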
    def _kill_ns_slapd(inst):
        pid = str(pid_from_file(inst.ds_paths.pid_file))
        cmd = ['kill', '-9', pid]
        subprocess.Popen(cmd, stdout=subprocess.PIPE)

    def fin():
        if DEBUGGING:
            # Kill the hanging process at the end of the test to prevent failures in the following tests
            [_kill_ns_slapd(inst) for inst in topology]
            # [inst.stop() for inst in topology]
        else:
            # Kill the hanging process at the end of the test to prevent failures in the following tests
            [_kill_ns_slapd(inst) for inst in topology]
            assert _remove_ssca_db(topology)
            [inst.delete() for inst in topology if inst.exists()]

    request.addfinalizer(fin)

    return topology


@pytest.fixture()
def create_entry(topo_m2, request):
    """Add test entry using UserAccounts"""

    log.info('Adding a test entry user')
    users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX)
    tuser = users.ensure_state(properties=TEST_USER_PROPERTIES)
    return tuser


def test_double_delete(topo_m2, create_entry):
    """Check that double delete of the entry doesn't crash server

    :id: 3496c82d-636a-48c9-973c-2455b12164cc
    :setup: Two masters replication setup, a test entry
    :steps:
        1. Delete the entry on the first master
        2. Delete the entry on the second master
        3. Check that server is alive
    :expectedresults:
        1. Entry should be successfully deleted from first master
        2. Entry should be successfully deleted from second master
        3. Server should be alive
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    repl = ReplicationManager(DEFAULT_SUFFIX)
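
    # Disable replication in both directions so that each master can
    # delete the entry independently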
    repl.disable_to_master(m1, [m2])
    repl.disable_to_master(m2, [m1])

    log.info('Deleting entry {} from master1'.format(create_entry.dn))
    topo_m2.ms["master1"].delete_s(create_entry.dn)

    log.info('Deleting entry {} from master2'.format(create_entry.dn))
    topo_m2.ms["master2"].delete_s(create_entry.dn)

    repl.enable_to_master(m2, [m1])
    repl.enable_to_master(m1, [m2])

    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)


@pytest.mark.bz1506831
def test_repl_modrdn(topo_m2):
    """Test that replicated MODRDN does not break replication

    :id: a3e17698-9eb4-41e0-b537-8724b9915fa6
    :setup: Two masters replication setup
    :steps:
        1. Add 3 test OrganizationalUnits A, B and C
        2. Add 1 test user under OU=A
        3. Add same test user under OU=B
        4. Stop Replication
        5. Apply modrdn to M1 - move test user from OU A -> C
        6. Apply modrdn on M2 - move test user from OU B -> C
        7. Start Replication
        8. Check that there should be only one test entry under ou=C on both masters
        9. Check that the replication is working fine both ways M1 <-> M2
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass
        5. This should pass
        6. This should pass
        7. This should pass
        8. This should pass
        9. This should pass
    """

    master1 = topo_m2.ms["master1"]
    master2 = topo_m2.ms["master2"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs")
    OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX)
    OU_A = OUs.create(properties={
        'ou': 'A',
        'description': 'A',
    })
    OU_B = OUs.create(properties={
        'ou': 'B',
        'description': 'B',
    })
    OU_C = OUs.create(properties={
        'ou': 'C',
        'description': 'C',
    })

    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn))
    tuser_A = users.create(properties=TEST_USER_PROPERTIES)
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
    tuser_B = users.create(properties=TEST_USER_PROPERTIES)

    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Stop Replication")
    topo_m2.pause_all_replicas()
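
    # With replication paused, each master moves a different user to the
    # same target DN 'uid=testuser1,ou=C', creating a conflict to be
    # resolved once replication resumes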
    log.info("Apply modrdn to M1 - move test user from OU A -> C")
    master1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)

    log.info("Apply modrdn on M2 - move test user from OU B -> C")
    master2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)

    log.info("Start Replication")
    topo_m2.resume_all_replicas()

    log.info("Wait for some time for repl to resume")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Check that there should be only one test entry under ou=C on both masters")
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)


def test_password_repl_error(topo_m2, create_entry):
    """Check that error about userpassword replication is properly logged

    :id: 714130ff-e4f0-4633-9def-c1f4b24abfef
    :setup: Two masters replication setup, a test entry
    :steps:
        1. Change userpassword on the first master
        2. Restart the servers to flush the logs
        3. Check the error log for a replication error
    :expectedresults:
        1. Password should be successfully changed
        2. Server should be successfully restarted
        3. There should be no replication errors in the error log
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    TEST_ENTRY_NEW_PASS = 'new_pass'

    log.info('Clean the error log')
    m2.deleteErrorLogs()

    log.info('Set replication loglevel')
    m2.config.loglevel((ErrorLog.REPLICA,))

    log.info('Modifying entry {} - change userpassword on master 1'.format(create_entry.dn))
    create_entry.set('userpassword', TEST_ENTRY_NEW_PASS)
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)

    log.info('Restart the servers to flush the logs')
    for num in range(1, 3):
        topo_m2.ms["master{}".format(num)].restart()

    try:
        log.info('Check that password works on master 2')
        create_entry_m2 = UserAccount(m2, create_entry.dn)
        create_entry_m2.bind(TEST_ENTRY_NEW_PASS)

        log.info('Check the error log for the error with {}'.format(create_entry.dn))
        assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn))
    finally:
        log.info('Set the default loglevel')
        m2.config.loglevel((ErrorLog.DEFAULT,))


def test_invalid_agmt(topo_m2):
    """Test that adding an invalid agreement is properly rejected and does not crash the server

    :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b
    :setup: Two masters replication setup
    :steps:
        1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
        2. Verify the server is still running
    :expectedresults:
        1. Invalid repl agreement should be rejected
        2. Server should be still running
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    replicas = Replicas(m1)
    replica = replicas.get(DEFAULT_SUFFIX)
    agmts = replica.get_agreements()

    # Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        agmts.create(properties={
            'cn': 'whatever',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
            'nsDS5ReplicaBindMethod': 'simple',
            'nsDS5ReplicaTransportInfo': 'LDAP',
            'nsds5replicaTimeout': '5',
            'description': "test agreement",
            'nsDS5ReplicaHost': m2.host,
            'nsDS5ReplicaPort': str(m2.port),
            'nsDS5ReplicaCredentials': 'whatever',
            'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE'
        })

    # Verify the server is still running
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)


def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, but with the user *not* a member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already defines a replication via group
           - define the group as groupDN for replication and 60sec as fetch interval
           - pause RA in both directions
           - define the user as bindDn of the RAs
        4. Restart the servers. This sets the fetch time to 0, so the next session will refetch the group
        5. Before resuming the RAs, add the user to the groupDN (on both sides, as replication is not working at that time)
        6. Trigger an update and check that replication is working and that no failure
           'does not have permission to supply replication updates to the replica' is logged on the supplier side
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    # If you need any test suite initialization,
    # please, write additional fixture for that (including finalizer).
    # Topologies for suites are predefined in lib389/topologies.py.

    # If you need host, port or any other data about instance,
    # please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'})
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {
        'cn': 'group1',
        'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    assert (not group_M1.is_member(create_user.dn))

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
                        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
                        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])

    # Then pause the replication agreements to prevent them trying to acquire
    # while the user is not a member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members defined in the replica
    #
    # The user is NOT a member of the group, so replication will not work
    # until the bindDnGroupCheckInterval elapses
    #
    # With the fix, the first fetch is not taken into account (fetch time=0)
    # so on the first session, the group will be fetched
    M1.restart()
    M2.restart()

    # Replication being broken here, we need to directly do the same update
    # on both sides; no other solution was found except a total update
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # Trigger updates to be sure to have a replication session, giving some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working
    ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert (ent.hasAttr('description'))
        found = 0
        for val in ent.getValues('description'):
            if (val == b'value_1_1'):
                found = found + 1
            elif (val == b'value_2_2'):
                found = found + 1
        assert (found == 2)

    # Check in the logs that the member was detected in the group although
    # at startup it was not a member of the group
    regex = re.compile("does not have permission to supply replication updates to the replica.")
    errorlog_M1 = open(M1.errlog, "r")
    errorlog_M2 = open(M2.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert (restart_location_M1 != -1)
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert (restart_location_M2 != -1)

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1)
    assert (count <= 1)
    count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2)
    assert (count <= 1)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


def test_cleanallruv_repl(topo_m3):
    """Test that cleanallruv does not break replication if an anchor csn in the RUV originated in the deleted replica

    :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a
    :setup: 3 Masters
    :steps:
        1. Configure error log level to 8192 in all masters
        2. Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2
        3. Add test users to 3 masters
        4. Launch CleanAllRUV with the force option
        5. Check the users after CleanAllRUV; because of changelog trimming, it will affect the changelogs
    :expectedresults:
        1. Error logs should be configured successfully
        2. Modify should be successful
        3. Test users should be added successfully
        4. CleanAllRUV should be launched successfully
        5. Users should be present according to the changelog trimming effect
    """

    M1 = topo_m3.ms["master1"]
    M2 = topo_m3.ms["master2"]
    M3 = topo_m3.ms["master3"]

    log.info("Change the error log levels for all masters")
    for s in (M1, M2, M3):
        s.config.replace('nsslapd-errorlog-level', "8192")

    log.info("Get the replication agreements for all 3 masters")
    m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port)

    log.info("Get the changelog entries for M1 and M2")
    changelog_m1 = Changelog5(M1)
    changelog_m2 = Changelog5(M2)

    log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2")
    changelog_m1.set_max_age(MAXAGE_STR)
    changelog_m1.set_trim_interval(TRIMINTERVAL_STR)
    changelog_m2.set_max_age(MAXAGE_STR)
    changelog_m2.set_trim_interval(TRIMINTERVAL_STR)

    log.info("Add test users to 3 masters")
    users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
    users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
    users_m3 = UserAccounts(M3, DEFAULT_SUFFIX)
    user_props = TEST_USER_PROPERTIES.copy()

    user_props.update({'uid': "testuser10"})
    user10 = users_m1.create(properties=user_props)
    user_props.update({'uid': "testuser20"})
    user20 = users_m2.create(properties=user_props)
    user_props.update({'uid': "testuser30"})
    user30 = users_m3.create(properties=user_props)

    # ::important:: testuser31 has the oldest csn in M2,
    # because it will be cleared by changelog trimming
    user_props.update({'uid': "testuser31"})
    user31 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser11"})
    user11 = users_m1.create(properties=user_props)
    user_props.update({'uid': "testuser21"})
    user21 = users_m2.create(properties=user_props)

    # Sleep long enough for the changelog max age and trim interval to take effect
    time.sleep(40)

    # Here M1, M2, M3 should still have 11, 21, 31, while 10, 20, 30 have been trimmed from the changelogs
    M2.stop()
    M1.agreement.pause(m1_m2[0].dn)
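
    # With M2 stopped and the M1->M2 agreement paused, the following updates
    # cannot reach M2 and may age out of the changelogs before it returns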
    user_props.update({'uid': "testuser32"})
    user32 = users_m3.create(properties=user_props)
    user_props.update({'uid': "testuser33"})
    user33 = users_m3.create(properties=user_props)
    user_props.update({'uid': "testuser12"})
    user12 = users_m1.create(properties=user_props)

    M3.agreement.pause(m3_m1[0].dn)
    M3.agreement.resume(m3_m1[0].dn)
    time.sleep(40)

    # Here, because of changelog trimming, testusers 31 and 32 have been trimmed from the changelogs
    # CleanAllRUV is launched, with the force option
    M3.stop()
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
                         force=True, args={TASK_WAIT: False})

    # Here M1 should clear 31
    M2.start()
    M1.agreement.pause(m1_m2[0].dn)
    M1.agreement.resume(m1_m2[0].dn)
    time.sleep(10)

    # Check the users after CleanAllRUV
    expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn]
    expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn]
    current_m1_users = [user.dn for user in users_m1.list()]
    current_m2_users = [user.dn for user in users_m2.list()]

    assert set(expected_m1_users).issubset(current_m1_users)
    assert set(expected_m2_users).issubset(current_m2_users)


@pytest.mark.ds49915
@pytest.mark.bz1626375
def test_online_reinit_may_hang(topo_with_sigkill):
    """Online reinitialization may hang when the first
    entry of the DB is the RUV entry instead of the suffix

    :id: cded6afa-66c0-4c65-9651-993ba3f7a49c
    :setup: 2 Master Instances
    :steps:
        1. Export the database
        2. Move the RUV entry to the top in the ldif file
        3. Import the ldif file
        4. Online replica initialization
    :expectedresults:
        1. Ldif file should be created successfully
        2. RUV entry should be on top in the ldif file
        3. Import should be successful
        4. Server should not hang and consume 100% CPU
    """

    M1 = topo_with_sigkill.ms["master1"]
    M2 = topo_with_sigkill.ms["master2"]
    M1.stop()
    ldif_file = '/tmp/master1.ldif'
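    # Export with repl_data=True so the replication metadata (including the
    # RUV tombstone entry) is part of the LDIF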
    M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None, repl_data=True,
               outputfile=ldif_file, encrypt=False)
    _move_ruv(ldif_file)
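    # Re-import the edited LDIF: the RUV entry is now the first entry of
    # the database, which is the condition that used to trigger the hang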
    M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    M1.start()

    # After this, the server may hang
    agmt = Agreements(M1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main("-s %s" % CURRENT_FILE)