# --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import ldif
import pytest
import subprocess
from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
from lib389.utils import *
from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db
from lib389._constants import *
from . import get_repl_entries
from lib389.idm.organizationalunit import OrganizationalUnits
from lib389.agreement import Agreements
from lib389.idm.user import UserAccount
from lib389 import Entry
from lib389.idm.group import Groups, Group
from lib389.replica import Replicas, ReplicationManager
from lib389.changelog import Changelog5
from lib389 import pid_from_file

pytestmark = pytest.mark.tier1
NEW_SUFFIX_NAME = 'test_repl'
NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME)
NEW_BACKEND = 'repl_base'
MAXAGE_ATTR = 'nsslapd-changelogmaxage'
MAXAGE_STR = '30'
TRIMINTERVAL_STR = '5'
TRIMINTERVAL = 'nsslapd-changelogtrim-interval'

DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)


def find_start_location(file, no):
    """Return the offset in the error log just past the no-th
    'slapd started' message, or -1 if there are fewer starts."""
    log_pattern = re.compile("slapd_daemon - slapd started.")
    count = 0
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if found:
            count = count + 1
            if count == no:
                return file.tell()
        if line == '':
            break
    return -1


def pattern_errorlog(file, log_pattern, start_location=0):
    """Count the lines matching log_pattern in the error log,
    reading from start_location to the end of the file."""
    count = 0
    log.debug("_pattern_errorlog: start from offset %d" % start_location)
    file.seek(start_location)

    # Use a 'while True' loop because 'for line in file:' hits a
    # Python bug that breaks file.tell()
    while True:
        line = file.readline()
        log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
        found = log_pattern.search(line)
        if found:
            count = count + 1
        if line == '':
            break

    log.debug("_pattern_errorlog: complete (count=%d)" % count)
    return count
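

# A minimal usage sketch for the two helpers above (names are illustrative):
# skip past the second "slapd started" message, then count pattern hits only
# after that restart.
#
#   errorlog = open(inst.errlog, "r")        # 'inst' is a hypothetical DirSrv
#   start = find_start_location(errorlog, 2)
#   if start != -1:
#       hits = pattern_errorlog(errorlog, re.compile("some error"),
#                               start_location=start)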


def _move_ruv(ldif_file):
    """Move the RUV entry in an ldif file to the top"""
    with open(ldif_file) as f:
        parser = ldif.LDIFRecordList(f)
        parser.parse()

        ldif_list = parser.all_records
        for dn in ldif_list:
            if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'):
                ruv_index = ldif_list.index(dn)
                ldif_list.insert(0, ldif_list.pop(ruv_index))
                break

    with open(ldif_file, 'w') as f:
        ldif_writer = ldif.LDIFWriter(f)
        for dn, entry in ldif_list:
            ldif_writer.unparse(dn, entry)
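

# The RUV entry is a tombstone carrying the fixed nsuniqueid
# ffffffff-ffffffff-ffffffff-ffffffff, which is what _move_ruv() keys on.
# Exporting with repl_data=True includes it in the LDIF; moving it to the
# first position recreates the database layout that made online
# reinitialization hang (see test_online_reinit_may_hang below).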


@pytest.fixture(scope="module")
def topo_with_sigkill(request):
    """Create Replication Deployment with two masters"""

    topology = create_topology({ReplicaRole.MASTER: 2})

    def _kill_ns_slapd(inst):
        pid = str(pid_from_file(inst.ds_paths.pid_file))
        cmd = ['kill', '-9', pid]
        subprocess.Popen(cmd, stdout=subprocess.PIPE)

    def fin():
        # Kill the hanging process at the end of the test to prevent
        # failures in the following tests
        if DEBUGGING:
            [_kill_ns_slapd(inst) for inst in topology]
            # [inst.stop() for inst in topology]
        else:
            [_kill_ns_slapd(inst) for inst in topology]
            assert _remove_ssca_db(topology)
            [inst.delete() for inst in topology if inst.exists()]
    request.addfinalizer(fin)

    return topology
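

# This fixture tears down with SIGKILL rather than a clean 'inst.stop()':
# the scenario exercised by test_online_reinit_may_hang can leave ns-slapd
# hung at 100% CPU, where a graceful shutdown would block the whole test run.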


@pytest.fixture()
def create_entry(topo_m2, request):
    """Add test entry using UserAccounts"""

    log.info('Adding a test entry user')
    users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX)
    tuser = users.ensure_state(properties=TEST_USER_PROPERTIES)
    return tuser


def test_double_delete(topo_m2, create_entry):
    """Check that double delete of the entry doesn't crash server

    :id: 3496c82d-636a-48c9-973c-2455b12164cc
    :setup: Two masters replication setup, a test entry
    :steps:
        1. Delete the entry on the first master
        2. Delete the entry on the second master
        3. Check that server is alive
    :expectedresults:
        1. Entry should be successfully deleted from the first master
        2. Entry should be successfully deleted from the second master
        3. Server should be alive
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.disable_to_master(m1, [m2])
    repl.disable_to_master(m2, [m1])

    log.info('Deleting entry {} from master1'.format(create_entry.dn))
    topo_m2.ms["master1"].delete_s(create_entry.dn)

    log.info('Deleting entry {} from master2'.format(create_entry.dn))
    topo_m2.ms["master2"].delete_s(create_entry.dn)

    repl.enable_to_master(m2, [m1])
    repl.enable_to_master(m1, [m2])

    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)


@pytest.mark.bz1506831
def test_repl_modrdn(topo_m2):
    """Test that replicated MODRDN does not break replication

    :id: a3e17698-9eb4-41e0-b537-8724b9915fa6
    :setup: Two masters replication setup
    :steps:
        1. Add 3 test OrganizationalUnits A, B and C
        2. Add 1 test user under OU=A
        3. Add same test user under OU=B
        4. Stop Replication
        5. Apply modrdn to M1 - move test user from OU A -> C
        6. Apply modrdn on M2 - move test user from OU B -> C
        7. Start Replication
        8. Check that there should be only one test entry under ou=C on both masters
        9. Check that the replication is working fine both ways M1 <-> M2
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass
        5. This should pass
        6. This should pass
        7. This should pass
        8. This should pass
        9. This should pass
    """

    master1 = topo_m2.ms["master1"]
    master2 = topo_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs")
    OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX)
    OU_A = OUs.create(properties={
        'ou': 'A',
        'description': 'A',
    })
    OU_B = OUs.create(properties={
        'ou': 'B',
        'description': 'B',
    })
    OU_C = OUs.create(properties={
        'ou': 'C',
        'description': 'C',
    })

    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn))
    tuser_A = users.create(properties=TEST_USER_PROPERTIES)
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
    tuser_B = users.create(properties=TEST_USER_PROPERTIES)

    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Stop Replication")
    topo_m2.pause_all_replicas()

    log.info("Apply modrdn to M1 - move test user from OU A -> C")
    master1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)

    log.info("Apply modrdn on M2 - move test user from OU B -> C")
    master2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)

    log.info("Start Replication")
    topo_m2.resume_all_replicas()

    log.info("Wait for some time for replication to resume")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Check that there should be only one test entry under ou=C on both masters")
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1

    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)


def test_password_repl_error(topo_m2, create_entry):
    """Check that error about userpassword replication is properly logged

    :id: 714130ff-e4f0-4633-9def-c1f4b24abfef
    :setup: Two masters replication setup, a test entry
    :steps:
        1. Change userpassword on the first master
        2. Restart the servers to flush the logs
        3. Check the error log for a replication error
    :expectedresults:
        1. Password should be successfully changed
        2. Server should be successfully restarted
        3. There should be no replication errors in the error log
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    TEST_ENTRY_NEW_PASS = 'new_pass'

    log.info('Clean the error log')
    m2.deleteErrorLogs()

    log.info('Set replication loglevel')
    m2.config.loglevel((ErrorLog.REPLICA,))

    log.info('Modifying entry {} - change userpassword on master 1'.format(create_entry.dn))
    create_entry.set('userpassword', TEST_ENTRY_NEW_PASS)

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)

    log.info('Restart the servers to flush the logs')
    for num in range(1, 3):
        topo_m2.ms["master{}".format(num)].restart()

    try:
        log.info('Check that password works on master 2')
        create_entry_m2 = UserAccount(m2, create_entry.dn)
        create_entry_m2.bind(TEST_ENTRY_NEW_PASS)

        log.info('Check the error log for the error with {}'.format(create_entry.dn))
        assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn))
    finally:
        log.info('Set the default loglevel')
        m2.config.loglevel((ErrorLog.DEFAULT,))


def test_invalid_agmt(topo_m2):
    """Test that adding an invalid agreement is properly rejected and does not crash the server

    :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b
    :setup: Two masters replication setup
    :steps:
        1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
        2. Verify the server is still running
    :expectedresults:
        1. Invalid repl agreement should be rejected
        2. Server should be still running
    """

    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]

    repl = ReplicationManager(DEFAULT_SUFFIX)

    replicas = Replicas(m1)
    replica = replicas.get(DEFAULT_SUFFIX)
    agmts = replica.get_agreements()

    # Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
    with pytest.raises(ldap.UNWILLING_TO_PERFORM):
        agmts.create(properties={
            'cn': 'whatever',
            'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
            'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
            'nsDS5ReplicaBindMethod': 'simple',
            'nsDS5ReplicaTransportInfo': 'LDAP',
            'nsds5replicaTimeout': '5',
            'description': "test agreement",
            'nsDS5ReplicaHost': m2.host,
            'nsDS5ReplicaPort': str(m2.port),
            'nsDS5ReplicaCredentials': 'whatever',
            'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE'
        })

    # Verify the server is still running
    repl.test_replication(m1, m2)
    repl.test_replication(m2, m1)


def test_fetch_bindDnGroup(topo_m2):
    """Check the bindDNGroup is fetched on first replication session

    :id: 5f1b1f59-6744-4260-b091-c82d22130025
    :setup: 2 Master Instances
    :steps:
        1. Create a replication bound user and group, with the user *not* a member of the group
        2. Check that replication is working
        3. Some preparation is required because of lib389 magic that already defines a replication via group
           - define the group as groupDN for replication and 60sec as fetch interval
           - pause RA in both directions
           - define the user as bindDn of the RAs
        4. Restart the servers. This sets the fetch time to 0, so the next session will refetch the group
        5. Before resuming the RAs, add the user to the groupDN (on both sides, as replication is not working at that time)
        6. Trigger an update and check that replication is working and that no
           'does not have permission to supply replication updates to the replica' failure is logged on the supplier side
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
    """

    M1 = topo_m2.ms['master1']
    M2 = topo_m2.ms['master2']

    # Enable replication log level. Not really necessary
    M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
    M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])

    # Create a group and a user
    PEOPLE = "ou=People,%s" % SUFFIX
    PASSWD = 'password'
    REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'

    uid = REPL_MGR_BOUND_DN.encode()
    users = UserAccounts(M1, PEOPLE, rdn=None)
    user_props = TEST_USER_PROPERTIES.copy()
    user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'})
    create_user = users.create(properties=user_props)

    groups_M1 = Groups(M1, DEFAULT_SUFFIX)
    group_properties = {
        'cn': 'group1',
        'description': 'testgroup'}
    group_M1 = groups_M1.create(properties=group_properties)
    group_M2 = Group(M2, group_M1.dn)
    assert not group_M1.is_member(create_user.dn)

    # Check that M1 and M2 are in sync
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=20)

    # Define the group as the replication manager and fetch interval as 60sec
    replicas = Replicas(M1)
    replica = replicas.list()[0]
    replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
                        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])

    replicas = Replicas(M2)
    replica = replicas.list()[0]
    replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
                        (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])

    # Then pause the replication agreements to prevent them from trying to
    # acquire the consumer while the user is not a member of the group
    topo_m2.pause_all_replicas()

    # Define the user as the bindDN of the RAs
    for inst in (M1, M2):
        agmts = Agreements(inst)
        agmt = agmts.list()[0]
        agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
        agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())

    # Key step
    # The restart will fetch the group/members defined in the replica
    #
    # Since the user is NOT a member of the group, replication will not work
    # until the group is re-fetched (after nsDS5ReplicaBindDnGroupCheckInterval)
    #
    # With the fix, the first fetch is not taken into account (fetch time=0),
    # so the group will be fetched on the first session
    M1.restart()
    M2.restart()

    # Replication being broken here, we need to apply the same update directly
    # on both sides (no better solution found short of a total update)
    group_M1.add_member(create_user.dn)
    group_M2.add_member(create_user.dn)

    topo_m2.resume_all_replicas()

    # Trigger updates to be sure to have a replication session, giving it some time
    M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
    M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
    time.sleep(10)

    # Check replication is working
    ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert ent.hasAttr('description')
        found = 0
        for val in ent.getValues('description'):
            if val == b'value_1_1':
                found = found + 1
            elif val == b'value_2_2':
                found = found + 1
        assert found == 2

    ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
    for ent in ents:
        assert ent.hasAttr('description')
        found = 0
        for val in ent.getValues('description'):
            if val == b'value_1_1':
                found = found + 1
            elif val == b'value_2_2':
                found = found + 1
        assert found == 2

    # Check in the logs that the member was detected in the group although
    # at startup it was not a member of the group
    regex = re.compile("does not have permission to supply replication updates to the replica.")
    errorlog_M1 = open(M1.errlog, "r")
    errorlog_M2 = open(M2.errlog, "r")

    # Find the last restart position
    restart_location_M1 = find_start_location(errorlog_M1, 2)
    assert restart_location_M1 != -1
    restart_location_M2 = find_start_location(errorlog_M2, 2)
    assert restart_location_M2 != -1

    # Then check there is no failure to authenticate
    count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1)
    assert count <= 1
    count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2)
    assert count <= 1

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


def test_cleanallruv_repl(topo_m3):
    """Test that cleanallruv does not break replication if the anchor csn in the ruv originated in a deleted replica

    :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a
    :setup: 3 Masters
    :steps:
        1. Configure error log level to 8192 in all masters
        2. Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2
        3. Add test users to 3 masters
        4. Launch CleanAllRUV with the force option
        5. Check the users after CleanAllRUV; because of changelog trimming, it will affect the changelogs
    :expectedresults:
        1. Error logs should be configured successfully
        2. Modify should be successful
        3. Test users should be added successfully
        4. CleanAllRUV should be launched successfully
        5. Users should be present according to the changelog trimming effect
    """

    M1 = topo_m3.ms["master1"]
    M2 = topo_m3.ms["master2"]
    M3 = topo_m3.ms["master3"]

    log.info("Change the error log levels for all masters")
    for s in (M1, M2, M3):
        s.config.replace('nsslapd-errorlog-level', "8192")

    log.info("Get the replication agreements for all 3 masters")
    m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
    m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
    m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port)

    log.info("Get the changelog entries for M1 and M2")
    changelog_m1 = Changelog5(M1)
    changelog_m2 = Changelog5(M2)

    log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2")
    changelog_m1.set_max_age(MAXAGE_STR)
    changelog_m1.set_trim_interval(TRIMINTERVAL_STR)
    changelog_m2.set_max_age(MAXAGE_STR)
    changelog_m2.set_trim_interval(TRIMINTERVAL_STR)

    log.info("Add test users to 3 masters")
    users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
    users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
    users_m3 = UserAccounts(M3, DEFAULT_SUFFIX)
    user_props = TEST_USER_PROPERTIES.copy()

    user_props.update({'uid': "testuser10"})
    user10 = users_m1.create(properties=user_props)

    user_props.update({'uid': "testuser20"})
    user20 = users_m2.create(properties=user_props)

    user_props.update({'uid': "testuser30"})
    user30 = users_m3.create(properties=user_props)

    # ::important:: testuser31 has the oldest csn in M2,
    # because it will be cleared by changelog trimming
    user_props.update({'uid': "testuser31"})
    user31 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser11"})
    user11 = users_m1.create(properties=user_props)

    user_props.update({'uid': "testuser21"})
    user21 = users_m2.create(properties=user_props)

    # Sleep long enough to trigger the changelog trim (maxage=30s, interval=5s)
    time.sleep(40)

    # Here M1, M2, M3 should still have 11, 21, 31, while 10, 20, 30 have been
    # trimmed from the changelogs
    M2.stop()
    M1.agreement.pause(m1_m2[0].dn)

    user_props.update({'uid': "testuser32"})
    user32 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser33"})
    user33 = users_m3.create(properties=user_props)

    user_props.update({'uid': "testuser12"})
    user12 = users_m1.create(properties=user_props)

    M3.agreement.pause(m3_m1[0].dn)
    M3.agreement.resume(m3_m1[0].dn)
    time.sleep(40)

    # Here, because of changelog trimming, testusers 31 and 32 have been
    # cleared from the changelogs; CleanAllRUV is launched with force
    M3.stop()
    M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
                         force=True, args={TASK_WAIT: False})
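
    # Note (assumption about CleanAllRUV semantics): with force=True the task
    # does not wait for all replicas to be online and caught up, which is why
    # it can be launched here while M3 is stopped.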

    # Here M1 should clear 31
    M2.start()
    M1.agreement.pause(m1_m2[0].dn)
    M1.agreement.resume(m1_m2[0].dn)
    time.sleep(10)

    # Check the users after CleanAllRUV
    expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn]
    expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn]

    current_m1_users = [user.dn for user in users_m1.list()]
    current_m2_users = [user.dn for user in users_m2.list()]

    assert set(expected_m1_users).issubset(current_m1_users)
    assert set(expected_m2_users).issubset(current_m2_users)


@pytest.mark.ds49915
@pytest.mark.bz1626375
def test_online_reinit_may_hang(topo_with_sigkill):
    """Online reinitialization may hang when the first
       entry of the DB is the RUV entry instead of the suffix

    :id: cded6afa-66c0-4c65-9651-993ba3f7a49c
    :setup: 2 Master Instances
    :steps:
        1. Export the database
        2. Move the RUV entry to the top of the ldif file
        3. Import the ldif file
        4. Online replica initialization
    :expectedresults:
        1. Ldif file should be created successfully
        2. RUV entry should be on top in the ldif file
        3. Import should be successful
        4. Server should not hang and consume 100% CPU
    """

    M1 = topo_with_sigkill.ms["master1"]
    M2 = topo_with_sigkill.ms["master2"]

    M1.stop()
    ldif_file = '%s/master1.ldif' % M1.get_ldif_dir()
    M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None, repl_data=True,
               outputfile=ldif_file, encrypt=False)
    _move_ruv(ldif_file)
    M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    M1.start()

    # After this, the server may hang
    agmt = Agreements(M1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False

    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)

    if DEBUGGING:
        # Add debugging steps(if any)...
        pass


if __name__ == '__main__':
    # Run isolated
    # -s for DEBUG mode
    CURRENT_FILE = os.path.realpath(__file__)
    pytest.main(["-s", CURRENT_FILE])