regression_test.py 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931
  1. # --- BEGIN COPYRIGHT BLOCK ---
  2. # Copyright (C) 2017 Red Hat, Inc.
  3. # All rights reserved.
  4. #
  5. # License: GPL (version 3 or any later version).
  6. # See LICENSE for details.
  7. # --- END COPYRIGHT BLOCK ---
  8. #
  9. import ldif
  10. import pytest
  11. import subprocess
  12. from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
  13. from lib389.pwpolicy import PwPolicyManager
  14. from lib389.utils import *
  15. from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
  16. from lib389._constants import *
  17. from lib389.idm.organizationalunit import OrganizationalUnits
  18. from lib389.idm.user import UserAccount
  19. from lib389.idm.group import Groups, Group
  20. from lib389.idm.domain import Domain
  21. from lib389.idm.directorymanager import DirectoryManager
  22. from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
  23. from lib389.agreement import Agreements
  24. from lib389 import pid_from_file
# Every test in this module runs at tier1.
pytestmark = pytest.mark.tier1

# Suffix/backend names for tests that create a dedicated replication base.
NEW_SUFFIX_NAME = 'test_repl'
NEW_SUFFIX = 'o={}'.format(NEW_SUFFIX_NAME)
NEW_BACKEND = 'repl_base'

# DN of the per-backend changelog entry (new-style changelog) plus the
# attribute names / values used below to configure changelog trimming.
CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
MAXAGE_ATTR = 'nsslapd-changelogmaxage'
MAXAGE_STR = '30'
TRIMINTERVAL_STR = '5'
TRIMINTERVAL = 'nsslapd-changelogtrim-interval'

# When DEBUGGING is set in the environment, log more and keep artifacts
# around for post-mortem analysis (see topo_with_sigkill finalizer).
DEBUGGING = os.getenv("DEBUGGING", default=False)
if DEBUGGING:
    logging.getLogger(__name__).setLevel(logging.DEBUG)
else:
    logging.getLogger(__name__).setLevel(logging.INFO)
log = logging.getLogger(__name__)
  40. @pytest.fixture(scope="module")
  41. def set_value(master, attr, val):
  42. """
  43. Helper function to add/replace attr: val and check the added value
  44. """
  45. try:
  46. master.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, attr, ensure_bytes(val))])
  47. except ldap.LDAPError as e:
  48. log.error('Failed to add ' + attr + ': ' + val + ' to ' + plugin + ': error {}'.format(get_ldap_error_msg(e,'desc')))
  49. assert False
  50. def find_start_location(file, no):
  51. log_pattern = re.compile("slapd_daemon - slapd started.")
  52. count = 0
  53. while True:
  54. line = file.readline()
  55. log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
  56. found = log_pattern.search(line)
  57. if (found):
  58. count = count + 1
  59. if (count == no):
  60. return file.tell()
  61. if (line == ''):
  62. break
  63. return -1
  64. def pattern_errorlog(file, log_pattern, start_location=0):
  65. count = 0
  66. log.debug("_pattern_errorlog: start from the beginning")
  67. file.seek(start_location)
  68. # Use a while true iteration because 'for line in file: hit a
  69. # python bug that break file.tell()
  70. while True:
  71. line = file.readline()
  72. log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
  73. found = log_pattern.search(line)
  74. if (found):
  75. count = count + 1
  76. if (line == ''):
  77. break
  78. log.debug("_pattern_errorlog: complete (count=%d)" % count)
  79. return count
  80. def _move_ruv(ldif_file):
  81. """ Move RUV entry in an ldif file to the top"""
  82. with open(ldif_file) as f:
  83. parser = ldif.LDIFRecordList(f)
  84. parser.parse()
  85. ldif_list = parser.all_records
  86. for dn in ldif_list:
  87. if dn[0].startswith('nsuniqueid=ffffffff-ffffffff-ffffffff-ffffffff'):
  88. ruv_index = ldif_list.index(dn)
  89. ldif_list.insert(0, ldif_list.pop(ruv_index))
  90. break
  91. with open(ldif_file, 'w') as f:
  92. ldif_writer = ldif.LDIFWriter(f)
  93. for dn, entry in ldif_list:
  94. ldif_writer.unparse(dn, entry)
@pytest.fixture(scope="module")
def topo_with_sigkill(request):
    """Create Replication Deployment with two masters"""
    topology = create_topology({ReplicaRole.MASTER: 2})

    def _kill_ns_slapd(inst):
        # SIGKILL the ns-slapd process so it cannot shut down cleanly;
        # tests in this module deliberately leave servers in a hung state.
        pid = str(pid_from_file(inst.ds_paths.pid_file))
        cmd = ['kill', '-9', pid]
        subprocess.Popen(cmd, stdout=subprocess.PIPE)

    def fin():
        # Kill the hanging process at the end of test to prevent failures in the following tests
        if DEBUGGING:
            [_kill_ns_slapd(inst) for inst in topology]
        else:
            [_kill_ns_slapd(inst) for inst in topology]
            assert _remove_ssca_db(topology)
            [inst.delete() for inst in topology if inst.exists()]
        # NOTE(review): indentation was lost in this copy of the file; the
        # cleanup (_remove_ssca_db / inst.delete) is assumed to run only in
        # the non-DEBUGGING branch so DEBUGGING keeps instances for
        # inspection — confirm against upstream.

    request.addfinalizer(fin)
    return topology
  113. @pytest.fixture()
  114. def create_entry(topo_m2, request):
  115. """Add test entry using UserAccounts"""
  116. log.info('Adding a test entry user')
  117. users = UserAccounts(topo_m2.ms["master1"], DEFAULT_SUFFIX)
  118. tuser = users.ensure_state(properties=TEST_USER_PROPERTIES)
  119. return tuser
  120. def add_ou_entry(server, idx, parent):
  121. ous = OrganizationalUnits(server, parent)
  122. name = 'OU%d' % idx
  123. ous.create(properties={'ou': '%s' % name})
  124. def add_user_entry(server, idx, parent):
  125. users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent)
  126. user_properties = {
  127. 'uid': 'tuser%d' % idx,
  128. 'givenname': 'test',
  129. 'cn': 'Test User%d' % idx,
  130. 'sn': 'user%d' % idx,
  131. 'userpassword': PW_DM,
  132. 'uidNumber' : '1000%d' % idx,
  133. 'gidNumber': '2000%d' % idx,
  134. 'homeDirectory': '/home/{}'.format('tuser%d' % idx)
  135. }
  136. users.create(properties=user_properties)
  137. def del_user_entry(server, idx, parent):
  138. users = UserAccounts(server, DEFAULT_SUFFIX, rdn=parent)
  139. test_user = users.get('tuser%d' % idx)
  140. test_user.delete()
  141. def rename_entry(server, idx, ou_name, new_parent):
  142. users = UserAccounts(server, DEFAULT_SUFFIX, rdn=ou_name)
  143. name = 'tuser%d' % idx
  144. rdn = 'uid=%s' % name
  145. test_user = users.get(name)
  146. test_user.rename(new_rdn=rdn, newsuperior=new_parent)
  147. def add_ldapsubentry(server, parent):
  148. pwp = PwPolicyManager(server)
  149. policy_props = {'passwordStorageScheme': 'ssha',
  150. 'passwordCheckSyntax': 'on',
  151. 'passwordInHistory': '6',
  152. 'passwordChange': 'on',
  153. 'passwordMinAge': '0',
  154. 'passwordExp': 'off',
  155. 'passwordMustChange': 'off',}
  156. log.info('Create password policy for subtree {}'.format(parent))
  157. pwp.create_subtree_policy(parent, policy_props)
def test_special_symbol_replica_agreement(topo_i2):
    """ Check if agreement starts with "cn=->..." then
    after upgrade does it get removed.

    :id: 68aa0072-4dd4-4e33-b107-cb383a439125
    :setup: two standalone instance
    :steps:
        1. Create and Enable Replication on standalone2 and role as consumer
        2. Create and Enable Replication on standalone1 and role as master
        3. Create a Replication agreement starts with "cn=->..."
        4. Perform an upgrade operation over the master
        5. Check if the agreement is still present or not.
    :expectedresults:
        1. It should be successful
        2. It should be successful
        3. It should be successful
        4. It should be successful
        5. It should be successful
    """
    master = topo_i2.ins["standalone1"]
    consumer = topo_i2.ins["standalone2"]
    consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX,
                                       role=ReplicaRole.CONSUMER,
                                       replicaId=CONSUMER_REPLICAID)
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.create_first_master(master)
    # Agreement whose cn begins with special characters ('-\\3meTo_...')
    properties = {RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host,
                                                   str(consumer.port)),
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master.agreement.create(suffix=SUFFIX,
                            host=consumer.host,
                            port=consumer.port,
                            properties=properties)
    master.agreement.init(SUFFIX, consumer.host, consumer.port)
    replica_server = Replicas(master).get(DEFAULT_SUFFIX)
    # Upgrade must not drop the specially-named agreement
    master.upgrade('online')
    agmt = replica_server.get_agreements().list()[0]
    assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(consumer.host,
                                                                   str(consumer.port))
  197. def test_double_delete(topo_m2, create_entry):
  198. """Check that double delete of the entry doesn't crash server
  199. :id: 3496c82d-636a-48c9-973c-2455b12164cc
  200. :setup: Two masters replication setup, a test entry
  201. :steps:
  202. 1. Delete the entry on the first master
  203. 2. Delete the entry on the second master
  204. 3. Check that server is alive
  205. :expectedresults:
  206. 1. Entry should be successfully deleted from first master
  207. 2. Entry should be successfully deleted from second aster
  208. 3. Server should me alive
  209. """
  210. m1 = topo_m2.ms["master1"]
  211. m2 = topo_m2.ms["master2"]
  212. repl = ReplicationManager(DEFAULT_SUFFIX)
  213. repl.disable_to_master(m1, [m2])
  214. repl.disable_to_master(m2, [m1])
  215. log.info('Deleting entry {} from master1'.format(create_entry.dn))
  216. topo_m2.ms["master1"].delete_s(create_entry.dn)
  217. log.info('Deleting entry {} from master2'.format(create_entry.dn))
  218. topo_m2.ms["master2"].delete_s(create_entry.dn)
  219. repl.enable_to_master(m2, [m1])
  220. repl.enable_to_master(m1, [m2])
  221. repl.test_replication(m1, m2)
  222. repl.test_replication(m2, m1)
@pytest.mark.bz1506831
def test_repl_modrdn(topo_m2):
    """Test that replicated MODRDN does not break replication

    :id: a3e17698-9eb4-41e0-b537-8724b9915fa6
    :setup: Two masters replication setup
    :steps:
        1. Add 3 test OrganizationalUnits A, B and C
        2. Add 1 test user under OU=A
        3. Add same test user under OU=B
        4. Stop Replication
        5. Apply modrdn to M1 - move test user from OU A -> C
        6. Apply modrdn on M2 - move test user from OU B -> C
        7. Start Replication
        8. Check that there should be only one test entry under ou=C on both masters
        9. Check that the replication is working fine both ways M1 <-> M2
    :expectedresults:
        1. This should pass
        2. This should pass
        3. This should pass
        4. This should pass
        5. This should pass
        6. This should pass
        7. This should pass
        8. This should pass
        9. This should pass
    """
    master1 = topo_m2.ms["master1"]
    master2 = topo_m2.ms["master2"]
    repl = ReplicationManager(DEFAULT_SUFFIX)

    log.info("Add test entries - Add 3 OUs and 2 same users under 2 different OUs")
    OUs = OrganizationalUnits(master1, DEFAULT_SUFFIX)
    OU_A = OUs.create(properties={
        'ou': 'A',
        'description': 'A',
    })
    OU_B = OUs.create(properties={
        'ou': 'B',
        'description': 'B',
    })
    OU_C = OUs.create(properties={
        'ou': 'C',
        'description': 'C',
    })
    # Two users with the same uid, one under OU=A and one under OU=B
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_A.rdn))
    tuser_A = users.create(properties=TEST_USER_PROPERTIES)
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_B.rdn))
    tuser_B = users.create(properties=TEST_USER_PROPERTIES)
    # Make sure both entries exist on both masters before pausing
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    log.info("Stop Replication")
    topo_m2.pause_all_replicas()
    log.info("Apply modrdn to M1 - move test user from OU A -> C")
    master1.rename_s(tuser_A.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)
    log.info("Apply modrdn on M2 - move test user from OU B -> C")
    master2.rename_s(tuser_B.dn, 'uid=testuser1', newsuperior=OU_C.dn, delold=1)
    log.info("Start Replication")
    topo_m2.resume_all_replicas()
    log.info("Wait for sometime for repl to resume")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)

    # The two concurrent renames converged on the same DN; the conflict
    # resolution must leave exactly one entry under OU=C on each master.
    log.info("Check that there should be only one test entry under ou=C on both masters")
    users = UserAccounts(master1, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1
    users = UserAccounts(master2, DEFAULT_SUFFIX, rdn='ou={}'.format(OU_C.rdn))
    assert len(users.list()) == 1
    log.info("Check that the replication is working fine both ways, M1 <-> M2")
    repl.test_replication(master1, master2)
    repl.test_replication(master2, master1)
def test_password_repl_error(topo_m2, create_entry):
    """Check that error about userpassword replication is properly logged

    :id: 714130ff-e4f0-4633-9def-c1f4b24abfef
    :setup: Two masters replication setup, a test entry
    :steps:
        1. Change userpassword on the first master
        2. Restart the servers to flush the logs
        3. Check the error log for an replication error
    :expectedresults:
        1. Password should be successfully changed
        2. Server should be successfully restarted
        3. There should be no replication errors in the error log
    """
    m1 = topo_m2.ms["master1"]
    m2 = topo_m2.ms["master2"]
    TEST_ENTRY_NEW_PASS = 'new_pass'

    log.info('Clean the error log')
    m2.deleteErrorLogs()
    log.info('Set replication loglevel')
    m2.config.loglevel((ErrorLog.REPLICA,))

    log.info('Modifying entry {} - change userpassword on master 1'.format(create_entry.dn))
    create_entry.set('userpassword', TEST_ENTRY_NEW_PASS)
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(m1, m2)

    log.info('Restart the servers to flush the logs')
    for num in range(1, 3):
        topo_m2.ms["master{}".format(num)].restart()

    try:
        # The replicated password must be usable for a bind on master 2
        log.info('Check that password works on master 2')
        create_entry_m2 = UserAccount(m2, create_entry.dn)
        create_entry_m2.bind(TEST_ENTRY_NEW_PASS)
        log.info('Check the error log for the error with {}'.format(create_entry.dn))
        assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(create_entry.dn))
    finally:
        # Always restore the default loglevel, even if the assertions fail
        log.info('Set the default loglevel')
        m2.config.loglevel((ErrorLog.DEFAULT,))
  327. def test_invalid_agmt(topo_m2):
  328. """Test adding that an invalid agreement is properly rejected and does not crash the server
  329. :id: 6c3b2a7e-edcd-4327-a003-6bd878ff722b
  330. :setup: Four masters replication setup
  331. :steps:
  332. 1. Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
  333. 2. Verify the server is still running
  334. :expectedresults:
  335. 1. Invalid repl agreement should be rejected
  336. 2. Server should be still running
  337. """
  338. m1 = topo_m2.ms["master1"]
  339. m2 = topo_m2.ms["master2"]
  340. repl = ReplicationManager(DEFAULT_SUFFIX)
  341. replicas = Replicas(m1)
  342. replica = replicas.get(DEFAULT_SUFFIX)
  343. agmts = replica.get_agreements()
  344. # Add invalid agreement (nsds5ReplicaEnabled set to invalid value)
  345. with pytest.raises(ldap.UNWILLING_TO_PERFORM):
  346. agmts.create(properties={
  347. 'cn': 'whatever',
  348. 'nsDS5ReplicaRoot': DEFAULT_SUFFIX,
  349. 'nsDS5ReplicaBindDN': 'cn=replication manager,cn=config',
  350. 'nsDS5ReplicaBindMethod': 'simple',
  351. 'nsDS5ReplicaTransportInfo': 'LDAP',
  352. 'nsds5replicaTimeout': '5',
  353. 'description': "test agreement",
  354. 'nsDS5ReplicaHost': m2.host,
  355. 'nsDS5ReplicaPort': str(m2.port),
  356. 'nsDS5ReplicaCredentials': 'whatever',
  357. 'nsds5ReplicaEnabled': 'YEAH MATE, LETS REPLICATE'
  358. })
  359. # Verify the server is still running
  360. repl = ReplicationManager(DEFAULT_SUFFIX)
  361. repl.test_replication(m1, m2)
  362. repl.test_replication(m2, m1)
  363. def test_fetch_bindDnGroup(topo_m2):
  364. """Check the bindDNGroup is fetched on first replication session
  365. :id: 5f1b1f59-6744-4260-b091-c82d22130025
  366. :setup: 2 Master Instances
  367. :steps:
  368. 1. Create a replication bound user and group, but the user *not* member of the group
  369. 2. Check that replication is working
  370. 3. Some preparation is required because of lib389 magic that already define a replication via group
  371. - define the group as groupDN for replication and 60sec as fetch interval
  372. - pause RA in both direction
  373. - Define the user as bindDn of the RAs
  374. 4. restart servers.
  375. It sets the fetch time to 0, so next session will refetch the group
  376. 5. Before resuming RA, add user to groupDN (on both side as replication is not working at that time)
  377. 6. trigger an update and check replication is working and
  378. there is no failure logged on supplier side 'does not have permission to supply replication updates to the replica'
  379. :expectedresults:
  380. 1. Success
  381. 2. Success
  382. 3. Success
  383. 4. Success
  384. 5. Success
  385. 6. Success
  386. """
  387. # If you need any test suite initialization,
  388. # please, write additional fixture for that (including finalizer).
  389. # Topology for suites are predefined in lib389/topologies.py.
  390. # If you need host, port or any other data about instance,
  391. # Please, use the instance object attributes for that (for example, topo.ms["master1"].serverid)
  392. M1 = topo_m2.ms['master1']
  393. M2 = topo_m2.ms['master2']
  394. # Enable replication log level. Not really necessary
  395. M1.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
  396. M2.modify_s('cn=config', [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', b'8192')])
  397. # Create a group and a user
  398. PEOPLE = "ou=People,%s" % SUFFIX
  399. PASSWD = 'password'
  400. REPL_MGR_BOUND_DN = 'repl_mgr_bound_dn'
  401. uid = REPL_MGR_BOUND_DN.encode()
  402. users = UserAccounts(M1, PEOPLE, rdn=None)
  403. user_props = TEST_USER_PROPERTIES.copy()
  404. user_props.update({'uid': uid, 'cn': uid, 'sn': '_%s' % uid, 'userpassword': PASSWD.encode(), 'description': b'value creation'})
  405. create_user = users.create(properties=user_props)
  406. groups_M1 = Groups(M1, DEFAULT_SUFFIX)
  407. group_properties = {
  408. 'cn': 'group1',
  409. 'description': 'testgroup'}
  410. group_M1 = groups_M1.create(properties=group_properties)
  411. group_M2 = Group(M2, group_M1.dn)
  412. assert(not group_M1.is_member(create_user.dn))
  413. # Check that M1 and M2 are in sync
  414. repl = ReplicationManager(DEFAULT_SUFFIX)
  415. repl.wait_for_replication(M1, M2, timeout=20)
  416. # Define the group as the replication manager and fetch interval as 60sec
  417. replicas = Replicas(M1)
  418. replica = replicas.list()[0]
  419. replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
  420. (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])
  421. replicas = Replicas(M2)
  422. replica = replicas.list()[0]
  423. replica.apply_mods([(ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroupCheckInterval', '60'),
  424. (ldap.MOD_REPLACE, 'nsDS5ReplicaBindDnGroup', group_M1.dn)])
  425. # Then pause the replication agreement to prevent them trying to acquire
  426. # while the user is not member of the group
  427. topo_m2.pause_all_replicas()
  428. # Define the user as the bindDN of the RAs
  429. for inst in (M1, M2):
  430. agmts = Agreements(inst)
  431. agmt = agmts.list()[0]
  432. agmt.replace('nsDS5ReplicaBindDN', create_user.dn.encode())
  433. agmt.replace('nsds5ReplicaCredentials', PASSWD.encode())
  434. # Key step
  435. # The restart will fetch the group/members define in the replica
  436. #
  437. # The user NOT member of the group replication will not work until bindDNcheckInterval
  438. #
  439. # With the fix, the first fetch is not taken into account (fetch time=0)
  440. # so on the first session, the group will be fetched
  441. M1.restart()
  442. M2.restart()
  443. # Replication being broken here we need to directly do the same update.
  444. # Sorry not found another solution except total update
  445. group_M1.add_member(create_user.dn)
  446. group_M2.add_member(create_user.dn)
  447. topo_m2.resume_all_replicas()
  448. # trigger updates to be sure to have a replication session, giving some time
  449. M1.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_1_1')])
  450. M2.modify_s(create_user.dn, [(ldap.MOD_ADD, 'description', b'value_2_2')])
  451. time.sleep(10)
  452. # Check replication is working
  453. ents = M1.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
  454. for ent in ents:
  455. assert (ent.hasAttr('description'))
  456. found = 0
  457. for val in ent.getValues('description'):
  458. if (val == b'value_1_1'):
  459. found = found + 1
  460. elif (val == b'value_2_2'):
  461. found = found + 1
  462. assert (found == 2)
  463. ents = M2.search_s(create_user.dn, ldap.SCOPE_BASE, '(objectclass=*)')
  464. for ent in ents:
  465. assert (ent.hasAttr('description'))
  466. found = 0
  467. for val in ent.getValues('description'):
  468. if (val == b'value_1_1'):
  469. found = found + 1
  470. elif (val == b'value_2_2'):
  471. found = found + 1
  472. assert (found == 2)
  473. # Check in the logs that the member was detected in the group although
  474. # at startup it was not member of the group
  475. regex = re.compile("does not have permission to supply replication updates to the replica.")
  476. errorlog_M1 = open(M1.errlog, "r")
  477. errorlog_M2 = open(M1.errlog, "r")
  478. # Find the last restart position
  479. restart_location_M1 = find_start_location(errorlog_M1, 2)
  480. assert (restart_location_M1 != -1)
  481. restart_location_M2 = find_start_location(errorlog_M2, 2)
  482. assert (restart_location_M2 != -1)
  483. # Then check there is no failure to authenticate
  484. count = pattern_errorlog(errorlog_M1, regex, start_location=restart_location_M1)
  485. assert(count <= 1)
  486. count = pattern_errorlog(errorlog_M2, regex, start_location=restart_location_M2)
  487. assert(count <= 1)
def test_plugin_bind_dn_tracking_and_replication(topo_m2):
    """Testing nsslapd-plugin-binddn-tracking does not cause issues around
    access control and reconfiguring replication/repl agmt.

    :id: dd689d03-69b8-4bf9-a06e-2acd19d5e2c9
    :setup: 2 master topology
    :steps:
        1. Turn on plugin binddn tracking
        2. Add some users
        3. Make an update as a user
        4. Make an update to the replica config
        5. Make an update to the replication agreement
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
    """
    m1 = topo_m2.ms["master1"]

    # Turn on bind dn tracking
    m1.config.set('nsslapd-plugin-binddn-tracking', 'on')

    # Add two users
    users = UserAccounts(m1, DEFAULT_SUFFIX)
    user1 = users.create_test_user(uid=1011)
    user1.set('userpassword', PASSWORD)
    user2 = users.create_test_user(uid=1012)

    # Add an aci granting user1 write access to 'cn' on the suffix
    acival = '(targetattr ="cn")(version 3.0;acl "Test bind dn tracking"' + \
             ';allow (all) (userdn = "ldap:///{}");)'.format(user1.dn)
    Domain(m1, DEFAULT_SUFFIX).add('aci', acival)

    # Bind as user and make an update
    user1.rebind(PASSWORD)
    user2.set('cn', 'new value')
    # Switch back to Directory Manager for the config changes below
    dm = DirectoryManager(m1)
    dm.rebind()

    # modify replica
    replica = Replicas(m1).get(DEFAULT_SUFFIX)
    replica.set(REPL_PROTOCOL_TIMEOUT, "30")

    # modify repl agmt
    agmt = replica.get_agreements().list()[0]
    agmt.set(REPL_PROTOCOL_TIMEOUT, "20")
  529. def test_cleanallruv_repl(topo_m3):
  530. """Test that cleanallruv could not break replication if anchor csn in ruv originated in deleted replica
  531. :id: 46faba9a-897e-45b8-98dc-aec7fa8cec9a
  532. :setup: 3 Masters
  533. :steps:
  534. 1. Configure error log level to 8192 in all masters
  535. 2. Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2
  536. 3. Add test users to 3 masters
  537. 4. Launch ClearRuv but withForce
  538. 5. Check the users after CleanRUV, because of changelog trimming, it will effect the CLs
  539. :expectedresults:
  540. 1. Error logs should be configured successfully
  541. 2. Modify should be successful
  542. 3. Test users should be added successfully
  543. 4. ClearRuv should be launched successfully
  544. 5. Users should be present according to the changelog trimming effect
  545. """
  546. M1 = topo_m3.ms["master1"]
  547. M2 = topo_m3.ms["master2"]
  548. M3 = topo_m3.ms["master3"]
  549. log.info("Change the error log levels for all masters")
  550. for s in (M1, M2, M3):
  551. s.config.replace('nsslapd-errorlog-level', "8192")
  552. log.info("Get the replication agreements for all 3 masters")
  553. m1_m2 = M1.agreement.list(suffix=SUFFIX, consumer_host=M2.host, consumer_port=M2.port)
  554. m1_m3 = M1.agreement.list(suffix=SUFFIX, consumer_host=M3.host, consumer_port=M3.port)
  555. m3_m1 = M3.agreement.list(suffix=SUFFIX, consumer_host=M1.host, consumer_port=M1.port)
  556. log.info("Modify nsslapd-changelogmaxage=30 and nsslapd-changelogtrim-interval=5 for M1 and M2")
  557. if ds_supports_new_changelog():
  558. CHANGELOG = 'cn=changelog,{}'.format(DN_USERROOT_LDBM)
  559. #set_value(M1, MAXAGE_ATTR, MAXAGE_STR)
  560. try:
  561. M1.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, MAXAGE_ATTR, ensure_bytes(MAXAGE_STR))])
  562. except ldap.LDAPError as e:
  563. log.error('Failed to add ' + MAXAGE_ATTR, + ': ' + MAXAGE_STR + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc')))
  564. assert False
  565. #set_value(M2, TRIMINTERVAL, TRIMINTERVAL_STR)
  566. try:
  567. M2.modify_s(CHANGELOG, [(ldap.MOD_REPLACE, TRIMINTERVAL, ensure_bytes(TRIMINTERVAL_STR))])
  568. except ldap.LDAPError as e:
  569. log.error('Failed to add ' + TRIMINTERVAL, + ': ' + TRIMINTERVAL_STR + ' to ' + CHANGELOG + ': error {}'.format(get_ldap_error_msg(e,'desc')))
  570. assert False
  571. else:
  572. log.info("Get the changelog enteries for M1 and M2")
  573. changelog_m1 = Changelog5(M1)
  574. changelog_m1.set_max_age(MAXAGE_STR)
  575. changelog_m1.set_trim_interval(TRIMINTERVAL_STR)
  576. log.info("Add test users to 3 masters")
  577. users_m1 = UserAccounts(M1, DEFAULT_SUFFIX)
  578. users_m2 = UserAccounts(M2, DEFAULT_SUFFIX)
  579. users_m3 = UserAccounts(M3, DEFAULT_SUFFIX)
  580. user_props = TEST_USER_PROPERTIES.copy()
  581. user_props.update({'uid': "testuser10"})
  582. user10 = users_m1.create(properties=user_props)
  583. user_props.update({'uid': "testuser20"})
  584. user20 = users_m2.create(properties=user_props)
  585. user_props.update({'uid': "testuser30"})
  586. user30 = users_m3.create(properties=user_props)
  587. # ::important:: the testuser31 is the oldest csn in M2,
  588. # because it will be cleared by changelog trimming
  589. user_props.update({'uid': "testuser31"})
  590. user31 = users_m3.create(properties=user_props)
  591. user_props.update({'uid': "testuser11"})
  592. user11 = users_m1.create(properties=user_props)
  593. user_props.update({'uid': "testuser21"})
  594. user21 = users_m2.create(properties=user_props)
  595. # this is to trigger changelog trim and interval values
  596. time.sleep(40)
  597. # Here M1, M2, M3 should have 11,21,31 and 10,20,30 are CL cleared
  598. M2.stop()
  599. M1.agreement.pause(m1_m2[0].dn)
  600. user_props.update({'uid': "testuser32"})
  601. user32 = users_m3.create(properties=user_props)
  602. user_props.update({'uid': "testuser33"})
  603. user33 = users_m3.create(properties=user_props)
  604. user_props.update({'uid': "testuser12"})
  605. user12 = users_m1.create(properties=user_props)
  606. M3.agreement.pause(m3_m1[0].dn)
  607. M3.agreement.resume(m3_m1[0].dn)
  608. time.sleep(40)
  609. # Here because of changelog trimming testusers 31 and 32 are CL cleared
  610. # ClearRuv is launched but with Force
  611. M3.stop()
  612. M1.tasks.cleanAllRUV(suffix=SUFFIX, replicaid='3',
  613. force=True, args={TASK_WAIT: False})
  614. # here M1 should clear 31
  615. M2.start()
  616. M1.agreement.pause(m1_m2[0].dn)
  617. M1.agreement.resume(m1_m2[0].dn)
  618. time.sleep(10)
  619. # Check the users after CleanRUV
  620. expected_m1_users = [user31.dn, user11.dn, user21.dn, user32.dn, user33.dn, user12.dn]
  621. expected_m1_users = [x.lower() for x in expected_m1_users]
  622. expected_m2_users = [user31.dn, user11.dn, user21.dn, user12.dn]
  623. expected_m2_users = [x.lower() for x in expected_m2_users]
  624. current_m1_users = [user.dn for user in users_m1.list()]
  625. current_m1_users = [x.lower() for x in current_m1_users]
  626. current_m2_users = [user.dn for user in users_m2.list()]
  627. current_m2_users = [x.lower() for x in current_m2_users]
  628. assert set(expected_m1_users).issubset(current_m1_users)
  629. assert set(expected_m2_users).issubset(current_m2_users)
@pytest.mark.ds49915
@pytest.mark.bz1626375
def test_online_reinit_may_hang(topo_with_sigkill):
    """Online reinitialization may hang when the first
    entry of the DB is RUV entry instead of the suffix

    :id: cded6afa-66c0-4c65-9651-993ba3f7a49c
    :setup: 2 Master Instances
    :steps:
        1. Export the database
        2. Move RUV entry to the top in the ldif file
        3. Import the ldif file
        4. Online replica initialization
    :expectedresults:
        1. Ldif file should be created successfully
        2. RUV entry should be on top in the ldif file
        3. Import should be successful
        4. Server should not hang and consume 100% CPU
    """
    M1 = topo_with_sigkill.ms["master1"]
    M2 = topo_with_sigkill.ms["master2"]
    M1.stop()
    # Export M1's backend with replication data, move the RUV entry to the
    # top of the ldif, then import it back
    ldif_file = '%s/master1.ldif' % M1.get_ldif_dir()
    M1.db2ldif(bename=DEFAULT_BENAME, suffixes=[DEFAULT_SUFFIX],
               excludeSuffixes=None, repl_data=True,
               outputfile=ldif_file, encrypt=False)
    _move_ruv(ldif_file)
    M1.ldif2db(DEFAULT_BENAME, None, None, None, ldif_file)
    M1.start()
    # After this server may hang
    agmt = Agreements(M1).list()[0]
    agmt.begin_reinit()
    (done, error) = agmt.wait_reinit()
    assert done is True
    assert error is False
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.test_replication_topology(topo_with_sigkill)
    if DEBUGGING:
        # Add debugging steps(if any)...
        pass
@pytest.mark.bz1314956
@pytest.mark.ds48755
def test_moving_entry_make_online_init_fail(topology_m2):
    """
    Moving an entry could make the online init fail

    :id: e3895be7-884a-4e9f-80e3-24e9a5167c9e
    :setup: Two masters replication setup
    :steps:
        1. Generate DIT_0
        2. Generate password policy for DIT_0
        3. Create users for DIT_0
        4. Turn idx % 2 == 0 users into tombstones
        5. Generate DIT_1
        6. Move 'ou=OU0,ou=OU0,dc=example,dc=com' to DIT_1
        7. Move 'ou=OU0,dc=example,dc=com' to DIT_1
        8. Move idx % 2 == 1 users to 'ou=OU0,ou=OU0,ou=OU1,dc=example,dc=com'
        9. Init replicas
        10. Number of entries should match on both masters
    :expectedresults:
        1. Success
        2. Success
        3. Success
        4. Success
        5. Success
        6. Success
        7. Success
        8. Success
        9. Success
        10. Success
    """
    M1 = topology_m2.ms["master1"]
    M2 = topology_m2.ms["master2"]

    # --- Phase 1: build DIT_0 (ou=OU0,ou=OU0,<suffix>) with an
    # ldapsubentry and a set of test users.
    log.info("Generating DIT_0")
    idx = 0
    add_ou_entry(M1, idx, DEFAULT_SUFFIX)
    log.info("Created entry: ou=OU0, dc=example, dc=com")

    ou0 = 'ou=OU%d' % idx
    first_parent = '%s,%s' % (ou0, DEFAULT_SUFFIX)
    add_ou_entry(M1, idx, first_parent)
    log.info("Created entry: ou=OU0, ou=OU0, dc=example, dc=com")

    # ldapsubentry (password policy container) under ou=OU0,<suffix>.
    add_ldapsubentry(M1, first_parent)

    ou_name = 'ou=OU%d,ou=OU%d' % (idx, idx)
    second_parent = 'ou=OU%d,%s' % (idx, first_parent)
    # NOTE(review): range(0, 9) creates tuser0..tuser8 (9 users), yet the
    # log message below says "10 USERS" — confirm the intended count.
    # `idx` is deliberately reused as the loop variable here.
    for idx in range(0, 9):
        add_user_entry(M1, idx, ou_name)
        if idx % 2 == 0:
            # Deleting a replicated entry turns it into a tombstone.
            log.info("Turning tuser%d into a tombstone entry" % idx)
            del_user_entry(M1, idx, ou_name)
    log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, first_parent, second_parent))

    # --- Phase 2: build DIT_1 (ou=OU1,ou=OU1,<suffix>) and reparent
    # DIT_0's containers underneath it.
    log.info("Generating DIT_1")
    idx = 1
    add_ou_entry(M1, idx, DEFAULT_SUFFIX)
    log.info("Created entry: ou=OU1,dc=example,dc=com")

    third_parent = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX)
    add_ou_entry(M1, idx, third_parent)
    log.info("Created entry: ou=OU1, ou=OU1, dc=example, dc=com")

    add_ldapsubentry(M1, third_parent)

    # Reparent ou=OU0,ou=OU0 (then ou=OU0) by renaming with a new
    # superior, moving each whole subtree under DIT_1.
    log.info("Moving %s to DIT_1" % second_parent)
    OrganizationalUnits(M1, second_parent).get('OU0').rename(ou0, newsuperior=third_parent)

    log.info("Moving %s to DIT_1" % first_parent)
    fourth_parent = '%s,%s' % (ou0, third_parent)
    OrganizationalUnits(M1, first_parent).get('OU0').rename(ou0, newsuperior=fourth_parent)

    fifth_parent = '%s,%s' % (ou0, fourth_parent)

    ou_name = 'ou=OU0,ou=OU1'
    log.info("Moving USERS to %s" % fifth_parent)
    # Only the odd-numbered users still exist (even ones are tombstones).
    for idx in range(0, 9):
        if idx % 2 == 1:
            rename_entry(M1, idx, ou_name, fifth_parent)
    log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, third_parent, fourth_parent, fifth_parent))

    # --- Phase 3: let replication catch up, then compare entry counts
    # (regular entries, tombstones and ldapsubentries) on both masters.
    log.info("Run Initialization.")
    repl = ReplicationManager(DEFAULT_SUFFIX)
    repl.wait_for_replication(M1, M2, timeout=5)
    m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                            '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
    m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                            '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
    log.info("m1entry count - %d", len(m1entries))
    log.info("m2entry count - %d", len(m2entries))
    assert len(m1entries) == len(m2entries)
  748. if __name__ == '__main__':
  749. # Run isolated
  750. # -s for DEBUG mode
  751. CURRENT_FILE = os.path.realpath(__file__)
  752. pytest.main("-s %s" % CURRENT_FILE)