ticket47787_test.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537
  1. # --- BEGIN COPYRIGHT BLOCK ---
  2. # Copyright (C) 2015 Red Hat, Inc.
  3. # All rights reserved.
  4. #
  5. # License: GPL (version 3 or any later version).
  6. # See LICENSE for details.
  7. # --- END COPYRIGHT BLOCK ---
  8. #
  9. '''
  10. Created on April 14, 2014
  11. @author: tbordaz
  12. '''
  13. import os
  14. import sys
  15. import time
  16. import ldap
  17. import logging
  18. import pytest
  19. import re
  20. from lib389 import DirSrv, Entry, tools, NoSuchEntryError
  21. from lib389.tools import DirSrvTools
  22. from lib389._constants import *
  23. from lib389.properties import *
  24. from lib389._constants import REPLICAROLE_MASTER
  25. logging.getLogger(__name__).setLevel(logging.DEBUG)
  26. log = logging.getLogger(__name__)
#
# important part. We can deploy Master1 and Master2 on different versions
#
installation1_prefix = None
installation2_prefix = None

# set this flag to False so that it will assert on failure _status_entry_both_server
DEBUG_FLAG = False

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

# Containers used by the test: a staging DIT, a production DIT, and an
# 'excepts' container below production.
STAGING_CN = "staged user"
PRODUCTION_CN = "accounts"
EXCEPT_CN = "excepts"
STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)

# Wildcard patterns built from the first two characters of the container CN,
# plus deliberately non-matching "bad" patterns.
STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)

# Entry used for non-Directory-Manager binds.
BIND_CN = "bind_entry"
BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
BIND_PW = "password"

# Dummy accounts created in the staging DIT.
NEW_ACCOUNT = "new_account"
MAX_ACCOUNTS = 20

CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
  51. class TopologyMaster1Master2(object):
  52. def __init__(self, master1, master2):
  53. master1.open()
  54. self.master1 = master1
  55. master2.open()
  56. self.master2 = master2
@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> Master2.
    '''
    global installation1_prefix
    global installation2_prefix

    # allocate master1 on a given deployement
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # allocate master2 on a given deployement
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)

    # Get the status of the instance and restart it if it exists
    instance_master1 = master1.exists()
    instance_master2 = master2.exists()

    # Remove all the instances so the test always starts from scratch
    if instance_master1:
        master1.delete()
    if instance_master2:
        master2.delete()

    # Create the instances
    master1.create()
    master1.open()
    master2.create()
    master2.open()

    #
    # Now prepare the Master-Consumer topology
    #
    # First Enable replication
    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

    # Initialize the supplier->consumer
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

    if not repl_agreement:
        log.fatal("Fail to create a replica agreement")
        sys.exit(1)
    log.debug("%s created" % repl_agreement)

    # Agreement in the other direction (master2 -> master1)
    properties = {RA_NAME: r'meTo_$host:$port',
                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
    master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

    # Push a full online init from master1 to master2 and wait for it
    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
    master1.waitForReplInit(repl_agreement)

    # Check replication is working fine
    if master1.testReplication(DEFAULT_SUFFIX, master2):
        log.info('Replication is working.')
    else:
        log.fatal('Replication is not working.')
        assert False

    def fin():
        # Module teardown: remove both instances
        master1.delete()
        master2.delete()
    request.addfinalizer(fin)

    # Here we have two instances master and consumer
    # with replication working.
    return TopologyMaster1Master2(master1, master2)
  136. def _bind_manager(server):
  137. server.log.info("Bind as %s " % DN_DM)
  138. server.simple_bind_s(DN_DM, PASSWORD)
  139. def _bind_normal(server):
  140. server.log.info("Bind as %s " % BIND_DN)
  141. server.simple_bind_s(BIND_DN, BIND_PW)
  142. def _header(topology, label):
  143. topology.master1.log.info("\n\n###############################################")
  144. topology.master1.log.info("#######")
  145. topology.master1.log.info("####### %s" % label)
  146. topology.master1.log.info("#######")
  147. topology.master1.log.info("###############################################")
def _status_entry_both_server(topology, name=None, desc=None, debug=True):
    """
    Compare the tombstone entry for *name* on both masters and log every
    difference (attributes or values present on only one side).

    With debug=False the comparison is strict and any divergence raises an
    AssertionError; with debug=True the differences are only logged.
    """
    if not name:
        return
    topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
    attr = 'description'
    found = False
    attempt = 0
    # Poll M1 (up to ~10s) until the tombstone carries the 'description'
    # attribute, i.e. until the replicated MOD landed on it.
    # NOTE(review): if no tombstone exists at all, _find_tombstone returns
    # None and the getAttrs() call below would raise AttributeError — confirm
    # callers only reach this after the tombstone is known to exist.
    while not found and attempt < 10:
        ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if attr in ent_m1.getAttrs():
            found = True
        else:
            time.sleep(1)
            attempt = attempt + 1
    assert ent_m1
    topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
    ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
    assert ent_m2
    topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)
    topology.master1.log.info("M1 only\n")
    # Attributes present on M1's tombstone but missing on M2's.
    for attr in ent_m1.getAttrs():
        if not debug:
            assert attr in ent_m2.getAttrs()
        if not attr in ent_m2.getAttrs():
            topology.master1.log.info(" %s" % attr)
            for val in ent_m1.getValues(attr):
                topology.master1.log.info(" %s" % val)
    topology.master1.log.info("M2 only\n")
    # Attributes present on M2's tombstone but missing on M1's.
    for attr in ent_m2.getAttrs():
        if not debug:
            assert attr in ent_m1.getAttrs()
        if not attr in ent_m1.getAttrs():
            topology.master1.log.info(" %s" % attr)
            for val in ent_m2.getValues(attr):
                topology.master1.log.info(" %s" % val)
    topology.master1.log.info("M1 differs M2\n")
    if not debug:
        assert ent_m1.dn == ent_m2.dn
    if ent_m1.dn != ent_m2.dn:
        topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))
    # For attributes both sides share: values present on M1 but not on M2.
    for attr1 in ent_m1.getAttrs():
        if attr1 in ent_m2.getAttrs():
            for val1 in ent_m1.getValues(attr1):
                found = False
                for val2 in ent_m2.getValues(attr1):
                    if val1 == val2:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info(" M1[%s] = %s" % (attr1, val1))
    # For attributes both sides share: values present on M2 but not on M1.
    for attr2 in ent_m2.getAttrs():
        if attr2 in ent_m1.getAttrs():
            for val2 in ent_m2.getValues(attr2):
                found = False
                for val1 in ent_m1.getValues(attr2):
                    if val2 == val1:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info(" M2[%s] = %s" % (attr2, val2))
  212. def _pause_RAs(topology):
  213. topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
  214. ents = topology.master1.agreement.list(suffix=SUFFIX)
  215. assert len(ents) == 1
  216. topology.master1.agreement.pause(ents[0].dn)
  217. ents = topology.master2.agreement.list(suffix=SUFFIX)
  218. assert len(ents) == 1
  219. topology.master2.agreement.pause(ents[0].dn)
  220. def _resume_RAs(topology):
  221. topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n")
  222. ents = topology.master1.agreement.list(suffix=SUFFIX)
  223. assert len(ents) == 1
  224. topology.master1.agreement.resume(ents[0].dn)
  225. ents = topology.master2.agreement.list(suffix=SUFFIX)
  226. assert len(ents) == 1
  227. topology.master2.agreement.resume(ents[0].dn)
  228. def _find_tombstone(instance, base, attr, value):
  229. #
  230. # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because
  231. # tombstone are not index in 'sn' so 'sn=name' will return NULL
  232. # and even if tombstone are indexed for objectclass the '&' will set
  233. # the candidate list to NULL
  234. #
  235. filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
  236. ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
  237. #found = False
  238. for ent in ents:
  239. if ent.hasAttr(attr):
  240. for val in ent.getValues(attr):
  241. if val == value:
  242. instance.log.debug("tombstone found: %r" % ent)
  243. return ent
  244. return None
  245. def _delete_entry(instance, entry_dn, name):
  246. instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name)
  247. # delete the entry
  248. instance.delete_s(entry_dn)
  249. assert _find_tombstone(instance, SUFFIX, 'sn', name) is not None
  250. def _mod_entry(instance, entry_dn, attr, value):
  251. instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn)
  252. mod = [(ldap.MOD_REPLACE, attr, value)]
  253. instance.modify_s(entry_dn, mod)
  254. def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
  255. assert instance is not None
  256. assert entry_dn is not None
  257. if not new_rdn:
  258. pattern = 'cn=(.*),(.*)'
  259. rdnre = re.compile(pattern)
  260. match = rdnre.match(entry_dn)
  261. old_value = match.group(1)
  262. new_rdn_val = "%s_modrdn" % old_value
  263. new_rdn = "cn=%s" % new_rdn_val
  264. instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn)
  265. if new_superior:
  266. instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
  267. else:
  268. instance.rename_s(entry_dn, new_rdn, delold=del_old)
  269. def _check_entry_exists(instance, entry_dn):
  270. loop = 0
  271. ent = None
  272. while loop <= 10:
  273. try:
  274. ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)")
  275. break
  276. except ldap.NO_SUCH_OBJECT:
  277. time.sleep(1)
  278. loop += 1
  279. if ent is None:
  280. assert False
  281. def _check_mod_received(instance, base, filt, attr, value):
  282. instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
  283. loop = 0
  284. while loop <= 10:
  285. ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
  286. if ent.hasAttr(attr) and ent.getValue(attr) == value:
  287. break
  288. time.sleep(1)
  289. loop += 1
  290. assert loop <= 10
  291. def _check_replication(topology, entry_dn):
  292. # prepare the filter to retrieve the entry
  293. filt = entry_dn.split(',')[0]
  294. topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n")
  295. loop = 0
  296. while loop <= 10:
  297. attr = 'description'
  298. value = 'test_value_%d' % loop
  299. mod = [(ldap.MOD_REPLACE, attr, value)]
  300. topology.master1.modify_s(entry_dn, mod)
  301. _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
  302. loop += 1
  303. topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n")
  304. loop = 0
  305. while loop <= 10:
  306. attr = 'description'
  307. value = 'test_value_%d' % loop
  308. mod = [(ldap.MOD_REPLACE, attr, value)]
  309. topology.master2.modify_s(entry_dn, mod)
  310. _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
  311. loop += 1
def test_ticket47787_init(topology):
    """
    Creates
         - a staging DIT
         - a production DIT
         - add accounts in staging DIT
    """
    topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")

    # entry used to bind with
    topology.master1.log.info("Add %s" % BIND_DN)
    topology.master1.add_s(Entry((BIND_DN, {
        'objectclass': "top person".split(),
        'sn': BIND_CN,
        'cn': BIND_CN,
        'userpassword': BIND_PW})))

    # DIT for staging
    topology.master1.log.info("Add %s" % STAGING_DN)
    topology.master1.add_s(Entry((STAGING_DN, {
        'objectclass': "top organizationalRole".split(),
        'cn': STAGING_CN,
        'description': "staging DIT"})))

    # DIT for production
    topology.master1.log.info("Add %s" % PRODUCTION_DN)
    topology.master1.add_s(Entry((PRODUCTION_DN, {
        'objectclass': "top organizationalRole".split(),
        'cn': PRODUCTION_CN,
        'description': "production DIT"})))

    # enable replication error logging on both masters
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # add dummy entries in the staging DIT
    for cpt in range(MAX_ACCOUNTS):
        name = "%s%d" % (NEW_ACCOUNT, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
            'objectclass': "top person".split(),
            'sn': name,
            'cn': name})))
def test_ticket47787_2(topology):
    '''
    Disable replication so that updates are not replicated
    Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior).
    update a test entry on M2
    Reenable the RA.
    checks that entry was deleted on M2 (with the modified RDN)
    checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn)
    '''
    _header(topology, "test_ticket47787_2")

    _bind_manager(topology.master1)
    _bind_manager(topology.master2)

    #entry to test the replication is still working
    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1)
    test_rdn = "cn=%s" % (name)
    testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)

    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2)
    test2_rdn = "cn=%s" % (name)
    testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN)

    # value of updates to test the replication both ways
    attr = 'description'
    value = 'test_ticket47787_2'

    # entry for the modrdn
    name = "%s%d" % (NEW_ACCOUNT, 1)
    rdn = "cn=%s" % (name)
    entry_dn = "%s,%s" % (rdn, STAGING_DN)

    # created on M1, wait the entry exists on M2
    _check_entry_exists(topology.master2, entry_dn)
    _check_entry_exists(topology.master2, testentry_dn)

    _pause_RAs(topology)

    # Delete 'entry_dn' on M1.
    # dummy update is only have a first CSN before the DEL
    # else the DEL will be in min_csn RUV and make diagnostic a bit more complex
    _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
    _delete_entry(topology.master1, entry_dn, name)
    _mod_entry(topology.master1, testentry2_dn, attr, value)

    time.sleep(1)  # important to have MOD.csn != DEL.csn

    # MOD 'entry_dn' on M2.
    # dummy update is only have a first CSN before the MOD entry_dn
    # else the MOD will be in min_csn RUV and make diagnostic a bit more complex
    _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
    _mod_entry(topology.master2, entry_dn, attr, value)
    _mod_entry(topology.master2, testentry_dn, attr, value)

    _resume_RAs(topology)

    # Poll (up to ~11s) for the tombstone of the deleted entry on M2.
    topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent

    # the following checks are not necessary
    # as this bug is only for failing replicated MOD (entry_dn) on M1
    #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
    #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
    #
    #_check_replication(topology, testentry_dn)

    _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)

    # Poll for the tombstone on M1 and verify the M2 MOD reached it:
    # this is the actual regression check for ticket 47787.
    topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent
    assert ent.hasAttr(attr)
    assert ent.getValue(attr) == value
  423. if __name__ == '__main__':
  424. # Run isolated
  425. # -s for DEBUG mode
  426. CURRENT_FILE = os.path.realpath(__file__)
  427. pytest.main("-s %s" % CURRENT_FILE)