ticket47787_test.py

'''
Created on April 14, 2014

@author: tbordaz
'''
import os
import sys
import time
import ldap
import logging
import socket
import pytest
import re
from lib389 import DirSrv, Entry, tools, NoSuchEntryError
from lib389.tools import DirSrvTools
from lib389._constants import *
from lib389.properties import *
from constants import *
from lib389._constants import REPLICAROLE_MASTER

logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)

#
# important part. We can deploy Master1 and Master2 on different versions
#
installation1_prefix = None
installation2_prefix = None

# Set this flag to False so that _status_entry_both_server() asserts on
# failure; set it to True to only log the differences.
DEBUG_FLAG = False

TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX

STAGING_CN = "staged user"
PRODUCTION_CN = "accounts"
EXCEPT_CN = "excepts"

STAGING_DN = "cn=%s,%s" % (STAGING_CN, SUFFIX)
PRODUCTION_DN = "cn=%s,%s" % (PRODUCTION_CN, SUFFIX)
PROD_EXCEPT_DN = "cn=%s,%s" % (EXCEPT_CN, PRODUCTION_DN)

STAGING_PATTERN = "cn=%s*,%s" % (STAGING_CN[:2], SUFFIX)
PRODUCTION_PATTERN = "cn=%s*,%s" % (PRODUCTION_CN[:2], SUFFIX)
BAD_STAGING_PATTERN = "cn=bad*,%s" % (SUFFIX)
BAD_PRODUCTION_PATTERN = "cn=bad*,%s" % (SUFFIX)

BIND_CN = "bind_entry"
BIND_DN = "cn=%s,%s" % (BIND_CN, SUFFIX)
BIND_PW = "password"

NEW_ACCOUNT = "new_account"
MAX_ACCOUNTS = 20

CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci"
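
# Resulting test DIT (illustrative, derived from the constants above):
#   cn=bind_entry,<SUFFIX>             entry used for normal binds
#   cn=staged user,<SUFFIX>            staging DIT holding MAX_ACCOUNTS dummy users
#   cn=accounts,<SUFFIX>               production DIT
#   cn=excepts,cn=accounts,<SUFFIX>    exception subtree under production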


class TopologyMaster1Master2(object):
    def __init__(self, master1, master2):
        master1.open()
        self.master1 = master1

        master2.open()
        self.master2 = master2


@pytest.fixture(scope="module")
def topology(request):
    '''
    This fixture is used to create a replicated topology for the 'module'.
    The replicated topology is MASTER1 <-> MASTER2.

    At the beginning, a master1 instance and/or a master2 instance may
    already exist, and there may also be a backup for master1 and/or master2.

    Principle:
        If the master1 instance exists:
            restart it
        If the master2 instance exists:
            restart it
        If a backup of master1 AND a backup of master2 exist:
            create or rebind to master1
            create or rebind to master2

            restore master1 from backup
            restore master2 from backup
        else:
            Cleanup everything
                remove instances
                remove backups
            Create instances
            Initialize replication
            Create backups
    '''
    global installation1_prefix
    global installation2_prefix

    # Allocate master1 on a given deployment
    master1 = DirSrv(verbose=False)
    if installation1_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation1_prefix

    # Args for the master1 instance
    args_instance[SER_HOST] = HOST_MASTER_1
    args_instance[SER_PORT] = PORT_MASTER_1
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
    args_master = args_instance.copy()
    master1.allocate(args_master)

    # Allocate master2 on a given deployment
    master2 = DirSrv(verbose=False)
    if installation2_prefix:
        args_instance[SER_DEPLOYED_DIR] = installation2_prefix

    # Args for the master2 instance
    args_instance[SER_HOST] = HOST_MASTER_2
    args_instance[SER_PORT] = PORT_MASTER_2
    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
    args_master = args_instance.copy()
    master2.allocate(args_master)
    # Get the status of the backups
    backup_master1 = master1.checkBackupFS()
    backup_master2 = master2.checkBackupFS()

    # Get the status of the instances and restart them if they exist
    instance_master1 = master1.exists()
    if instance_master1:
        master1.stop(timeout=10)
        master1.start(timeout=10)

    instance_master2 = master2.exists()
    if instance_master2:
        master2.stop(timeout=10)
        master2.start(timeout=10)
    if backup_master1 and backup_master2:
        # The backups exist; assuming they are correct
        # we just re-init the instances from them
        if not instance_master1:
            master1.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master1.open()

        if not instance_master2:
            master2.create()
            # Used to retrieve configuration information (dbdir, confdir...)
            master2.open()

        # Restore master1 from backup
        master1.stop(timeout=10)
        master1.restoreFS(backup_master1)
        master1.start(timeout=10)

        # Restore master2 from backup
        master2.stop(timeout=10)
        master2.restoreFS(backup_master2)
        master2.start(timeout=10)
    else:
        # We should be here only under two conditions:
        #  - this is the first time a test involves master-consumer,
        #    so we need to create everything
        #  - something weird happened (instance/backup destroyed),
        #    so we discard everything and recreate it all

        # Remove all the backups. Even if we have a specific backup file
        # (e.g. backup_master) we clear all backups that an instance may have created.
        if backup_master1:
            master1.clearBackupFS()
        if backup_master2:
            master2.clearBackupFS()

        # Remove all the instances
        if instance_master1:
            master1.delete()
        if instance_master2:
            master2.delete()

        # Create the instances
        master1.create()
        master1.open()
        master2.create()
        master2.open()
        #
        # Now prepare the Master-Master topology
        #
        # First enable replication
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)

        # Initialize the supplier->consumer
        properties = {RA_NAME:           r'meTo_$host:$port',
                      RA_BINDDN:         defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:         defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:         defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)

        if not repl_agreement:
            log.fatal("Failed to create a replica agreement")
            sys.exit(1)
        log.debug("%s created" % repl_agreement)

        properties = {RA_NAME:           r'meTo_$host:$port',
                      RA_BINDDN:         defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW:         defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD:         defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)

        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
        master1.waitForReplInit(repl_agreement)
        # Check that replication is working fine
        master1.add_s(Entry((TEST_REPL_DN, {
                                'objectclass': "top person".split(),
                                'sn': 'test_repl',
                                'cn': 'test_repl'})))
        loop = 0
        ent = None
        while loop <= 10:
            try:
                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
                loop += 1
        if ent is None:
            assert False
        # Time to create the backups
        master1.stop(timeout=10)
        master1.backupfile = master1.backupFS()
        master1.start(timeout=10)

        master2.stop(timeout=10)
        master2.backupfile = master2.backupFS()
        master2.start(timeout=10)

    # Clear the tmp directory
    master1.clearTmpDir(__file__)

    #
    # Here we have two master instances with replication working, either
    # coming from a backup recovery or from a fresh (re)init.
    # Time to return the topology.
    #
    return TopologyMaster1Master2(master1, master2)


def _bind_manager(server):
    server.log.info("Bind as %s" % DN_DM)
    server.simple_bind_s(DN_DM, PASSWORD)


def _bind_normal(server):
    server.log.info("Bind as %s" % BIND_DN)
    server.simple_bind_s(BIND_DN, BIND_PW)


def _header(topology, label):
    topology.master1.log.info("\n\n###############################################")
    topology.master1.log.info("#######")
    topology.master1.log.info("####### %s" % label)
    topology.master1.log.info("#######")
    topology.master1.log.info("###############################################")


def _status_entry_both_server(topology, name=None, desc=None, debug=True):
    '''
    Retrieve the tombstone of 'name' on both masters and log the differences
    (attributes present on only one side, or differing values).
    With debug=False, any difference triggers an assertion.
    '''
    if not name:
        return

    topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n")
    attr = 'description'
    found = False
    attempt = 0
    while not found and attempt < 10:
        ent_m1 = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if attr in ent_m1.getAttrs():
            found = True
        else:
            time.sleep(1)
            attempt = attempt + 1
    assert ent_m1

    topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n")
    ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
    assert ent_m2

    topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc)
    topology.master1.log.info("M1 only\n")
    for attr in ent_m1.getAttrs():
        if not debug:
            assert attr in ent_m2.getAttrs()
        if not attr in ent_m2.getAttrs():
            topology.master1.log.info("    %s" % attr)
            for val in ent_m1.getValues(attr):
                topology.master1.log.info("        %s" % val)

    topology.master1.log.info("M2 only\n")
    for attr in ent_m2.getAttrs():
        if not debug:
            assert attr in ent_m1.getAttrs()
        if not attr in ent_m1.getAttrs():
            topology.master1.log.info("    %s" % attr)
            for val in ent_m2.getValues(attr):
                topology.master1.log.info("        %s" % val)

    topology.master1.log.info("M1 differs M2\n")
    if not debug:
        assert ent_m1.dn == ent_m2.dn
    if ent_m1.dn != ent_m2.dn:
        topology.master1.log.info("    M1[dn] = %s\n    M2[dn] = %s" % (ent_m1.dn, ent_m2.dn))

    for attr1 in ent_m1.getAttrs():
        if attr1 in ent_m2.getAttrs():
            for val1 in ent_m1.getValues(attr1):
                found = False
                for val2 in ent_m2.getValues(attr1):
                    if val1 == val2:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info("    M1[%s] = %s" % (attr1, val1))

    for attr2 in ent_m2.getAttrs():
        if attr2 in ent_m1.getAttrs():
            for val2 in ent_m2.getValues(attr2):
                found = False
                for val1 in ent_m1.getValues(attr2):
                    if val2 == val1:
                        found = True
                        break
                if not debug:
                    assert found
                if not found:
                    topology.master1.log.info("    M2[%s] = %s" % (attr2, val2))


def _pause_RAs(topology):
    topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.pause(ents[0].dn)

    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.pause(ents[0].dn)


def _resume_RAs(topology):
    topology.master1.log.info("\n\n######################### Resume RA M1<->M2 ######################\n")
    ents = topology.master1.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master1.agreement.resume(ents[0].dn)

    ents = topology.master2.agreement.list(suffix=SUFFIX)
    assert len(ents) == 1
    topology.master2.agreement.resume(ents[0].dn)


def _find_tombstone(instance, base, attr, value):
    #
    # We cannot use a filter like (&(objectclass=nsTombstone)(sn=name)) because
    # tombstones are not indexed on 'sn', so 'sn=name' returns an empty
    # candidate list, and even though tombstones are indexed on objectclass,
    # the '&' would intersect down to an empty candidate list.
    #
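    # For illustration only: the naive compound filter warned about above
    # would be the following, and would return no candidates:
    #   filt = '(&(objectclass=%s)(sn=%s))' % (REPLICA_OC_TOMBSTONE, value)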
    filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE
    ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt)
    for ent in ents:
        if ent.hasAttr(attr):
            for val in ent.getValues(attr):
                if val == value:
                    instance.log.debug("tombstone found: %r" % ent)
                    return ent
    return None


def _delete_entry(instance, entry_dn, name):
    instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name)

    # Delete the entry and check that a tombstone was created for it
    instance.delete_s(entry_dn)
    assert _find_tombstone(instance, SUFFIX, 'sn', name) is not None


def _mod_entry(instance, entry_dn, attr, value):
    instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn)
    mod = [(ldap.MOD_REPLACE, attr, value)]
    instance.modify_s(entry_dn, mod)


def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None):
    assert instance is not None
    assert entry_dn is not None

    if not new_rdn:
        # Non-greedy first group, so we capture only the value of the
        # first RDN rather than everything up to the last comma
        pattern = 'cn=(.*?),(.*)'
        rdnre = re.compile(pattern)
        match = rdnre.match(entry_dn)
        old_value = match.group(1)
        new_rdn_val = "%s_modrdn" % old_value
        new_rdn = "cn=%s" % new_rdn_val

    instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn)
    if new_superior:
        instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old)
    else:
        instance.rename_s(entry_dn, new_rdn, delold=del_old)
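
# Example (illustrative): without an explicit new_rdn,
#   _modrdn_entry(instance, entry_dn='cn=new_account1,' + STAGING_DN)
# derives new_rdn='cn=new_account1_modrdn' from the current RDN and renames
# the entry in place (same superior, delold=0).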


def _check_entry_exists(instance, entry_dn):
    loop = 0
    ent = None
    while loop <= 10:
        try:
            ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    if ent is None:
        assert False


def _check_mod_received(instance, base, filt, attr, value):
    instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid)
    loop = 0
    while loop <= 10:
        ent = instance.getEntry(base, ldap.SCOPE_SUBTREE, filt)
        if ent.hasAttr(attr) and ent.getValue(attr) == value:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10


def _check_replication(topology, entry_dn):
    # Prepare the filter to retrieve the entry
    filt = entry_dn.split(',')[0]

    topology.master1.log.info("\n######################### Check replication M1->M2 ######################\n")
    loop = 0
    while loop <= 10:
        attr = 'description'
        value = 'test_value_%d' % loop
        mod = [(ldap.MOD_REPLACE, attr, value)]
        topology.master1.modify_s(entry_dn, mod)
        _check_mod_received(topology.master2, SUFFIX, filt, attr, value)
        loop += 1

    topology.master1.log.info("\n######################### Check replication M2->M1 ######################\n")
    loop = 0
    while loop <= 10:
        attr = 'description'
        value = 'test_value_%d' % loop
        mod = [(ldap.MOD_REPLACE, attr, value)]
        topology.master2.modify_s(entry_dn, mod)
        _check_mod_received(topology.master1, SUFFIX, filt, attr, value)
        loop += 1


def test_ticket47787_init(topology):
    """
    Creates
        - a staging DIT
        - a production DIT
        - accounts in the staging DIT
    """
    topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n")

    # Entry used to bind with
    topology.master1.log.info("Add %s" % BIND_DN)
    topology.master1.add_s(Entry((BIND_DN, {
                                    'objectclass': "top person".split(),
                                    'sn': BIND_CN,
                                    'cn': BIND_CN,
                                    'userpassword': BIND_PW})))

    # DIT for staging
    topology.master1.log.info("Add %s" % STAGING_DN)
    topology.master1.add_s(Entry((STAGING_DN, {
                                    'objectclass': "top organizationalRole".split(),
                                    'cn': STAGING_CN,
                                    'description': "staging DIT"})))

    # DIT for production
    topology.master1.log.info("Add %s" % PRODUCTION_DN)
    topology.master1.add_s(Entry((PRODUCTION_DN, {
                                    'objectclass': "top organizationalRole".split(),
                                    'cn': PRODUCTION_CN,
                                    'description': "production DIT"})))

    # Enable replication error logging
    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')]
    topology.master1.modify_s(DN_CONFIG, mod)
    topology.master2.modify_s(DN_CONFIG, mod)

    # Add dummy entries in the staging DIT
    for cpt in range(MAX_ACCOUNTS):
        name = "%s%d" % (NEW_ACCOUNT, cpt)
        topology.master1.add_s(Entry(("cn=%s,%s" % (name, STAGING_DN), {
                                        'objectclass': "top person".split(),
                                        'sn': name,
                                        'cn': name})))


def test_ticket47787_2(topology):
    '''
    Disable replication so that updates are not replicated.
    Delete an entry on M1; MOD the same entry on M2.
    Update a test entry on M2.
    Re-enable the RAs.
    Check that the entry was deleted on M2 (tombstone exists).
    Check that the MOD was replicated to M1 and applied to the tombstone
    (i.e. replication M2->M1 is not broken by the DEL/MOD conflict).
    '''
    _header(topology, "test_ticket47787_2")

    _bind_manager(topology.master1)
    _bind_manager(topology.master2)

    # Entries used to check that replication is still working
    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1)
    test_rdn = "cn=%s" % (name)
    testentry_dn = "%s,%s" % (test_rdn, STAGING_DN)

    name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2)
    test2_rdn = "cn=%s" % (name)
    testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN)

    # Value of the updates used to test replication both ways
    attr = 'description'
    value = 'test_ticket47787_2'

    # Entry used for the DEL (M1) / MOD (M2) conflict
    name = "%s%d" % (NEW_ACCOUNT, 1)
    rdn = "cn=%s" % (name)
    entry_dn = "%s,%s" % (rdn, STAGING_DN)

    # Created on M1; wait until the entries exist on M2
    _check_entry_exists(topology.master2, entry_dn)
    _check_entry_exists(topology.master2, testentry_dn)
    _pause_RAs(topology)

    # DEL 'entry_dn' on M1.
    # The dummy update is only there to have a first CSN before the DEL;
    # otherwise the DEL would end up in the min_csn RUV and make diagnosis
    # a bit more complex.
    _mod_entry(topology.master1, testentry2_dn, attr, 'dummy')
    _delete_entry(topology.master1, entry_dn, name)
    _mod_entry(topology.master1, testentry2_dn, attr, value)

    time.sleep(1)  # important to have MOD.csn != DEL.csn

    # MOD 'entry_dn' on M2.
    # The dummy update is only there to have a first CSN before the MOD of
    # entry_dn; otherwise the MOD would end up in the min_csn RUV and make
    # diagnosis a bit more complex.
    _mod_entry(topology.master2, testentry_dn, attr, 'dummy')
    _mod_entry(topology.master2, entry_dn, attr, value)
    _mod_entry(topology.master2, testentry_dn, attr, value)
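
    # Per-master update order while the RAs are paused (illustrative):
    #   M1: MOD(testentry2_dn, dummy) < DEL(entry_dn) < MOD(testentry2_dn, value)
    #   M2: MOD(testentry_dn, dummy)  < MOD(entry_dn) < MOD(testentry_dn, value)
    # On resume, the DEL (from M1) and the MOD of entry_dn (from M2) have to
    # be resolved as an update/delete conflict on the tombstone.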
    _resume_RAs(topology)
    topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master2, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent

    # The following checks are not necessary, as this bug is only about a
    # failing replicated MOD (entry_dn) on M1
    #_check_mod_received(topology.master1, SUFFIX, "(%s)" % (test_rdn), attr, value)
    #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value)
    #
    #_check_replication(topology, testentry_dn)

    _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG)
    topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n")
    loop = 0
    while loop <= 10:
        ent = _find_tombstone(topology.master1, SUFFIX, 'sn', name)
        if ent:
            break
        time.sleep(1)
        loop += 1
    assert loop <= 10
    assert ent
    assert ent.hasAttr(attr)
    assert ent.getValue(attr) == value


def test_ticket47787_final(topology):
    topology.master1.delete()
    topology.master2.delete()


def run_isolated():
    '''
    run_isolated is used to run these test cases independently of a test
    scheduler (xunit, py.test, ...).
    To run them isolated without py.test you need to:
        - edit this file and comment out the '@pytest.fixture' line before
          the 'topology' function
        - set the installation prefix
        - run this program
    '''
    global installation1_prefix
    global installation2_prefix
    installation1_prefix = None
    installation2_prefix = None

    topo = topology(True)
    topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n")
    test_ticket47787_init(topo)
    test_ticket47787_2(topo)
    test_ticket47787_final(topo)


if __name__ == '__main__':
    run_isolated()